x86: make x86_64 accept the max_cpus parameter
arch/x86/kernel/smpboot.c
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>

#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/numa.h>

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;

/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

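/*
 * Build the sibling maps for @cpu: every CPU already in
 * cpu_sibling_setup_map that shares the new CPU's physical package,
 * core or last-level cache is cross-linked into cpu_sibling_map,
 * cpu_core_map and llc_shared_map, and booted_cores is kept up to
 * date for the whole package.
 */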
void __cpuinit set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
				cpu_set(i, per_cpu(cpu_core_map, cpu));
				cpu_set(cpu, per_cpu(cpu_core_map, i));
				cpu_set(i, c->llc_shared_map);
				cpu_set(cpu, cpu_data(i).llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
	}

	cpu_set(cpu, c->llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
		c->booted_cores = 1;
		return;
	}

	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
			cpu_set(i, c->llc_shared_map);
			cpu_set(cpu, cpu_data(i).llc_shared_map);
		}
		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
			cpu_set(i, per_cpu(cpu_core_map, cpu));
			cpu_set(cpu, per_cpu(cpu_core_map, i));
			/*
			 * Does this new cpu bring up a new core?
			 */
			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
				/*
				 * Count each core in the package once:
				 * only the first sibling of a core bumps
				 * booted_cores for this new cpu.
				 */
				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
					c->booted_cores++;
				/*
				 * Increment the core count for all
				 * the other cpus in this package.
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
}

/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	/*
	 * For performance we return the last level cache shared map;
	 * for power savings we return cpu_core_map instead.
	 */
	if (sched_mc_power_savings || sched_smt_power_savings)
		return per_cpu(cpu_core_map, cpu);
	else
		return c->llc_shared_map;
}

#ifdef CONFIG_HOTPLUG_CPU
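/*
 * Undo what set_cpu_sibling_map() did for @cpu: drop it from the
 * sibling and core maps of every other CPU, clear its own maps, and
 * decrement booted_cores on the remaining package members when the
 * last thread of a core goes away.
 */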
void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
		/*
		 * The last thread sibling in this cpu core is going down.
		 */
		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));
	cpus_clear(per_cpu(cpu_core_map, cpu));
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}

int additional_cpus __initdata = -1;

static __init int setup_additional_cpus(char *s)
{
	return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
}
early_param("additional_cpus", setup_additional_cpus);
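/*
 * Usage sketch (hypothetical values): booting with
 *
 *	additional_cpus=2
 *
 * reserves two possible-CPU slots beyond the processors enumerated at
 * boot, so two more CPUs can be hot-added later.  Passing 0 disables
 * the reservation even when the BIOS reports disabled CPUs.
 */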

/*
 * cpu_possible_map should be static: it cannot change as CPUs are
 * onlined or offlined.  The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they don't expect
 * to handle this dynamically on CPU arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * When CPU hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables, use that.
 * - The user can override it with additional_cpus=NUM.
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
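/*
 * Worked example (hypothetical numbers): a board that enumerates 4
 * processors and marks 2 more disabled in the MP tables ends up with
 * possible = 4 + 2 = 6 (capped at NR_CPUS), and the boot log reports
 * "SMP: Allowing 6 CPUs, 2 hotplug CPUs".
 */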
__init void prefill_possible_map(void)
{
	int i;
	int possible;

	if (additional_cpus == -1) {
		if (disabled_cpus > 0)
			additional_cpus = disabled_cpus;
		else
			additional_cpus = 0;
	}
	possible = num_processors + additional_cpus;
	if (possible > NR_CPUS)
		possible = NR_CPUS;

	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
		possible, max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		cpu_set(i, cpu_possible_map);
}

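/*
 * Drop @cpu from the bookkeeping masks once it has been taken down;
 * on 64-bit this also clears the callin/callout state and the
 * cpu_initialized bit set by cpu_init(), and removes the CPU from
 * its NUMA node mask.
 */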
static void __ref remove_cpu_from_maps(int cpu)
{
	cpu_clear(cpu, cpu_online_map);
#ifdef CONFIG_X86_64
	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);
	/* was set by cpu_init() */
	clear_bit(cpu, (unsigned long *)&cpu_initialized);
	clear_node_cpumask(cpu);
#endif
}

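/*
 * Take the calling CPU out of service: CPU 0 is refused (the BSP must
 * stay online to service certain interrupts), the local APIC NMI
 * watchdog is stopped, queued timer interrupts are allowed to drain,
 * and the sibling info and CPU maps are torn down before fixup_irqs()
 * reroutes interrupts to the remaining online CPUs.
 */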
int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 because some
	 * interrupts can only be serviced by the BSP, especially if
	 * we're not using an IOAPIC.   -zwane
	 */
	if (cpu == 0)
		return -EBUSY;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		stop_apic_nmi_watchdog(NULL);
	clear_local_APIC();

	/*
	 * HACK:
	 * Allow any queued timer interrupts to get serviced.
	 * This is only a temporary solution until we clean up
	 * fixup_irqs as we do for IA64.
	 */
	local_irq_enable();
	mdelay(1);

	local_irq_disable();
	remove_siblinginfo(cpu);

	/* It's now safe to remove this processor from the online map */
	remove_cpu_from_maps(cpu);
	fixup_irqs(cpu_online_map);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* We don't do anything here: the idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk(KERN_INFO "CPU %u is now offline\n", cpu);
			if (num_online_cpus() == 1)
				alternatives_smp_switch(0);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int __cpu_disable(void)
{
	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}
#endif

/*
 * If the BIOS enumerates physical processors before logical ones,
 * maxcpus=N at enumeration time can be used to disable HT.
 */
static int __init parse_maxcpus(char *arg)
{
	extern unsigned int maxcpus;

	maxcpus = simple_strtoul(arg, NULL, 0);
	return 0;
}
early_param("maxcpus", parse_maxcpus);
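/*
 * Usage sketch (hypothetical values): on a two-package, two-thread box
 * whose BIOS enumerates both physical processors before any logical
 * ones, booting with
 *
 *	maxcpus=2
 *
 * brings up only the two physical cores, effectively disabling HT.
 */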