#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/delay.h>	/* mdelay()/msleep() used below */

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;

/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

void __cpuinit set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
				cpu_set(i, per_cpu(cpu_core_map, cpu));
				cpu_set(cpu, per_cpu(cpu_core_map, i));
				cpu_set(i, c->llc_shared_map);
				cpu_set(cpu, cpu_data(i).llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
	}

	cpu_set(cpu, c->llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		/* single-core package: core siblings == thread siblings */
		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
		c->booted_cores = 1;
		return;
	}

	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
			cpu_set(i, c->llc_shared_map);
			cpu_set(cpu, cpu_data(i).llc_shared_map);
		}
		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
			cpu_set(i, per_cpu(cpu_core_map, cpu));
			cpu_set(cpu, per_cpu(cpu_core_map, i));
			/*
			 * Does this new cpu bring up a new core?
			 */
			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
}
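
/*
 * Illustrative example (not from the original source): assume one
 * hypothetical package with two HT cores, enumerated so that logical
 * CPUs 0/2 are the threads of core 0 and CPUs 1/3 the threads of
 * core 1. After all four CPUs have called set_cpu_sibling_map():
 *   per_cpu(cpu_sibling_map, 3) == { 1, 3 }       (same core)
 *   per_cpu(cpu_core_map, 3)    == { 0, 1, 2, 3 } (same package)
 *   cpu_data(3).booted_cores    == 2
 * CPU 3 is the second thread of core 1, so it copies booted_cores
 * from its already-online sibling instead of bumping the count.
 */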

/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	/*
	 * For performance, we return the last-level-cache shared map;
	 * for power savings, we return cpu_core_map.
	 */
	if (sched_mc_power_savings || sched_smt_power_savings)
		return per_cpu(cpu_core_map, cpu);
	else
		return c->llc_shared_map;
}
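
/*
 * Example (illustrative): on a hypothetical package built from two dies
 * that each have a private last level cache, llc_shared_map is a proper
 * subset of cpu_core_map. The default (performance) case then builds
 * smaller coregroups around each cache, while the power-savings case
 * groups the whole package so load can be consolidated onto fewer
 * packages.
 */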

#ifdef CONFIG_HOTPLUG_CPU
void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));
	cpus_clear(per_cpu(cpu_core_map, cpu));
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}

int additional_cpus __initdata = -1;

static __init int setup_additional_cpus(char *s)
{
	return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
}
early_param("additional_cpus", setup_additional_cpus);
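
/*
 * Example usage (illustrative): booting with "additional_cpus=2" on the
 * kernel command line makes prefill_possible_map() below reserve room
 * for two more possible CPUs than the BIOS reported, so that many CPUs
 * can be hot-added later.
 */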

/*
 * cpu_possible_map should be static; it cannot change as CPUs
 * are onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they don't expect to
 * do this dynamically on CPU arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * When CPU hotplug is not compiled in, we fall back to the current
 * behaviour, which is cpu_possible == cpu_present.
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables, use that.
 * - The user can override it with additional_cpus=NUM.
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 */
__init void prefill_possible_map(void)
{
	int i;
	int possible;

	if (additional_cpus == -1) {
		if (disabled_cpus > 0)
			additional_cpus = disabled_cpus;
		else
			additional_cpus = 0;
	}
	possible = num_processors + additional_cpus;
	if (possible > NR_CPUS)
		possible = NR_CPUS;

	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
	       possible, max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		cpu_set(i, cpu_possible_map);
}
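
/*
 * Worked example (illustrative values): with num_processors == 4,
 * disabled_cpus == 2 and no additional_cpus= override, additional_cpus
 * becomes 2, possible == 6, and the boot log reads:
 *   SMP: Allowing 6 CPUs, 2 hotplug CPUs
 */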

static void __ref remove_cpu_from_maps(int cpu)
{
	cpu_clear(cpu, cpu_online_map);
	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);
	/* was set by cpu_init() */
	clear_bit(cpu, (unsigned long *)&cpu_initialized);
	clear_node_cpumask(cpu);
}

int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IOAPIC	-zwane
	 */
	if (cpu == 0)
		return -EBUSY;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		stop_apic_nmi_watchdog(NULL);
	clear_local_APIC();
	/*
	 * Allow any queued timer interrupts to get serviced.
	 * This is only a temporary solution until we clean up
	 * fixup_irqs as we do for IA64.
	 */
	local_irq_enable();
	mdelay(1);
	local_irq_disable();
	remove_siblinginfo(cpu);
	/* It's now safe to remove this processor from the online map */
	remove_cpu_from_maps(cpu);
	fixup_irqs(cpu_online_map);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* We don't do anything here: the idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk(KERN_INFO "CPU %d is now offline\n", cpu);
			if (1 == num_online_cpus())
				alternatives_smp_switch(0);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}

#else /* ... !CONFIG_HOTPLUG_CPU */

int __cpu_disable(void)
{
	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable() */
	BUG();
}

#endif /* CONFIG_HOTPLUG_CPU */

/*
 * If the BIOS enumerates physical processors before logical,
 * maxcpus=N at enumeration-time can be used to disable HT.
 */
static int __init parse_maxcpus(char *arg)
{
	extern unsigned int maxcpus;

	maxcpus = simple_strtoul(arg, NULL, 0);
	return 0;
}
early_param("maxcpus", parse_maxcpus);
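
/*
 * Example (illustrative): on a hypothetical 2-package HT system whose
 * BIOS enumerates both physical processors before any HT siblings,
 * booting with "maxcpus=2" brings up only the two physical processors,
 * effectively disabling HyperThreading.
 */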