#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>

#include <asm/processor.h>
#include <asm/smp.h>
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;

/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
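
/*
 * set_cpu_sibling_map() is invoked once per booting CPU to fill in the
 * topology masks above: cpu_sibling_map collects hardware threads that
 * share a core, cpu_core_map collects all threads in the same physical
 * package, and llc_shared_map collects CPUs sharing the last level
 * cache. It also maintains booted_cores, the count of cores brought up
 * so far in each package.
 */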
void __cpuinit set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
				cpu_set(i, per_cpu(cpu_core_map, cpu));
				cpu_set(cpu, per_cpu(cpu_core_map, i));
				cpu_set(i, c->llc_shared_map);
				cpu_set(cpu, cpu_data(i).llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
	}

	cpu_set(cpu, c->llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
		c->booted_cores = 1;
		return;
	}

	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
			cpu_set(i, c->llc_shared_map);
			cpu_set(cpu, cpu_data(i).llc_shared_map);
		}
		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
			cpu_set(i, per_cpu(cpu_core_map, cpu));
			cpu_set(cpu, per_cpu(cpu_core_map, i));
			/*
			 * Does this new cpu bring up a new core?
			 */
			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
}
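
/*
 * Illustrative example (hypothetical topology): on a single package
 * with two cores and two HT threads per core, after all four logical
 * CPUs have run set_cpu_sibling_map(), each CPU's cpu_sibling_map
 * holds its two thread siblings, its cpu_core_map holds all four
 * logical CPUs, and booted_cores is 2 for every CPU in the package.
 */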
/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	/*
	 * For performance, we return the last level cache shared map.
	 * For power savings, we return cpu_core_map.
	 */
	if (sched_mc_power_savings || sched_smt_power_savings)
		return per_cpu(cpu_core_map, cpu);
	else
		return c->llc_shared_map;
}
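
/*
 * The mask returned above feeds the multi-core (MC) sched domain, so
 * the tradeoff directly shapes load balancing: group CPUs by shared
 * last level cache for performance, or by whole package to let power
 * savings pack work onto fewer cores.
 */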
#ifdef CONFIG_HOTPLUG_CPU
void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
		/* last thread sibling in this cpu core going down */
		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));
	cpus_clear(per_cpu(cpu_core_map, cpu));
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}
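
/*
 * remove_siblinginfo() undoes set_cpu_sibling_map() when a CPU goes
 * offline, so a later re-plug of the same CPU rebuilds its topology
 * masks from a clean state.
 */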
int additional_cpus __initdata = -1;

static __init int setup_additional_cpus(char *s)
{
	return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
}
early_param("additional_cpus", setup_additional_cpus);
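
/*
 * Example: booting with "additional_cpus=2" on the kernel command line
 * reserves room in cpu_possible_map for two CPUs beyond those present
 * at boot, allowing them to be hot-added later.
 */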
/*
 * cpu_possible_map should be static: it cannot change as CPUs are
 * onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they do not expect
 * to do this dynamically on CPU arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * When CPU hotplug is not compiled in, we fall back to the current
 * behaviour, which is cpu_possible == cpu_present.
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables, use that.
 * - The user can overwrite it with additional_cpus=NUM.
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 */
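
/*
 * Worked example (illustrative numbers): with num_processors == 4 and
 * additional_cpus=2, possible becomes 6, the boot log reads
 * "SMP: Allowing 6 CPUs, 2 hotplug CPUs", and CPUs 0-5 are set in
 * cpu_possible_map.
 */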
__init void prefill_possible_map(void)
{
	int i, possible;

	if (additional_cpus == -1) {
		if (disabled_cpus > 0)
			additional_cpus = disabled_cpus;
		else
			additional_cpus = 0;
	}
	possible = num_processors + additional_cpus;
	if (possible > NR_CPUS)
		possible = NR_CPUS;

	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
		possible, max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		cpu_set(i, cpu_possible_map);
}
#endif /* CONFIG_HOTPLUG_CPU */