#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif
#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);
unsigned int max_physical_apicid;

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif
/*
 * Map cpu index to physical APIC ID
 */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define X86_64_NUMA	1	/* (used later) */

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/*
 * Which logical CPUs are on which nodes
 */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Setup node_to_cpumask_map
 */
static void __init setup_node_to_cpumask_map(void);
#else
static inline void setup_node_to_cpumask_map(void) { }
#endif
#ifdef CONFIG_X86_64
/*
 * Define load_pda_offset() and per-cpu __pda for x86_64.
 * load_pda_offset() is responsible for loading the offset of pda into
 * %gs.
 *
 * On SMP, pda offset also duals as percpu base address and thus it
 * should be at the start of per-cpu area.  To achieve this, it's
 * preallocated in vmlinux_64.lds.S directly instead of using
 * DEFINE_PER_CPU().
 */
void __cpuinit load_pda_offset(int cpu)
{
	/* Memory clobbers used to order pda/percpu accesses */
	mb();
	wrmsrl(MSR_GS_BASE, cpu_pda(cpu));
	mb();
}
#ifndef CONFIG_SMP
DEFINE_PER_CPU(struct x8664_pda, __pda);
#endif
EXPORT_PER_CPU_SYMBOL(__pda);
#endif /* CONFIG_X86_64 */
#ifdef CONFIG_X86_64

/* correctly size the local cpu masks */
static void setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

#else /* CONFIG_X86_32 */

static inline void setup_cpu_local_masks(void)
{
}

#endif /* CONFIG_X86_32 */
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}
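/*
 * On 64-bit, the boot cpu's entry is statically initialized to the
 * initial per-cpu load address so per-cpu variables can be referenced
 * before setup_per_cpu_areas() runs; on 32-bit the array starts out
 * zeroed.
 */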
#ifdef CONFIG_X86_64
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0] = (unsigned long)__per_cpu_load,
};
#else
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
#endif
EXPORT_SYMBOL(__per_cpu_offset);
/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size, old_size;
	char *ptr;
	int cpu;
	unsigned long align = 1;

	/* Copy section for each CPU (we discard the original) */
	old_size = PERCPU_ENOUGH_ROOM;
	align = max_t(unsigned long, PAGE_SIZE, align);
	size = roundup(old_size, align);

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);
	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = __alloc_bootmem(size, align,
				    __pa(MAX_DMA_ADDRESS));
#else
		int node = early_cpu_to_node(cpu);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = __alloc_bootmem(size, align,
					    __pa(MAX_DMA_ADDRESS));
			pr_info("cpu %d has no node %d or node-local memory\n",
				cpu, node);
			pr_debug("per cpu data for cpu%d at %016lx\n",
				 cpu, __pa(ptr));
		} else {
			ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
							__pa(MAX_DMA_ADDRESS));
			pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
				cpu, node, __pa(ptr));
		}
#endif

		memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			(char *)per_cpu(irq_stack, cpu) + IRQ_STACK_SIZE - 64;
		/*
		 * CPU0 modified pda in the init data area, reload pda
		 * offset for CPU0 and clear the area for others.
		 */
		if (cpu == 0)
			load_pda_offset(0);
		else
			memset(cpu_pda(cpu), 0, sizeof(*cpu_pda(cpu)));
#endif

		DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
	}
	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}

#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
	DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);

	pr_debug("Node to cpumask map at %p for %d nodes\n",
		 map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}
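/*
 * Record the NUMA node of a cpu.  Before the per-cpu areas exist the
 * mapping goes into the early x86_cpu_to_node_map array; afterwards it
 * is stored in the per-cpu variable and cached in the cpu's pda.
 */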
void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	if (node != NUMA_NO_NODE)
		cpu_pda(cpu)->nodenumber = node;
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}
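/*
 * Without CONFIG_DEBUG_PER_CPU_MAPS, numa_add_cpu()/numa_remove_cpu()
 * simply set or clear the cpu's bit in its node's cpumask.
 */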
#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */
/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = early_cpu_to_node(cpu);
	cpumask_t *mask;
	char buf[64];

	if (node_to_cpumask_map == NULL) {
		printk(KERN_ERR "node_to_cpumask_map NULL\n");
		dump_stack();
		return;
	}

	mask = &node_to_cpumask_map[node];
	if (enable)
		cpu_set(cpu, *mask);
	else
		cpu_clear(cpu, *mask);

	cpulist_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}
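/*
 * Debug version of cpu_to_node(): warns and falls back to the early
 * map if it is called before the per-cpu areas are set up.
 */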
int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);
/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

/* empty cpumask */
static const cpumask_t cpu_mask_none;
/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const cpumask_t *cpumask_of_node(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return (const cpumask_t *)&cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return &cpu_mask_none;
	}
	return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);
/*
 * Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function creates the returned cpumask on the stack
 * so with a high NR_CPUS count, excessive stack space is used.  The
 * node_to_cpumask_ptr function should be used whenever possible.
 */
cpumask_t node_to_cpumask(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
		dump_stack();
		return cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_mask_none;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */