#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);
unsigned int max_physical_apicid;

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/*
 * Map cpu index to physical APIC ID
 */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
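
/*
 * Note: the EARLY_PER_CPU variables above are backed by a static array
 * that is usable before the per-CPU areas exist.  setup_per_cpu_maps()
 * below copies the early values into the real per-CPU variables and
 * then clears the early pointers.
 */
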
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define X86_64_NUMA	1	/* (used later) */

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/*
 * Which logical CPUs are on which nodes
 */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Setup node_to_cpumask_map
 */
static void __init setup_node_to_cpumask_map(void);
#else
static inline void setup_node_to_cpumask_map(void) { }
#endif
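
/*
 * Without NUMA on 64-bit, the inline stub above keeps the
 * setup_node_to_cpumask_map() call in setup_per_cpu_areas() a no-op.
 */
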
#ifdef CONFIG_X86_64
/*
 * Define load_pda_offset() and per-cpu __pda for x86_64.
 * load_pda_offset() is responsible for loading the offset of pda into
 * %gs.
 *
 * On SMP, pda offset also duals as percpu base address and thus it
 * should be at the start of per-cpu area.  To achieve this, it's
 * preallocated in vmlinux_64.lds.S directly instead of using
 * DEFINE_PER_CPU().
 */
void __cpuinit load_pda_offset(int cpu)
{
	/* Memory clobbers used to order pda/percpu accesses */
	mb();
	wrmsrl(MSR_GS_BASE, cpu_pda(cpu));
	mb();
}

DEFINE_PER_CPU(struct x8664_pda, __pda);
EXPORT_PER_CPU_SYMBOL(__pda);
#endif /* CONFIG_SMP && CONFIG_X86_64 */

#ifdef CONFIG_X86_64

/* correctly size the local cpu masks */
static void setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

#else /* CONFIG_X86_32 */
static inline void setup_cpu_local_masks(void)
{
}
#endif /* CONFIG_X86_32 */
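
/*
 * The per-CPU area setup below is compiled only when the architecture
 * provides its own setup_per_cpu_areas() (CONFIG_HAVE_SETUP_PER_CPU_AREA).
 */
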
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA

/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}

#ifdef CONFIG_X86_64
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0] = (unsigned long)__per_cpu_load,
};
#else
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
#endif
EXPORT_SYMBOL(__per_cpu_offset);
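
/*
 * On 64-bit, entry 0 starts out as the load address of the per-CPU init
 * section (__per_cpu_load); setup_per_cpu_areas() below overwrites every
 * entry once the real per-CPU areas have been allocated.
 */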

/*
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size, old_size;
	char *ptr;
	int cpu;
	unsigned long align = 1;

	/* Copy section for each CPU (we discard the original) */
	old_size = PERCPU_ENOUGH_ROOM;
	align = max_t(unsigned long, PAGE_SIZE, align);
	size = roundup(old_size, align);

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);

	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = __alloc_bootmem(size, align,
				      __pa(MAX_DMA_ADDRESS));
#else
		int node = early_cpu_to_node(cpu);

		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = __alloc_bootmem(size, align,
					      __pa(MAX_DMA_ADDRESS));
			pr_info("cpu %d has no node %d or node-local memory\n",
				cpu, node);
			pr_debug("per cpu data for cpu%d at %016lx\n",
				 cpu, __pa(ptr));
		} else {
			ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
						   __pa(MAX_DMA_ADDRESS));
			pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
				 cpu, node, __pa(ptr));
		}
#endif

		memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
#ifdef CONFIG_X86_64
		/*
		 * CPU0 modified pda in the init data area, reload pda
		 * offset for CPU0 and clear the area for others.
		 */
		if (cpu == 0)
			load_pda_offset(0);
		else
			memset(cpu_pda(cpu), 0, sizeof(*cpu_pda(cpu)));
#endif
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);

		DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
	}

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}

#endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */
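
/*
 * The remainder of this file maintains the cpu -> node mapping and the
 * node -> cpumask map used by 64-bit NUMA configurations (X86_64_NUMA).
 */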
#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
	DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);

	pr_debug("Node to cpumask map at %p for %d nodes\n",
		 map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}
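
/*
 * Record the node of a cpu: before the per-CPU areas exist the value
 * goes into the early map, afterwards into the per-CPU variable (and,
 * for a real node, into the cpu's pda).
 */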
void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	if (node != NUMA_NO_NODE)
		cpu_pda(cpu)->nodenumber = node;
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}
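
/*
 * numa_add_cpu()/numa_remove_cpu() come in two flavours: the plain
 * versions below just update node_to_cpumask_map, while the
 * CONFIG_DEBUG_PER_CPU_MAPS versions validate the map and log every
 * change.
 */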
#ifndef CONFIG_DEBUG_PER_CPU_MAPS
void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = early_cpu_to_node(cpu);
	cpumask_t *mask;
	char buf[64];

	if (node_to_cpumask_map == NULL) {
		printk(KERN_ERR "node_to_cpumask_map NULL\n");
		return;
	}
	mask = &node_to_cpumask_map[node];
	if (enable)
		cpu_set(cpu, *mask);
	else
		cpu_clear(cpu, *mask);

	cpulist_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}

int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

/* empty cpumask */
static const cpumask_t cpu_mask_none;
/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const cpumask_t *cpumask_of_node(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		return (const cpumask_t *)&cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		return &cpu_mask_none;
	}
	return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

/*
 * Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function creates the returned cpumask on the stack
 * so with a high NR_CPUS count, excessive stack space is used.  The
 * node_to_cpumask_ptr function should be used whenever possible.
 */
cpumask_t node_to_cpumask(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
		return cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		return cpu_mask_none;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);

/*
 * --------- end of debug versions of the numa functions ---------
 */
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */