x86: move setup_cpu_local_masks()
arch/x86/kernel/setup_percpu.c
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

/*
 * Could be inside CONFIG_HAVE_SETUP_PER_CPU_AREA with other stuff but
 * voyager wants cpu_number too.
 */
#ifdef CONFIG_SMP
DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);
unsigned int max_physical_apicid;

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/*
 * Map cpu index to physical APIC ID
 */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
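/*
 * Until setup_per_cpu_areas() runs, reads of these maps go through the
 * static early_per_cpu arrays; the values are copied into the real
 * per-cpu areas below, after which the early pointers are cleared.
 */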

#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA

#ifdef CONFIG_X86_64
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0] = (unsigned long)__per_cpu_load,
};
#else
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
#endif
EXPORT_SYMBOL(__per_cpu_offset);
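/*
 * __per_cpu_offset[cpu] is the delta added to a per-cpu symbol's link
 * address to reach that cpu's copy of the variable.
 */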

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size, old_size;
	char *ptr;
	int cpu;
	unsigned long align = 1;

	/* Copy section for each CPU (we discard the original) */
	old_size = PERCPU_ENOUGH_ROOM;
	align = max_t(unsigned long, PAGE_SIZE, align);
	size = roundup(old_size, align);
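	/*
	 * size now covers the static per-cpu section plus reserved
	 * slack (PERCPU_ENOUGH_ROOM), rounded up to page alignment.
	 */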

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);

	for_each_possible_cpu(cpu) {
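		/*
		 * Allocate this CPU's area from bootmem, preferring
		 * node-local memory when the CPU's node is online.
		 */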
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = __alloc_bootmem(size, align,
				 __pa(MAX_DMA_ADDRESS));
#else
		int node = early_cpu_to_node(cpu);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = __alloc_bootmem(size, align,
					 __pa(MAX_DMA_ADDRESS));
			pr_info("cpu %d has no node %d or node-local memory\n",
				cpu, node);
			pr_debug("per cpu data for cpu%d at %016lx\n",
				 cpu, __pa(ptr));
		} else {
			ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
							__pa(MAX_DMA_ADDRESS));
			pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
				cpu, node, __pa(ptr));
		}
#endif

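		/* Copy the initial static per-cpu data into this CPU's area */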
		memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
		per_cpu_offset(cpu) = ptr - __per_cpu_start;
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		/*
		 * Copy data used in early init routines from the initial arrays to the
		 * per cpu data areas.  These arrays then become expendable and the
		 * *_early_ptr's are zeroed indicating that the static arrays are gone.
		 */
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef CONFIG_X86_64
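		/*
		 * Point irq_stack_ptr at the top of this CPU's IRQ stack,
		 * leaving 64 bytes of headroom at the very top.
		 */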
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) + IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
		/*
		 * Up to this point, CPU0 has been using .data.init
		 * area.  Reload %gs offset for CPU0.
		 */
		if (cpu == 0)
			load_gs_base(cpu);
#endif

		DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}

#endif
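
For reference, a minimal sketch of what the moved setup_cpu_local_masks()
could look like, assuming it simply bootmem-allocates the initialized,
callin, and callout masks named in the comment at its call site above;
the actual definition this commit moves into the file may differ:

/* Sketch (assumption), not necessarily the definition in the tree */
cpumask_var_t cpu_initialized_mask;
cpumask_var_t cpu_callin_mask;
cpumask_var_t cpu_callout_mask;

/* Allocate the cpu local masks before the page allocator is up */
void __init setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
}

With CONFIG_CPUMASK_OFFSTACK, cpumask_var_t is a pointer, so these masks
must be allocated before first use; alloc_bootmem_cpumask_var() does that
from bootmem, which is why setup_per_cpu_areas() calls the setup function
during early boot.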