#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <linux/pfn.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>
#include <asm/cpu.h>
#include <asm/stackprotector.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

#ifdef CONFIG_X86_64
#define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
#else
#define BOOT_PERCPU_OFFSET 0
#endif

DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
EXPORT_PER_CPU_SYMBOL(this_cpu_off);

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
	[0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
};
EXPORT_SYMBOL(__per_cpu_offset);
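
/*
 * Illustration (a rough sketch, not upstream code): a percpu accessor
 * effectively computes
 *
 *	*(typeof(var) *)((char *)&var + __per_cpu_offset[cpu])
 *
 * so until setup_per_cpu_areas() below installs the real offsets,
 * every entry is BOOT_PERCPU_OFFSET and all CPUs see the boot copy.
 */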

/**
 * pcpu_need_numa - determine whether percpu allocation needs to consider NUMA
 *
 * If NUMA is not configured or there is only one NUMA node available,
 * there is no reason to consider NUMA.  This function determines
 * whether percpu allocation should consider NUMA or not.
 *
 * RETURNS:
 * true if NUMA should be considered; otherwise, false.
 */
static bool __init pcpu_need_numa(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	pg_data_t *last = NULL;
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		int node = early_cpu_to_node(cpu);

		/* two distinct online nodes seen - NUMA matters */
		if (node_online(node) && NODE_DATA(node) &&
		    last && last != NODE_DATA(node))
			return true;

		last = NODE_DATA(node);
	}
#endif
	return false;
}

/**
 * pcpu_alloc_bootmem - NUMA friendly alloc_bootmem wrapper for percpu
 * @cpu: cpu to allocate for
 * @size: size of the allocation in bytes
 * @align: alignment of the allocation
 *
 * Allocate @size bytes aligned at @align for cpu @cpu.  This wrapper
 * does the right thing for NUMA regardless of the current
 * configuration.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void * __init pcpu_alloc_bootmem(unsigned int cpu, unsigned long size,
					unsigned long align)
{
	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int node = early_cpu_to_node(cpu);
	void *ptr;

	if (!node_online(node) || !NODE_DATA(node)) {
		ptr = __alloc_bootmem_nopanic(size, align, goal);
		pr_info("cpu %d has no node %d or node-local memory\n",
			cpu, node);
		pr_debug("per cpu data for cpu%d %lu bytes at %016lx\n",
			 cpu, size, __pa(ptr));
	} else {
		ptr = __alloc_bootmem_node_nopanic(NODE_DATA(node),
						   size, align, goal);
		pr_debug("per cpu data for cpu%d %lu bytes on node%d at "
			 "%016lx\n", cpu, size, node, __pa(ptr));
	}
	return ptr;
#else
	return __alloc_bootmem_nopanic(size, align, goal);
#endif
}
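
/*
 * Note on "goal" above: bootmem prefers to allocate at or above this
 * physical address and falls back below it only when necessary, so
 * __pa(MAX_DMA_ADDRESS) keeps percpu areas out of the DMA zone when
 * possible.  The _nopanic variants return NULL on failure instead of
 * panicking, letting the callers fall back to another allocator.
 */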

/*
 * Embedding allocator
 *
 * The first chunk is sized to just contain the static area plus
 * PERCPU_DYNAMIC_RESERVE and allocated as a contiguous area using
 * bootmem allocator and used as-is without being mapped into vmalloc
 * area.  This enables the first chunk to piggy back on the linear
 * physical PMD mapping and doesn't add any additional pressure to
 * the TLB.
 */
static void *pcpue_ptr __initdata;
static size_t pcpue_unit_size __initdata;

static struct page * __init pcpue_get_page(unsigned int cpu, int pageno)
{
	return virt_to_page(pcpue_ptr + cpu * pcpue_unit_size
			    + ((size_t)pageno << PAGE_SHIFT));
}
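
/*
 * First-chunk layout used by the functions above and below (one
 * contiguous bootmem block with one unit per possible CPU):
 *
 *	pcpue_ptr -> | cpu0 unit | cpu1 unit | ... | cpuN-1 unit |
 *
 * Each unit is pcpue_unit_size bytes: the static area copied from
 * __per_cpu_load followed by the dynamic reserve, which is what the
 * pointer arithmetic above indexes into.
 */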

static ssize_t __init setup_pcpu_embed(size_t static_size)
{
	unsigned int cpu;

	/*
	 * If large page isn't supported, there's no benefit in doing
	 * this.  Also, embedding allocation doesn't play well with
	 * NUMA.
	 */
	if (!cpu_has_pse || pcpu_need_numa())
		return -EINVAL;

	/* allocate and copy */
	pcpue_unit_size = PFN_ALIGN(static_size + PERCPU_DYNAMIC_RESERVE);
	pcpue_unit_size = max_t(size_t, pcpue_unit_size, PCPU_MIN_UNIT_SIZE);
	pcpue_ptr = pcpu_alloc_bootmem(0, num_possible_cpus() * pcpue_unit_size,
				       PAGE_SIZE);
	if (!pcpue_ptr)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		memcpy(pcpue_ptr + cpu * pcpue_unit_size, __per_cpu_load,
		       static_size);

	/* we're ready, commit */
	pr_info("PERCPU: Embedded %zu pages at %p, static data %zu bytes\n",
		pcpue_unit_size >> PAGE_SHIFT, pcpue_ptr, static_size);

	return pcpu_setup_first_chunk(pcpue_get_page, static_size,
				      pcpue_unit_size,
				      pcpue_unit_size - static_size, pcpue_ptr,
				      NULL);
}
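
/*
 * Worked sizing example (numbers hypothetical): with a 132k static
 * area and a 20k PERCPU_DYNAMIC_RESERVE, PFN_ALIGN yields a 152k
 * unit, which already exceeds a 64k PCPU_MIN_UNIT_SIZE; four
 * possible CPUs would then need one 608k bootmem allocation.
 */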

/*
 * 4k page allocator
 *
 * This is the basic allocator.  Static percpu area is allocated
 * page-by-page and most of initialization is done by the generic
 * setup function.
 */
static struct page **pcpu4k_pages __initdata;
static int pcpu4k_nr_static_pages __initdata;

static struct page * __init pcpu4k_get_page(unsigned int cpu, int pageno)
{
	if (pageno < pcpu4k_nr_static_pages)
		return pcpu4k_pages[cpu * pcpu4k_nr_static_pages + pageno];
	return NULL;
}
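
/*
 * pcpu4k_pages is a flat array indexed
 * [cpu * pcpu4k_nr_static_pages + pageno]: all of cpu0's static
 * pages first, then cpu1's, and so on, matching the fill order in
 * setup_pcpu_4k() below.
 */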

static void __init pcpu4k_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}

static ssize_t __init setup_pcpu_4k(size_t static_size)
{
	size_t pages_size;
	unsigned int cpu;
	int i, j;
	ssize_t ret;

	pcpu4k_nr_static_pages = PFN_UP(static_size);

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(pcpu4k_nr_static_pages * num_possible_cpus()
			       * sizeof(pcpu4k_pages[0]));
	pcpu4k_pages = alloc_bootmem(pages_size);

	/* allocate and copy */
	j = 0;
	for_each_possible_cpu(cpu)
		for (i = 0; i < pcpu4k_nr_static_pages; i++) {
			void *ptr;

			ptr = pcpu_alloc_bootmem(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr)
				goto enomem;

			memcpy(ptr, __per_cpu_load + i * PAGE_SIZE, PAGE_SIZE);
			pcpu4k_pages[j++] = virt_to_page(ptr);
		}

	/* we're ready, commit */
	pr_info("PERCPU: Allocated %d 4k pages, static data %zu bytes\n",
		pcpu4k_nr_static_pages, static_size);

	ret = pcpu_setup_first_chunk(pcpu4k_get_page, static_size, 0, 0, NULL,
				     pcpu4k_populate_pte);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_bootmem(__pa(page_address(pcpu4k_pages[j])), PAGE_SIZE);
	ret = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pcpu4k_pages), pages_size);
	return ret;
}
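
/*
 * Both setup_pcpu_embed() and setup_pcpu_4k() return the per-cpu
 * unit size on success and -errno on failure, which is how
 * setup_per_cpu_areas() below chains them as primary and fallback.
 */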

static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct gdt;

	pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
			0x2 | DESCTYPE_S, 0x8);
	write_gdt_entry(get_cpu_gdt_table(cpu),
			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}
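
/*
 * Background (assumes the CONFIG_X86_32 guard above): 32-bit percpu
 * accesses go through a segment register whose base is set to
 * per_cpu_offset(cpu) by the descriptor above; 64-bit instead loads
 * the percpu base into the %gs base MSR and needs no GDT entry here.
 */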

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	size_t static_size = __per_cpu_end - __per_cpu_start;
	unsigned int cpu;
	unsigned long delta;
	size_t pcpu_unit_size;
	ssize_t ret;

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	/* allocate percpu area; try embedding first, fall back to 4k pages */
	ret = setup_pcpu_embed(static_size);
	if (ret < 0)
		ret = setup_pcpu_4k(static_size);
	if (ret < 0)
		panic("cannot allocate static percpu area (%zu bytes, err=%zd)",
		      static_size, ret);

	pcpu_unit_size = ret;

	/* alrighty, percpu areas up and running */
	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu) {
		per_cpu_offset(cpu) = delta + cpu * pcpu_unit_size;
		per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
		per_cpu(cpu_number, cpu) = cpu;
		setup_percpu_segment(cpu);
		setup_stack_canary_segment(cpu);
		/*
		 * Copy data used in early init routines from the
		 * initial arrays to the per cpu data areas.  These
		 * arrays then become expendable and the *_early_ptr's
		 * are zeroed indicating that the static arrays are
		 * gone.
		 */
#ifdef CONFIG_X86_LOCAL_APIC
		per_cpu(x86_cpu_to_apicid, cpu) =
			early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#endif
#ifdef CONFIG_X86_64
		per_cpu(irq_stack_ptr, cpu) =
			per_cpu(irq_stack_union.irq_stack, cpu) +
			IRQ_STACK_SIZE - 64;
#ifdef CONFIG_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
			early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
#endif
		/*
		 * Up to this point, the boot CPU has been using .data.init
		 * area.  Reload any changed state for the boot CPU.
		 */
		if (cpu == boot_cpu_id)
			switch_to_new_gdt(cpu);

		DBG("PERCPU: cpu %4d %lx\n", cpu, per_cpu_offset(cpu));
	}

	/* indicate the early static arrays will soon be gone */
#ifdef CONFIG_X86_LOCAL_APIC
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#endif
#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}