/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/numa.h>
#include <asm/acpi.h>
#include <asm/k8.h>

#ifndef Dprintk
#define Dprintk(x...)
#endif

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

bootmem_data_t plat_node_bdata[MAX_NUMNODES];

struct memnode memnode;
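
/*
 * memnode maps physical addresses to node ids: phys_to_nid() indexes
 * memnodemap[] with (addr >> memnode_shift).  The small embedded_map
 * inside struct memnode is used when the table fits there; otherwise
 * allocate_cachealigned_memnodemap() below carves a larger table out
 * of e820 memory.
 */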

u16 x86_cpu_to_node_map_init[NR_CPUS] __initdata = {
	[0 ... NR_CPUS-1] = NUMA_NO_NODE
};
void *x86_cpu_to_node_map_early_ptr;
EXPORT_SYMBOL(x86_cpu_to_node_map_init);
EXPORT_SYMBOL(x86_cpu_to_node_map_early_ptr);
DEFINE_PER_CPU(u16, x86_cpu_to_node_map) = NUMA_NO_NODE;
EXPORT_PER_CPU_SYMBOL(x86_cpu_to_node_map);

u16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_to_cpumask_map);

int numa_off __initdata;
unsigned long __initdata nodemap_addr;
unsigned long __initdata nodemap_size;

/*
 * Given a shift value, try to populate memnodemap[]
 * Returns:
 * 1 if OK
 * 0 if memnodemap[] too small (or shift too small)
 * -1 if node overlap or lost ram (shift too big)
 */
static int __init populate_memnodemap(const struct bootnode *nodes,
				      int numnodes, int shift)
{
	unsigned long addr, end;
	int i, res = -1;

	memset(memnodemap, 0xff, memnodemapsize);
	for (i = 0; i < numnodes; i++) {
		addr = nodes[i].start;
		end = nodes[i].end;
		if (addr >= end)
			continue;
		if ((end >> shift) >= memnodemapsize)
			return 0;
		do {
			if (memnodemap[addr >> shift] != 0xff)
				return -1;
			memnodemap[addr >> shift] = i;
			addr += (1UL << shift);
		} while (addr < end);
		res = 1;
	}
	return res;
}
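
/*
 * Illustrative example (values assumed for the example): with
 * shift == 24 (16MB granularity), a node 0 spanning
 * [0x20000000, 0x40000000) fills memnodemap[0x20..0x3f], so
 * phys_to_nid(0x23456789) == memnodemap[0x23456789 >> 24]
 *                         == memnodemap[0x23] == 0.
 */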

static int __init allocate_cachealigned_memnodemap(void)
{
	unsigned long pad, pad_addr;

	memnodemap = memnode.embedded_map;
	if (memnodemapsize <= 48)
		return 0;

	pad = L1_CACHE_BYTES - 1;
	pad_addr = 0x8000;
	nodemap_size = pad + memnodemapsize;
	nodemap_addr = find_e820_area(pad_addr, end_pfn<<PAGE_SHIFT,
				      nodemap_size);
	if (nodemap_addr == -1UL) {
		printk(KERN_ERR
		       "NUMA: Unable to allocate Memory to Node hash map\n");
		nodemap_addr = nodemap_size = 0;
		return -1;
	}
	pad_addr = (nodemap_addr + pad) & ~pad;
	memnodemap = phys_to_virt(pad_addr);

	printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
	       nodemap_addr, nodemap_addr + nodemap_size);
	return 0;
}
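
/*
 * The mask arithmetic above rounds the map up to an L1 cache line.
 * Illustrative example, assuming L1_CACHE_BYTES == 64: pad == 0x3f,
 * so (0x1001 + 0x3f) & ~0x3f == 0x1040.  The real line size comes
 * from the kernel configuration.
 */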

/*
 * The LSB of all start and end addresses in the node map is the value of the
 * maximum possible shift.
 */
static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
					 int numnodes)
{
	int i, nodes_used = 0;
	unsigned long start, end;
	unsigned long bitfield = 0, memtop = 0;

	for (i = 0; i < numnodes; i++) {
		start = nodes[i].start;
		end = nodes[i].end;
		if (start >= end)
			continue;
		bitfield |= start;
		nodes_used++;
		if (end > memtop)
			memtop = end;
	}
	if (nodes_used <= 1)
		i = 63;
	else
		i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
	memnodemapsize = (memtop >> i)+1;
	return i;
}
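
/*
 * Worked example: two nodes starting at 0x0 and 0x28000000 give
 * bitfield == 0x28000000, whose lowest set bit is 27, so the shift is
 * 27 (128MB granularity).  With memtop == 0x50000000 the map needs
 * (0x50000000 >> 27) + 1 == 11 entries.
 */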

int __init compute_hash_shift(struct bootnode *nodes, int numnodes)
{
	int shift;

	shift = extract_lsb_from_nodes(nodes, numnodes);
	if (allocate_cachealigned_memnodemap())
		return -1;
	printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n", shift);

	if (populate_memnodemap(nodes, numnodes, shift) != 1) {
		printk(KERN_INFO "Your memory is not aligned; you need to "
		       "rebuild your kernel with a bigger NODEMAPSIZE "
		       "shift=%d\n", shift);
		return -1;
	}
	return shift;
}

int early_pfn_to_nid(unsigned long pfn)
{
	return phys_to_nid(pfn << PAGE_SHIFT);
}

static void * __init early_node_mem(int nodeid, unsigned long start,
				    unsigned long end, unsigned long size)
{
	unsigned long mem = find_e820_area(start, end, size);
	void *ptr;

	if (mem != -1L)
		return __va(mem);
	ptr = __alloc_bootmem_nopanic(size,
				SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS));
	if (ptr == NULL) {
		printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
		       size, nodeid);
		return NULL;
	}
	return ptr;
}
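
/*
 * early_node_mem() first looks for free e820 space inside the node's
 * own [start, end) range; if that fails it falls back to any bootmem
 * above MAX_DMA_ADDRESS, so the allocation may end up off-node.
 */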

/* Initialize bootmem allocator for a node */
void __init setup_node_bootmem(int nodeid, unsigned long start,
			       unsigned long end)
{
	unsigned long start_pfn, end_pfn, bootmap_pages, bootmap_size;
	unsigned long bootmap_start, nodedata_phys;
	void *bootmap;
	const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE);

	start = round_up(start, ZONE_ALIGN);

	printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
	       start, end);

	start_pfn = start >> PAGE_SHIFT;
	end_pfn = end >> PAGE_SHIFT;

	node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size);
	if (node_data[nodeid] == NULL)
		return;
	nodedata_phys = __pa(node_data[nodeid]);

	memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
	NODE_DATA(nodeid)->bdata = &plat_node_bdata[nodeid];
	NODE_DATA(nodeid)->node_start_pfn = start_pfn;
	NODE_DATA(nodeid)->node_spanned_pages = end_pfn - start_pfn;

	/* Find a place for the bootmem map */
	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
	bootmap = early_node_mem(nodeid, bootmap_start, end,
				 bootmap_pages<<PAGE_SHIFT);
	if (bootmap == NULL) {
		if (nodedata_phys < start || nodedata_phys >= end)
			free_bootmem((unsigned long)node_data[nodeid],
				     pgdat_size);
		node_data[nodeid] = NULL;
		return;
	}
	bootmap_start = __pa(bootmap);
	Dprintk("bootmap start %lu pages %lu\n", bootmap_start, bootmap_pages);

	bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
					 bootmap_start >> PAGE_SHIFT,
					 start_pfn, end_pfn);

	free_bootmem_with_active_regions(nodeid, end);

	reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys, pgdat_size);
	reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start,
			     bootmap_pages<<PAGE_SHIFT);
#ifdef CONFIG_ACPI_NUMA
	srat_reserve_add_area(nodeid);
#endif
	node_set_online(nodeid);
}
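
/*
 * Resulting layout inside the node when both allocations land locally
 * (sketch):
 *
 *   start .. [pg_data_t][pad to PAGE_SIZE][bootmem bitmap] .. end
 *
 * Both areas are reserved via reserve_bootmem_node() above so the
 * bootmem allocator never hands them out.
 */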

#ifdef CONFIG_FLAT_NODE_MEM_MAP
/* Initialize final allocator for a zone */
static void __init flat_setup_node_zones(int nodeid)
{
	unsigned long start_pfn, end_pfn, memmapsize, limit;

	start_pfn = node_start_pfn(nodeid);
	end_pfn = node_end_pfn(nodeid);

	Dprintk(KERN_INFO "Setting up memmap for node %d %lx-%lx\n",
		nodeid, start_pfn, end_pfn);

	/*
	 * Try to allocate mem_map at end to not fill up precious <4GB
	 * memory.
	 */
	memmapsize = sizeof(struct page) * (end_pfn-start_pfn);
	limit = end_pfn << PAGE_SHIFT;

	NODE_DATA(nodeid)->node_mem_map =
		__alloc_bootmem_core(NODE_DATA(nodeid)->bdata,
				     memmapsize, SMP_CACHE_BYTES,
				     round_down(limit - memmapsize, PAGE_SIZE),
				     limit);
}
#else
#define flat_setup_node_zones(i) do {} while (0)
#endif

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round robin the existing
 * nodes.
 */
void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node(rr, node_online_map);
		if (rr == MAX_NUMNODES)
			rr = first_node(node_online_map);
	}
}
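
/*
 * Illustrative example: with nodes {0, 1} online and four possible
 * CPUs still unmapped, the loop above assigns cpu0->node0,
 * cpu1->node1, cpu2->node0, cpu3->node1.
 */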

#ifdef CONFIG_NUMA_EMU
/* Numa emulation */
char *cmdline __initdata;

/*
 * Sets up nid to range from addr to addr + size.  If the end
 * boundary is greater than max_addr, then max_addr is used instead.
 * The return value is 0 if there is additional memory left for
 * allocation past addr and -1 otherwise.  addr is adjusted to be at
 * the end of the node.
 */
static int __init setup_node_range(int nid, struct bootnode *nodes, u64 *addr,
				   u64 size, u64 max_addr)
{
	int ret = 0;

	nodes[nid].start = *addr;
	*addr += size;
	if (*addr >= max_addr) {
		*addr = max_addr;
		ret = -1;
	}
	nodes[nid].end = *addr;
	node_set(nid, node_possible_map);
	printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
	       nodes[nid].start, nodes[nid].end,
	       (nodes[nid].end - nodes[nid].start) >> 20);
	return ret;
}

/*
 * Splits num_nodes nodes up equally starting at node_start.  The return value
 * is the number of nodes split up and addr is adjusted to be at the end of the
 * last node allocated.
 */
static int __init split_nodes_equally(struct bootnode *nodes, u64 *addr,
				      u64 max_addr, int node_start,
				      int num_nodes)
{
	unsigned int big;
	u64 size;
	int i;

	if (num_nodes <= 0)
		return -1;
	if (num_nodes > MAX_NUMNODES)
		num_nodes = MAX_NUMNODES;
	size = (max_addr - *addr - e820_hole_size(*addr, max_addr)) /
	       num_nodes;
	/*
	 * Calculate the number of big nodes that can be allocated as a result
	 * of consolidating the leftovers.
	 */
	big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * num_nodes) /
	      FAKE_NODE_MIN_SIZE;

	/* Round down to nearest FAKE_NODE_MIN_SIZE. */
	size &= FAKE_NODE_MIN_HASH_MASK;
	if (!size) {
		printk(KERN_ERR "Not enough memory for each node.  "
		       "NUMA emulation disabled.\n");
		return -1;
	}

	for (i = node_start; i < num_nodes + node_start; i++) {
		u64 end = *addr + size;

		if (i < big)
			end += FAKE_NODE_MIN_SIZE;
		/*
		 * The final node can have the remaining system RAM.  Other
		 * nodes receive roughly the same amount of available pages.
		 */
		if (i == num_nodes + node_start - 1)
			end = max_addr;
		else
			while (end - *addr - e820_hole_size(*addr, end) <
			       size) {
				end += FAKE_NODE_MIN_SIZE;
				if (end > max_addr) {
					end = max_addr;
					break;
				}
			}
		if (setup_node_range(i, nodes, addr, end - *addr, max_addr) < 0)
			break;
	}
	return i - node_start + 1;
}
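
/*
 * Worked example (assuming FAKE_NODE_MIN_SIZE is 64MB and no e820
 * holes): splitting 4096MB into 3 nodes gives size = 1365MB, rounded
 * down to 1344MB.  Nodes 0 and 1 get 1344MB each and the final node
 * absorbs the remainder, 1408MB.
 */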

/*
 * Splits the remaining system RAM into chunks of size.  The remaining memory
 * is always assigned to a final node and can be asymmetric.  Returns the
 * number of nodes split.
 */
static int __init split_nodes_by_size(struct bootnode *nodes, u64 *addr,
				      u64 max_addr, int node_start, u64 size)
{
	int i = node_start;

	size = (size << 20) & FAKE_NODE_MIN_HASH_MASK;
	while (!setup_node_range(i++, nodes, addr, size, max_addr))
		;
	return i - node_start;
}

/*
 * Sets up the system RAM area from start_pfn to end_pfn according to the
 * numa=fake command-line option.
 */
static int __init numa_emulation(unsigned long start_pfn, unsigned long end_pfn)
{
	struct bootnode nodes[MAX_NUMNODES];
	u64 size, addr = start_pfn << PAGE_SHIFT;
	u64 max_addr = end_pfn << PAGE_SHIFT;
	int num_nodes = 0, num = 0, coeff_flag, coeff = -1, i;

	memset(&nodes, 0, sizeof(nodes));
	/*
	 * If the numa=fake command-line is just a single number N, split the
	 * system RAM into N fake nodes.
	 */
	if (!strchr(cmdline, '*') && !strchr(cmdline, ',')) {
		long n = simple_strtol(cmdline, NULL, 0);

		num_nodes = split_nodes_equally(nodes, &addr, max_addr, 0, n);
		if (num_nodes < 0)
			return num_nodes;
		goto out;
	}

	/* Parse the command line. */
	for (coeff_flag = 0; ; cmdline++) {
		if (*cmdline && isdigit(*cmdline)) {
			num = num * 10 + *cmdline - '0';
			continue;
		}
		if (*cmdline == '*') {
			if (num > 0)
				coeff = num;
			coeff_flag = 1;
		}
		if (!*cmdline || *cmdline == ',') {
			if (!coeff_flag)
				coeff = 1;
			/*
			 * Round down to the nearest FAKE_NODE_MIN_SIZE.
			 * Command-line coefficients are in megabytes.
			 */
			size = ((u64)num << 20) & FAKE_NODE_MIN_HASH_MASK;
			if (size)
				for (i = 0; i < coeff; i++, num_nodes++)
					if (setup_node_range(num_nodes, nodes,
						&addr, size, max_addr) < 0)
						goto done;
			if (!*cmdline)
				break;
			coeff_flag = 0;
			coeff = -1;
		}
		num = 0;
	}
done:
	if (!num_nodes)
		return -1;
	/* Fill remainder of system RAM, if appropriate. */
	if (addr < max_addr) {
		if (coeff_flag && coeff < 0) {
			/* Split remaining nodes into num-sized chunks */
			num_nodes += split_nodes_by_size(nodes, &addr, max_addr,
							 num_nodes, num);
			goto out;
		}
		switch (*(cmdline - 1)) {
		case '*':
			/* Split remaining nodes into coeff chunks */
			if (coeff <= 0)
				break;
			num_nodes += split_nodes_equally(nodes, &addr, max_addr,
							 num_nodes, coeff);
			break;
		case ',':
			/* Do not allocate remaining system RAM */
			break;
		default:
			/* Give one final node */
			setup_node_range(num_nodes, nodes, &addr,
					 max_addr - addr, max_addr);
			num_nodes++;
		}
	}
out:
	memnode_shift = compute_hash_shift(nodes, num_nodes);
	if (memnode_shift < 0) {
		memnode_shift = 0;
		printk(KERN_ERR "No NUMA hash function found. NUMA emulation "
		       "disabled.\n");
		return -1;
	}

	/*
	 * We need to vacate all active ranges that may have been registered by
	 * SRAT and set acpi_numa to -1 so that srat_disabled() always returns
	 * true.  NUMA emulation has succeeded so we will not scan ACPI nodes.
	 */
	remove_all_active_ranges();
#ifdef CONFIG_ACPI_NUMA
	acpi_numa = -1;
#endif
	for_each_node_mask(i, node_possible_map) {
		e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
						nodes[i].end >> PAGE_SHIFT);
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	}
	acpi_fake_nodes(nodes, num_nodes);
	numa_init_array();
	return 0;
}
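
/*
 * Examples of the grammar implemented by the parser above:
 *   numa=fake=4            four equally sized nodes
 *   numa=fake=2*512,1024   two 512MB nodes, one 1024MB node, and the
 *                          rest of RAM as one final node
 *   numa=fake=2*512,       two 512MB nodes; remaining RAM stays unused
 *   numa=fake=512,*128     one 512MB node, remainder in 128MB chunks
 */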
#endif /* CONFIG_NUMA_EMU */

void __init numa_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
	int i;

	nodes_clear(node_possible_map);

#ifdef CONFIG_NUMA_EMU
	if (cmdline && !numa_emulation(start_pfn, end_pfn))
		return;
	nodes_clear(node_possible_map);
#endif

#ifdef CONFIG_ACPI_NUMA
	if (!numa_off && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
					  end_pfn << PAGE_SHIFT))
		return;
	nodes_clear(node_possible_map);
#endif

#ifdef CONFIG_K8_NUMA
	if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT,
					end_pfn<<PAGE_SHIFT))
		return;
	nodes_clear(node_possible_map);
#endif
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");

	printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
	       start_pfn << PAGE_SHIFT,
	       end_pfn << PAGE_SHIFT);
	/* setup dummy node covering all memory */
	memnode_shift = 63;
	memnodemap = memnode.embedded_map;
	memnodemap[0] = 0;
	nodes_clear(node_online_map);
	node_set_online(0);
	node_set(0, node_possible_map);
	for (i = 0; i < NR_CPUS; i++)
		numa_set_node(i, 0);
	/* cpumask_of_cpu() may not be available during early startup */
	memset(&node_to_cpumask_map[0], 0, sizeof(node_to_cpumask_map[0]));
	cpu_set(0, node_to_cpumask_map[0]);
	e820_register_active_regions(0, start_pfn, end_pfn);
	setup_node_bootmem(0, start_pfn << PAGE_SHIFT, end_pfn << PAGE_SHIFT);
}
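
/*
 * numa_initmem_init() above tries each NUMA source in turn:
 * command-line emulation, then ACPI SRAT, then the K8 northbridge
 * registers.  If all of them fail (or numa=off was given), it falls
 * back to a single dummy node covering all of memory.
 */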

__cpuinit void numa_add_cpu(int cpu)
{
	set_bit(cpu, (unsigned long *)&node_to_cpumask_map[cpu_to_node(cpu)]);
}

void __cpuinit numa_set_node(int cpu, int node)
{
	u16 *cpu_to_node_map = (u16 *)x86_cpu_to_node_map_early_ptr;

	cpu_pda(cpu)->nodenumber = node;

	if (cpu_to_node_map)
		cpu_to_node_map[cpu] = node;
	else if (per_cpu_offset(cpu))
		per_cpu(x86_cpu_to_node_map, cpu) = node;
	else
		Dprintk(KERN_INFO "Setting node for non-present cpu %d\n", cpu);
}
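
/*
 * Which map numa_set_node() updates depends on how far boot has
 * progressed: the early pointer map before the per-cpu areas exist,
 * the per-cpu variable afterwards, and nothing beyond the PDA for
 * CPUs that are not present yet.
 */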

unsigned long __init numa_free_all_bootmem(void)
{
	unsigned long pages = 0;
	int i;

	for_each_online_node(i)
		pages += free_all_bootmem_node(NODE_DATA(i));

	return pages;
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	int i;

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
	max_zone_pfns[ZONE_NORMAL] = end_pfn;

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	for_each_online_node(i)
		flat_setup_node_zones(i);

	free_area_init_nodes(max_zone_pfns);
}

static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5))
		cmdline = opt + 5;
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
	if (!strncmp(opt, "hotadd=", 7))
		hotadd_percent = simple_strtoul(opt+7, NULL, 10);
#endif
	return 0;
}
early_param("numa", numa_setup);
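
/*
 * Summary of the "numa=" options handled above:
 *   numa=off        disable NUMA for this boot
 *   numa=fake=...   NUMA emulation (CONFIG_NUMA_EMU)
 *   numa=noacpi     ignore the ACPI SRAT (CONFIG_ACPI_NUMA)
 *   numa=hotadd=N   percentage of memory reserved for hot-add
 */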

/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if x86_cpu_to_apicid[]
 * and apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and the faked-node case (when running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK, as cpu_to_node[]
 * is already initialized in a round-robin manner at numa_init_array(),
 * prior to this call, and that initialization is good enough
 * for the fake NUMA cases.
 */
void __init init_cpu_to_node(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		u16 apicid = x86_cpu_to_apicid_init[i];

		if (apicid == BAD_APICID)
			continue;
		if (apicid_to_node[apicid] == NUMA_NO_NODE)
			continue;
		numa_set_node(i, apicid_to_node[apicid]);
	}
}