/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/bootmem.h>
#include <linux/module.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"
unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
#ifdef CONFIG_CRASH_DUMP
/*
 * If we have booted due to a crash, max_pfn will be a very low value. We need
 * to know the amount of memory that the previous kernel used.
 */
unsigned long saved_max_pfn;
#endif
bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);
static int bootmem_debug;

static int __init bootmem_debug_setup(char *buf)
{
	bootmem_debug = 1;
	return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);
#define bdebug(fmt, args...) ({				\
	if (unlikely(bootmem_debug))			\
		printk(KERN_INFO			\
			"bootmem::%s " fmt,		\
			__func__, ## args);		\
})
/*
 * Number of bytes needed for a bitmap covering @pages pages,
 * rounded up to full words.
 */
static unsigned long __init bootmap_bytes(unsigned long pages)
{
	unsigned long bytes = (pages + 7) / 8;

	return ALIGN(bytes, sizeof(long));
}
/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
	unsigned long bytes = bootmap_bytes(pages);

	return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}
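
/*
 * Worked example (illustrative, not part of the original source): with
 * 4KB pages, a node spanning 131072 pages (512MB) needs
 * (131072 + 7) / 8 = 16384 bitmap bytes, which PAGE_ALIGN() turns into
 * 16384 >> PAGE_SHIFT = 4 bitmap pages. Arch code typically sizes the
 * bitmap this way before picking a spot for it:
 *
 *	map_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
 *	map_pfn = find_free_area(map_pages);	// hypothetical arch helper
 *	init_bootmem_node(NODE_DATA(0), map_pfn, start_pfn, end_pfn);
 */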
/*
 * link bdata in ascending order of node_boot_start
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
	struct list_head *iter;

	list_for_each(iter, &bdata_list) {
		bootmem_data_t *ent;

		ent = list_entry(iter, bootmem_data_t, list);
		if (bdata->node_boot_start < ent->node_boot_start)
			break;
	}
	list_add_tail(&bdata->list, iter);
}
/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	unsigned long mapsize;

	mminit_validate_memmodel_limits(&start, &end);
	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
	bdata->node_boot_start = PFN_PHYS(start);
	bdata->node_low_pfn = end;
	link_bootmem(bdata);

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	mapsize = bootmap_bytes(end - start);
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
		bdata - bootmem_node_data, start, mapstart, end, mapsize);

	return mapsize;
}
/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
				unsigned long startpfn, unsigned long endpfn)
{
	return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}
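
/*
 * Sketch of a caller (illustrative; the per-node tables are made up, not
 * from any real arch): NUMA setup code registers each node once it knows
 * the node's pfn range and has found room for the bitmap:
 *
 *	for_each_online_node(nid) {
 *		unsigned long start_pfn = node_start[nid];	// hypothetical
 *		unsigned long end_pfn = node_end[nid];		// hypothetical
 *		unsigned long map_pfn = node_map_pfn[nid];	// hypothetical
 *
 *		init_bootmem_node(NODE_DATA(nid), map_pfn, start_pfn, end_pfn);
 *	}
 */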
/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;
	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
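
/*
 * Typical UMA boot sequence (illustrative sketch; kernel_end and ram_end
 * are assumptions, not real kernel symbols): everything starts out
 * reserved, so the arch frees its usable RAM and then re-reserves what the
 * kernel and the bitmap itself already occupy:
 *
 *	map_size = init_bootmem(PFN_UP(__pa(kernel_end)), PFN_DOWN(ram_end));
 *	free_bootmem(0, ram_end);
 *	reserve_bootmem(0, __pa(kernel_end) + map_size, BOOTMEM_DEFAULT);
 *	...
 *	totalram_pages += free_all_bootmem();	// hand over to the buddy allocator
 */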
static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
	int aligned;
	struct page *page;
	unsigned long start, end, pages, count = 0;

	if (!bdata->node_bootmem_map)
		return 0;

	start = PFN_DOWN(bdata->node_boot_start);
	end = bdata->node_low_pfn;

	/*
	 * If the start is aligned to the machine's word size, we might
	 * be able to free pages in bulks of that order.
	 */
	aligned = !(start & (BITS_PER_LONG - 1));

	bdebug("nid=%td start=%lx end=%lx aligned=%d\n",
		bdata - bootmem_node_data, start, end, aligned);

	while (start < end) {
		unsigned long *map, idx, vec;

		map = bdata->node_bootmem_map;
		idx = start - PFN_DOWN(bdata->node_boot_start);
		vec = ~map[idx / BITS_PER_LONG];

		if (aligned && vec == ~0UL && start + BITS_PER_LONG < end) {
			int order = ilog2(BITS_PER_LONG);

			__free_pages_bootmem(pfn_to_page(start), order);
			count += BITS_PER_LONG;
		} else {
			unsigned long off = 0;

			while (vec && off < BITS_PER_LONG) {
				if (vec & 1) {
					page = pfn_to_page(start + off);
					__free_pages_bootmem(page, 0);
					count++;
				}
				vec >>= 1;
				off++;
			}
		}
		start += BITS_PER_LONG;
	}

	page = virt_to_page(bdata->node_bootmem_map);
	pages = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);
	pages = bootmem_bootmap_pages(pages);
	count += pages;
	while (pages--)
		__free_pages_bootmem(page++, 0);

	bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

	return count;
}
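
/*
 * Worked example (illustrative): with BITS_PER_LONG == 64, a bitmap word
 * that reads all-zero (all 64 pages free) inverts to vec == ~0UL, and the
 * whole word is handed back as one order-6 block (ilog2(64) == 6), i.e.
 * 64 pages in a single call. A word with any reserved bit set falls back
 * to the bit-by-bit loop, freeing only the pages whose inverted bit is 1.
 */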
/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	register_page_bootmem_info_node(pgdat);
	return free_all_bootmem_core(pgdat->bdata);
}
/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	return free_all_bootmem_core(NODE_DATA(0)->bdata);
}
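
/*
 * Example caller (illustrative): an arch's mem_init() typically retires
 * the bootmem allocator with something like
 *
 *	totalram_pages += free_all_bootmem();
 *
 * after which all further allocations must go through the buddy and slab
 * allocators.
 */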
static void __init free_bootmem_core(bootmem_data_t *bdata, unsigned long addr,
				     unsigned long size)
{
	unsigned long sidx, eidx;
	unsigned long i;

	BUG_ON(!size);

	/* out of range for this node */
	if (addr + size < bdata->node_boot_start ||
	    PFN_DOWN(addr) > bdata->node_low_pfn)
		return;

	/* freeing below the allocation hint makes that space reusable */
	if (addr >= bdata->node_boot_start &&
	    PFN_DOWN(addr - bdata->node_boot_start) < bdata->hint_idx)
		bdata->hint_idx = PFN_DOWN(addr - bdata->node_boot_start);

	/*
	 * Round the start up and the end down: partially freed pages
	 * are considered reserved.
	 */
	if (PFN_UP(addr) > PFN_DOWN(bdata->node_boot_start))
		sidx = PFN_UP(addr) - PFN_DOWN(bdata->node_boot_start);
	else
		sidx = 0;

	eidx = PFN_DOWN(addr + size - bdata->node_boot_start);
	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

	bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
		sidx + PFN_DOWN(bdata->node_boot_start),
		eidx + PFN_DOWN(bdata->node_boot_start));

	for (i = sidx; i < eidx; i++) {
		if (unlikely(!test_and_clear_bit(i, bdata->node_bootmem_map)))
			BUG();
	}
}
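
/*
 * Rounding example (illustrative): freeing addr = 0x1800, size = 0x2000
 * with 4KB pages covers bytes 0x1800-0x37ff. PFN_UP(0x1800) = 2 and
 * PFN_DOWN(0x3800) = 3, so only page 2 (0x2000-0x2fff) is actually freed;
 * the partial pages at either end stay reserved.
 */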
/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * Only physical pages that actually reside on @pgdat are marked.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	free_bootmem_core(pgdat->bdata, physaddr, size);
}
/**
 * free_bootmem - mark a page range as usable
 * @addr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * All physical pages within the range are marked, no matter what
 * node they reside on.
 */
void __init free_bootmem(unsigned long addr, unsigned long size)
{
	bootmem_data_t *bdata;

	list_for_each_entry(bdata, &bdata_list, list)
		free_bootmem_core(bdata, addr, size);
}
/*
 * Marks a particular physical memory range as unallocatable. Usable RAM
 * might be used for boot-time allocations - or it might get added
 * to the free page pool later on.
 */
static int __init can_reserve_bootmem_core(bootmem_data_t *bdata,
			unsigned long addr, unsigned long size, int flags)
{
	unsigned long sidx, eidx;
	unsigned long i;

	BUG_ON(!size);

	/* out of range for this node - not an error for the caller */
	if (addr + size < bdata->node_boot_start ||
	    PFN_DOWN(addr) > bdata->node_low_pfn)
		return 0;

	/* Round outward: partial pages are reserved in full */
	if (addr > bdata->node_boot_start)
		sidx = PFN_DOWN(addr - bdata->node_boot_start);
	else
		sidx = 0;

	eidx = PFN_UP(addr + size - bdata->node_boot_start);
	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

	for (i = sidx; i < eidx; i++) {
		if (test_bit(i, bdata->node_bootmem_map)) {
			if (flags & BOOTMEM_EXCLUSIVE)
				return -EBUSY;
		}
	}
	return 0;
}
static void __init reserve_bootmem_core(bootmem_data_t *bdata,
			unsigned long addr, unsigned long size, int flags)
{
	unsigned long sidx, eidx;
	unsigned long i;

	BUG_ON(!size);

	/* out of range for this node */
	if (addr + size < bdata->node_boot_start ||
	    PFN_DOWN(addr) > bdata->node_low_pfn)
		return;

	/* Round outward: partial pages are reserved in full */
	if (addr > bdata->node_boot_start)
		sidx = PFN_DOWN(addr - bdata->node_boot_start);
	else
		sidx = 0;

	eidx = PFN_UP(addr + size - bdata->node_boot_start);
	if (eidx > bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start))
		eidx = bdata->node_low_pfn - PFN_DOWN(bdata->node_boot_start);

	bdebug("nid=%td start=%lx end=%lx flags=%x\n",
		bdata - bootmem_node_data,
		sidx + PFN_DOWN(bdata->node_boot_start),
		eidx + PFN_DOWN(bdata->node_boot_start),
		flags);

	for (i = sidx; i < eidx; i++)
		if (test_and_set_bit(i, bdata->node_bootmem_map))
			bdebug("hm, page %lx reserved twice.\n",
				PFN_DOWN(bdata->node_boot_start) + i);
}
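
/*
 * Note the asymmetry with free_bootmem_core() above: reserving rounds
 * outward (PFN_DOWN for the start, PFN_UP for the end) so partial pages
 * become fully reserved, while freeing rounds inward so partial pages
 * stay reserved. Reserving 0x1800-0x37ff therefore pins pages 1 through
 * 3, whereas freeing the same range releases only page 2.
 */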
/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * Only physical pages that actually reside on @pgdat are marked.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
				unsigned long size, int flags)
{
	int ret;

	ret = can_reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
	if (ret < 0)
		return -ENOMEM;
	reserve_bootmem_core(pgdat->bdata, physaddr, size, flags);
	return 0;
}
#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * All physical pages within the range are marked, no matter what
 * node they reside on.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
			   int flags)
{
	bootmem_data_t *bdata;
	int ret;

	/* check every node first so a failed reservation has no side effects */
	list_for_each_entry(bdata, &bdata_list, list) {
		ret = can_reserve_bootmem_core(bdata, addr, size, flags);
		if (ret < 0)
			return ret;
	}
	list_for_each_entry(bdata, &bdata_list, list)
		reserve_bootmem_core(bdata, addr, size, flags);

	return 0;
}
#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
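
/*
 * Example (illustrative; the address and size are made up): callers that
 * must own a range outright pass BOOTMEM_EXCLUSIVE and handle the -EBUSY
 * that comes back when someone got there first, e.g. a crash-kernel style
 * reservation:
 *
 *	if (reserve_bootmem(0x1000000, 0x800000, BOOTMEM_EXCLUSIVE)) {
 *		printk(KERN_ERR "region already in use\n");
 *		return;
 *	}
 */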
static void * __init alloc_bootmem_core(struct bootmem_data *bdata,
				unsigned long size, unsigned long align,
				unsigned long goal, unsigned long limit)
{
	unsigned long min, max, start, sidx, midx, step;
	unsigned long fallback = 0;

	BUG_ON(!size);
	BUG_ON(align & (align - 1));
	BUG_ON(limit && goal + size > limit);

	if (!bdata->node_bootmem_map)
		return NULL;

	bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
		bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
		align, goal, limit);

	min = PFN_DOWN(bdata->node_boot_start);
	max = bdata->node_low_pfn;

	goal >>= PAGE_SHIFT;
	limit >>= PAGE_SHIFT;

	if (limit && max > limit)
		max = limit;
	if (max <= min)
		return NULL;

	step = max(align >> PAGE_SHIFT, 1UL);

	if (goal && min < goal && goal < max)
		start = ALIGN(goal, step);
	else
		start = ALIGN(min, step);

	sidx = start - PFN_DOWN(bdata->node_boot_start);
	midx = max - PFN_DOWN(bdata->node_boot_start);

	if (bdata->hint_idx > sidx) {
		/* Make sure we retry on failure */
		fallback = sidx + 1;
		sidx = ALIGN(bdata->hint_idx, step);
	}

	while (1) {
		int merge;
		void *region;
		unsigned long eidx, i, start_off, end_off;
find_block:
		sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
		sidx = ALIGN(sidx, step);
		eidx = sidx + PFN_UP(size);

		if (sidx >= midx || eidx > midx)
			break;

		for (i = sidx; i < eidx; i++)
			if (test_bit(i, bdata->node_bootmem_map)) {
				sidx = ALIGN(i, step);
				if (sidx == i)
					sidx += step;
				goto find_block;
			}

		if (bdata->last_end_off &&
		    PFN_DOWN(bdata->last_end_off) + 1 == sidx)
			start_off = ALIGN(bdata->last_end_off, align);
		else
			start_off = PFN_PHYS(sidx);

		merge = PFN_DOWN(start_off) < sidx;
		end_off = start_off + size;
		bdata->last_end_off = end_off;
		bdata->hint_idx = PFN_UP(end_off);

		/* Reserve the area now: */
		for (i = PFN_DOWN(start_off) + merge;
		     i < PFN_UP(end_off); i++)
			if (test_and_set_bit(i, bdata->node_bootmem_map))
				BUG();

		region = phys_to_virt(bdata->node_boot_start + start_off);
		memset(region, 0, size);
		return region;
	}

	if (fallback) {
		sidx = ALIGN(fallback - 1, step);
		fallback = 0;
		goto find_block;
	}

	return NULL;
}
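
/*
 * Sub-page merging, by example (illustrative): suppose two 512-byte,
 * 512-byte-aligned allocations arrive back to back. The first one sets
 * last_end_off = 512 and hint_idx = 1. The second one sees that
 * PFN_DOWN(512) + 1 == sidx (== 1), so it starts at ALIGN(512, 512) = 512
 * inside the same page instead of burning a fresh one; merge becomes 1
 * and the already-set bit for that page is simply not set again.
 * Boot-time allocations smaller than a page therefore pack tightly.
 */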
/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	bootmem_data_t *bdata;
	void *ptr;

	list_for_each_entry(bdata, &bdata_list, list) {
		ptr = alloc_bootmem_core(bdata, size, align, goal, 0);
		if (ptr)
			return ptr;
	}
	return NULL;
}
/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	void *mem = __alloc_bootmem_nopanic(size, align, goal);

	if (mem)
		return mem;
	/* Whoops, we cannot satisfy the allocation request */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}
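
/*
 * Example (illustrative): the alloc_bootmem() convenience macro from
 * <linux/bootmem.h> boils down to a call like
 *
 *	void *tbl = __alloc_bootmem(8192, SMP_CACHE_BYTES,
 *				    __pa(MAX_DMA_ADDRESS));
 *
 * i.e. cache-line aligned, preferring memory above the DMA zone. Since
 * this variant panics on failure, callers do not need to check for NULL.
 */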
/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	void *ptr;

	ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;

	return __alloc_bootmem(size, align, goal);
}
#ifdef CONFIG_SPARSEMEM
/**
 * alloc_bootmem_section - allocate boot memory from a specific section
 * @size: size of the request in bytes
 * @section_nr: sparse map section to allocate from
 *
 * Returns NULL on failure.
 */
void * __init alloc_bootmem_section(unsigned long size,
				    unsigned long section_nr)
{
	void *ptr;
	unsigned long limit, goal, start_nr, end_nr, pfn;
	struct pglist_data *pgdat;

	pfn = section_nr_to_pfn(section_nr);
	goal = PFN_PHYS(pfn);
	limit = PFN_PHYS(section_nr_to_pfn(section_nr + 1)) - 1;
	pgdat = NODE_DATA(early_pfn_to_nid(pfn));
	ptr = alloc_bootmem_core(pgdat->bdata, size, SMP_CACHE_BYTES, goal,
				 limit);

	if (!ptr)
		return NULL;

	/* the allocation must not straddle the section boundary */
	start_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr)));
	end_nr = pfn_to_section_nr(PFN_DOWN(__pa(ptr) + size));
	if (start_nr != section_nr || end_nr != section_nr) {
		printk(KERN_WARNING "alloc_bootmem failed on section %ld.\n",
		       section_nr);
		free_bootmem_core(pgdat->bdata, __pa(ptr), size);
		ptr = NULL;
	}

	return ptr;
}
#endif
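
/*
 * Example caller (illustrative; whether a given kernel's sparse code uses
 * exactly this helper is an assumption): per-section metadata such as a
 * usemap wants to live inside the section it describes, so sparse init
 * can try the section-local allocation first and fall back to the node:
 *
 *	usemap = alloc_bootmem_section(usemap_size(), section_nr);
 *	if (!usemap)
 *		usemap = alloc_bootmem_node(pgdat, usemap_size());
 */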
void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
					   unsigned long align, unsigned long goal)
{
	void *ptr;

	ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
	if (ptr)
		return ptr;
	return __alloc_bootmem_nopanic(size, align, goal);
}
#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif
/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	bootmem_data_t *bdata;
	void *ptr;

	list_for_each_entry(bdata, &bdata_list, list) {
		ptr = alloc_bootmem_core(bdata, size, align, goal,
					 ARCH_LOW_ADDRESS_LIMIT);
		if (ptr)
			return ptr;
	}

	/* Whoops, we cannot satisfy the allocation request */
	printk(KERN_ALERT "low bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of low memory");
	return NULL;
}
/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Unlike the variants above, this one does not fall back to other nodes
 * or panic; it returns NULL on failure.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	return alloc_bootmem_core(pgdat->bdata, size, align, goal,
				  ARCH_LOW_ADDRESS_LIMIT);
}