include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
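The point of the cleanup is that a file which calls the page allocator should include <linux/gfp.h> itself rather than picking it up implicitly through other headers (e.g. slab.h or percpu.h). A minimal sketch of the pattern this enforces — the helper function below is purely illustrative and not part of the patch:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Illustrative only: alloc_pages() and GFP_KERNEL are provided by
 * <linux/gfp.h>, so a file using them should include it directly
 * instead of relying on an implicit include chain. */
static struct page *example_grab_page(void)
{
	return alloc_pages(GFP_KERNEL, 0);
}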
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 69ddfbd..ee41bba 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/memory_hotplug.h>
 #include <linux/nmi.h>
+#include <linux/gfp.h>
 
 #include <asm/processor.h>
 #include <asm/bios_ebda.h>
@@ -572,6 +573,7 @@ kernel_physical_mapping_init(unsigned long start,
 void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
                                int acpi, int k8)
 {
+#ifndef CONFIG_NO_BOOTMEM
        unsigned long bootmap_size, bootmap;
 
        bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
@@ -579,13 +581,15 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
                                 PAGE_SIZE);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
+       reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");
        /* don't touch min_low_pfn */
        bootmap_size = init_bootmem_node(NODE_DATA(0), bootmap >> PAGE_SHIFT,
                                         0, end_pfn);
        e820_register_active_regions(0, start_pfn, end_pfn);
        free_bootmem_with_active_regions(0, end_pfn);
-       early_res_to_bootmem(0, end_pfn<<PAGE_SHIFT);
-       reserve_bootmem(bootmap, bootmap_size, BOOTMEM_DEFAULT);
+#else
+       e820_register_active_regions(0, start_pfn, end_pfn);
+#endif
 }
 #endif
 
@@ -974,7 +978,7 @@ vmemmap_populate(struct page *start_page, unsigned long size, int node)
                        if (pmd_none(*pmd)) {
                                pte_t entry;
 
-                               p = vmemmap_alloc_block(PMD_SIZE, node);
+                               p = vmemmap_alloc_block_buf(PMD_SIZE, node);
                                if (!p)
                                        return -ENOMEM;