diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d1cf4f0..6da6672 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -45,6 +45,7 @@
 #include <linux/fault-inject.h>
 #include <linux/page-isolation.h>
 #include <linux/memcontrol.h>
+#include <linux/debugobjects.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -152,9 +153,9 @@ static unsigned long __meminitdata dma_reserve;
 static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
 static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
 #endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
-unsigned long __initdata required_kernelcore;
+static unsigned long __initdata required_kernelcore;
 static unsigned long __initdata required_movablecore;
-unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
+static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
 
 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
 int movable_zone;
@@ -236,16 +237,7 @@ static void bad_page(struct page *page)
        printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
                KERN_EMERG "Backtrace:\n");
        dump_stack();
-       page->flags &= ~(1 << PG_lru    |
-                       1 << PG_private |
-                       1 << PG_locked  |
-                       1 << PG_active  |
-                       1 << PG_dirty   |
-                       1 << PG_reclaim |
-                       1 << PG_slab    |
-                       1 << PG_swapcache |
-                       1 << PG_writeback |
-                       1 << PG_buddy );
+       page->flags &= ~PAGE_FLAGS_CLEAR_WHEN_BAD;
        set_page_count(page, 0);
        reset_page_mapcount(page);
        page->mapping = NULL;
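
The three open-coded flag lists this patch removes (here, in free_pages_check() and in prep_new_page()) are folded into named masks in include/linux/page-flags.h. A sketch of those definitions, reconstructed purely from the bits the removed lists tested (the shared PAGE_FLAGS base is an inference, so treat this as illustrative rather than the verbatim header):

    #define PAGE_FLAGS	(1 << PG_lru | 1 << PG_private | 1 << PG_locked | \
			 1 << PG_active | 1 << PG_slab | 1 << PG_swapcache | \
			 1 << PG_writeback | 1 << PG_buddy)
    #define PAGE_FLAGS_CLEAR_WHEN_BAD (PAGE_FLAGS | 1 << PG_reclaim | 1 << PG_dirty)
    #define PAGE_FLAGS_CHECK_AT_FREE  (PAGE_FLAGS | 1 << PG_reserved)
    #define PAGE_FLAGS_CHECK_AT_PREP  (PAGE_FLAGS | 1 << PG_reserved | 1 << PG_dirty)
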
@@ -272,7 +264,7 @@ static void free_compound_page(struct page *page)
        __free_pages_ok(page, compound_order(page));
 }
 
-static void prep_compound_page(struct page *page, unsigned long order)
+void prep_compound_page(struct page *page, unsigned long order)
 {
        int i;
        int nr_pages = 1 << order;
@@ -440,8 +432,9 @@ static inline void __free_one_page(struct page *page,
 
                buddy = __page_find_buddy(page, page_idx, order);
                if (!page_is_buddy(page, buddy, order))
-                       break;          /* Move the buddy up one level. */
+                       break;
 
+               /* Our buddy is free, merge with it and move up one order. */
                list_del(&buddy->lru);
                zone->free_area[order].nr_free--;
                rmv_page_order(buddy);
@@ -462,16 +455,7 @@ static inline int free_pages_check(struct page *page)
                (page->mapping != NULL)  |
                (page_get_page_cgroup(page) != NULL) |
                (page_count(page) != 0)  |
-               (page->flags & (
-                       1 << PG_lru     |
-                       1 << PG_private |
-                       1 << PG_locked  |
-                       1 << PG_active  |
-                       1 << PG_slab    |
-                       1 << PG_swapcache |
-                       1 << PG_writeback |
-                       1 << PG_reserved |
-                       1 << PG_buddy ))))
+               (page->flags & PAGE_FLAGS_CHECK_AT_FREE)))
                bad_page(page);
        if (PageDirty(page))
                __ClearPageDirty(page);
@@ -532,8 +516,11 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        if (reserved)
                return;
 
-       if (!PageHighMem(page))
+       if (!PageHighMem(page)) {
                debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
+               debug_check_no_obj_freed(page_address(page),
+                                          PAGE_SIZE << order);
+       }
        arch_free_page(page, order);
        kernel_map_pages(page, 1 << order, 0);
 
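
debug_check_no_obj_freed() is what the new <linux/debugobjects.h> include above is for; like debug_check_no_locks_freed(), it compiles away unless the matching debug option (CONFIG_DEBUG_OBJECTS_FREE) is enabled, and it warns when a page being freed still contains a live tracked object such as an armed timer. Its prototype, for reference:

    void debug_check_no_obj_freed(const void *address, unsigned long size);
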
@@ -546,7 +533,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 /*
  * permit the bootmem allocator to evade page validation on high-order frees
  */
-void __free_pages_bootmem(struct page *page, unsigned int order)
+void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
 {
        if (order == 0) {
                __ClearPageReserved(page);
@@ -612,17 +599,7 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
                (page->mapping != NULL)  |
                (page_get_page_cgroup(page) != NULL) |
                (page_count(page) != 0)  |
-               (page->flags & (
-                       1 << PG_lru     |
-                       1 << PG_private |
-                       1 << PG_locked  |
-                       1 << PG_active  |
-                       1 << PG_dirty   |
-                       1 << PG_slab    |
-                       1 << PG_swapcache |
-                       1 << PG_writeback |
-                       1 << PG_reserved |
-                       1 << PG_buddy ))))
+               (page->flags & PAGE_FLAGS_CHECK_AT_PREP)))
                bad_page(page);
 
        /*
@@ -697,9 +674,9 @@ static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
  * Note that start_page and end_pages are not aligned on a pageblock
  * boundary. If alignment is required, use move_freepages_block()
  */
-int move_freepages(struct zone *zone,
-                       struct page *start_page, struct page *end_page,
-                       int migratetype)
+static int move_freepages(struct zone *zone,
+                         struct page *start_page, struct page *end_page,
+                         int migratetype)
 {
        struct page *page;
        unsigned long order;
@@ -738,7 +715,8 @@ int move_freepages(struct zone *zone,
        return pages_moved;
 }
 
-int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
+static int move_freepages_block(struct zone *zone, struct page *page,
+                               int migratetype)
 {
        unsigned long start_pfn, end_pfn;
        struct page *start_page, *end_page;
@@ -942,7 +920,7 @@ void drain_local_pages(void *arg)
  */
 void drain_all_pages(void)
 {
-       on_each_cpu(drain_local_pages, NULL, 0, 1);
+       on_each_cpu(drain_local_pages, NULL, 1);
 }
 
 #ifdef CONFIG_HIBERNATION
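
The drain_all_pages() hunk is not a functional change: it tracks the smp API cleanup that dropped on_each_cpu()'s unused retry argument, leaving (sketch of the post-cleanup prototype):

    int on_each_cpu(void (*func)(void *info), void *info, int wait);
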
@@ -995,8 +973,10 @@ static void free_hot_cold_page(struct page *page, int cold)
        if (free_pages_check(page))
                return;
 
-       if (!PageHighMem(page))
+       if (!PageHighMem(page)) {
                debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
+               debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
+       }
        arch_free_page(page, 0);
        kernel_map_pages(page, 1, 0);
 
@@ -1390,6 +1370,9 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
 
        (void)first_zones_zonelist(zonelist, high_zoneidx, nodemask,
                                                        &preferred_zone);
+       if (!preferred_zone)
+               return NULL;
+
        classzone_idx = zone_idx(preferred_zone);
 
 zonelist_scan:
@@ -1448,7 +1431,7 @@ try_next_zone:
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
-static struct page *
+struct page *
 __alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
                        struct zonelist *zonelist, nodemask_t *nodemask)
 {
@@ -1461,7 +1444,8 @@ __alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
        struct task_struct *p = current;
        int do_retry;
        int alloc_flags;
-       int did_some_progress;
+       unsigned long did_some_progress;
+       unsigned long pages_reclaimed = 0;
 
        might_sleep_if(wait);
 
@@ -1611,14 +1595,26 @@ nofail_alloc:
         * Don't let big-order allocations loop unless the caller explicitly
         * requests that.  Wait for some write requests to complete then retry.
         *
-        * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
-        * <= 3, but that may not be true in other implementations.
+        * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
+        * means __GFP_NOFAIL, but that may not be true in other
+        * implementations.
+        *
+        * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
+        * specified, then we retry until we no longer reclaim any pages
+        * (above), or we've reclaimed an order of pages at least as
+        * large as the allocation's order. In both cases, if the
+        * allocation still fails, we stop retrying.
         */
+       pages_reclaimed += did_some_progress;
        do_retry = 0;
        if (!(gfp_mask & __GFP_NORETRY)) {
-               if ((order <= PAGE_ALLOC_COSTLY_ORDER) ||
-                                               (gfp_mask & __GFP_REPEAT))
+               if (order <= PAGE_ALLOC_COSTLY_ORDER) {
                        do_retry = 1;
+               } else {
+                       if (gfp_mask & __GFP_REPEAT &&
+                               pages_reclaimed < (1 << order))
+                                       do_retry = 1;
+               }
                if (gfp_mask & __GFP_NOFAIL)
                        do_retry = 1;
        }
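
To make the new retry rule concrete (PAGE_ALLOC_COSTLY_ORDER is 3 in this tree; the order-5 request is an arbitrary example):

    /*
     * alloc_pages(GFP_KERNEL | __GFP_REPEAT, 5):
     *   order 5 > PAGE_ALLOC_COSTLY_ORDER, so retries are no longer
     *   unconditional.  Each pass accumulates did_some_progress into
     *   pages_reclaimed, and we keep retrying only while
     *   pages_reclaimed < (1 << 5) == 32, that is, until roughly one
     *   allocation's worth of pages has been reclaimed on our behalf.
     */
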
@@ -1638,22 +1634,7 @@ nopage:
 got_pg:
        return page;
 }
-
-struct page *
-__alloc_pages(gfp_t gfp_mask, unsigned int order,
-               struct zonelist *zonelist)
-{
-       return __alloc_pages_internal(gfp_mask, order, zonelist, NULL);
-}
-
-struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
-               struct zonelist *zonelist, nodemask_t *nodemask)
-{
-       return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask);
-}
-
-EXPORT_SYMBOL(__alloc_pages);
+EXPORT_SYMBOL(__alloc_pages_internal);
 
 /*
  * Common helper functions.
@@ -1717,6 +1698,59 @@ void free_pages(unsigned long addr, unsigned int order)
 
 EXPORT_SYMBOL(free_pages);
 
+/**
+ * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
+ * @size: the number of bytes to allocate
+ * @gfp_mask: GFP flags for the allocation
+ *
+ * This function is similar to alloc_pages(), except that it allocates the
+ * minimum number of pages to satisfy the request.  alloc_pages() can only
+ * allocate memory in power-of-two pages.
+ *
+ * This function is also limited by MAX_ORDER.
+ *
+ * Memory allocated by this function must be released by free_pages_exact().
+ */
+void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
+{
+       unsigned int order = get_order(size);
+       unsigned long addr;
+
+       addr = __get_free_pages(gfp_mask, order);
+       if (addr) {
+               unsigned long alloc_end = addr + (PAGE_SIZE << order);
+               unsigned long used = addr + PAGE_ALIGN(size);
+
+               split_page(virt_to_page(addr), order);
+               while (used < alloc_end) {
+                       free_page(used);
+                       used += PAGE_SIZE;
+               }
+       }
+
+       return (void *)addr;
+}
+EXPORT_SYMBOL(alloc_pages_exact);
+
+/**
+ * free_pages_exact - release memory allocated via alloc_pages_exact()
+ * @virt: the value returned by alloc_pages_exact.
+ * @size: size of allocation, same value as passed to alloc_pages_exact().
+ *
+ * Release the memory allocated by a previous call to alloc_pages_exact.
+ */
+void free_pages_exact(void *virt, size_t size)
+{
+       unsigned long addr = (unsigned long)virt;
+       unsigned long end = addr + PAGE_ALIGN(size);
+
+       while (addr < end) {
+               free_page(addr);
+               addr += PAGE_SIZE;
+       }
+}
+EXPORT_SYMBOL(free_pages_exact);
+
 static unsigned int nr_free_zone_pages(int offset)
 {
        struct zoneref *z;
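
A hypothetical caller, to illustrate the contract (the five-page size is made up):

    /* Five pages: alloc_pages() would round up to order 3 and hand back
     * eight, while alloc_pages_exact() frees the unused tail pages. */
    void *buf = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL);
    if (buf) {
            /* ... use the buffer ... */
            free_pages_exact(buf, 5 * PAGE_SIZE);
    }
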
@@ -2334,7 +2368,6 @@ static void build_zonelists(pg_data_t *pgdat)
 static void build_zonelist_cache(pg_data_t *pgdat)
 {
        pgdat->node_zonelists[0].zlcache_ptr = NULL;
-       pgdat->node_zonelists[1].zlcache_ptr = NULL;
 }
 
 #endif /* CONFIG_NUMA */
@@ -2359,6 +2392,7 @@ void build_all_zonelists(void)
 
        if (system_state == SYSTEM_BOOTING) {
                __build_all_zonelists(NULL);
+               mminit_verify_zonelist();
                cpuset_init_current_mems_allowed();
        } else {
                /* we have to stop all cpus to guarantee there is no user
@@ -2524,7 +2558,9 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
        struct page *page;
        unsigned long end_pfn = start_pfn + size;
        unsigned long pfn;
+       struct zone *z;
 
+       z = &NODE_DATA(nid)->node_zones[zone];
        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                /*
                 * There can be holes in boot-time mem_map[]s
@@ -2539,10 +2575,10 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                }
                page = pfn_to_page(pfn);
                set_page_links(page, zone, nid, pfn);
+               mminit_verify_page_links(page, zone, nid, pfn);
                init_page_count(page);
                reset_page_mapcount(page);
                SetPageReserved(page);
-
                /*
                 * Mark the block movable so that blocks are reserved for
                 * movable at startup. This will force kernel allocations
@@ -2551,8 +2587,15 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                 * kernel allocations are made. Later some blocks near
                 * the start are marked MIGRATE_RESERVE by
                 * setup_zone_migrate_reserve()
+                *
+                * The pageblock bitmap only covers the zone's valid pfn
+                * range, but the memmap may extend beyond it for alignment.
+                * Check that the pfn lies within the zone before calling
+                * set_pageblock_migratetype() on it.
                 */
-               if (!(pfn & (pageblock_nr_pages-1)))
+               if ((z->zone_start_pfn <= pfn)
+                   && (pfn < z->zone_start_pfn + z->spanned_pages)
+                   && !(pfn & (pageblock_nr_pages - 1)))
                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 
                INIT_LIST_HEAD(&page->lru);
@@ -2610,7 +2653,7 @@ static int zone_batchsize(struct zone *zone)
        return batch;
 }
 
-inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
+static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
 {
        struct per_cpu_pages *pcp;
 
@@ -2777,7 +2820,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
        alloc_size = zone->wait_table_hash_nr_entries
                                        * sizeof(wait_queue_head_t);
 
-       if (system_state == SYSTEM_BOOTING) {
+       if (!slab_is_available()) {
                zone->wait_table = (wait_queue_head_t *)
                        alloc_bootmem_node(pgdat, alloc_size);
        } else {
@@ -2835,7 +2878,11 @@ __meminit int init_currently_empty_zone(struct zone *zone,
 
        zone->zone_start_pfn = zone_start_pfn;
 
-       memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
+       mminit_dprintk(MMINIT_TRACE, "memmap_init",
+                       "Initialising map node %d zone %lu pfns %lu -> %lu\n",
+                       pgdat->node_id,
+                       (unsigned long)zone_idx(zone),
+                       zone_start_pfn, (zone_start_pfn + size));
 
        zone_init_free_lists(zone);
 
@@ -2930,6 +2977,18 @@ void __init free_bootmem_with_active_regions(int nid,
        }
 }
 
+void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
+{
+       int i;
+       int ret;
+
+       for_each_active_range_index_in_nid(i, nid) {
+               ret = work_fn(early_node_map[i].start_pfn,
+                             early_node_map[i].end_pfn, data);
+               if (ret)
+                       break;
+       }
+}
 /**
  * sparse_memory_present_with_active_regions - Call memory_present for each active range
  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
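
A minimal sketch of a work_with_active_regions() caller, assuming only what the body above implies about work_fn_t (it takes a start pfn, an end pfn and an opaque pointer, and a nonzero return stops the walk); the callback is made up:

    /* Hypothetical callback: tally the pages in each active range. */
    static int __init count_range_pages(unsigned long start_pfn,
                                        unsigned long end_pfn, void *data)
    {
            *(unsigned long *)data += end_pfn - start_pfn;
            return 0;       /* returning nonzero would stop the walk early */
    }

    /* ... then, from __init code, for node nid: */
    unsigned long nr_pages = 0;
    work_with_active_regions(nid, count_range_pages, &nr_pages);
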
@@ -2964,7 +3023,8 @@ void __init sparse_memory_present_with_active_regions(int nid)
 void __init push_node_boundaries(unsigned int nid,
                unsigned long start_pfn, unsigned long end_pfn)
 {
-       printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n",
+       mminit_dprintk(MMINIT_TRACE, "zoneboundary",
+                       "Entering push_node_boundaries(%u, %lu, %lu)\n",
                        nid, start_pfn, end_pfn);
 
        /* Initialise the boundary for this node if necessary */
@@ -2982,7 +3042,8 @@ void __init push_node_boundaries(unsigned int nid,
 static void __meminit account_node_boundary(unsigned int nid,
                unsigned long *start_pfn, unsigned long *end_pfn)
 {
-       printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n",
+       mminit_dprintk(MMINIT_TRACE, "zoneboundary",
+                       "Entering account_node_boundary(%u, %lu, %lu)\n",
                        nid, *start_pfn, *end_pfn);
 
        /* Return if boundary information has not been provided */
@@ -3039,7 +3100,7 @@ void __meminit get_pfn_range_for_nid(unsigned int nid,
  * assumption is made that zones within a node are ordered in monotonic
  * increasing memory addresses so that the "highest" populated zone is used
  */
-void __init find_usable_zone_for_movable(void)
+static void __init find_usable_zone_for_movable(void)
 {
        int zone_index;
        for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
@@ -3065,7 +3126,7 @@ void __init find_usable_zone_for_movable(void)
  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
  * zones within a node are in order of monotonic increases memory addresses
  */
-void __meminit adjust_zone_range_for_zone_movable(int nid,
+static void __meminit adjust_zone_range_for_zone_movable(int nid,
                                        unsigned long zone_type,
                                        unsigned long node_start_pfn,
                                        unsigned long node_end_pfn,
@@ -3126,7 +3187,7 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
  * then all holes in the requested range will be accounted for.
  */
-unsigned long __meminit __absent_pages_in_range(int nid,
+static unsigned long __meminit __absent_pages_in_range(int nid,
                                unsigned long range_start_pfn,
                                unsigned long range_end_pfn)
 {
@@ -3353,11 +3414,12 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                 * is used by this zone for memmap. This affects the watermark
                 * and per-cpu initialisations
                 */
-               memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT;
+               memmap_pages =
+                       PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
                if (realsize >= memmap_pages) {
                        realsize -= memmap_pages;
-                       printk(KERN_DEBUG
-                               "  %s zone: %lu pages used for memmap\n",
+                       mminit_dprintk(MMINIT_TRACE, "memmap_init",
+                               "%s zone: %lu pages used for memmap\n",
                                zone_names[j], memmap_pages);
                } else
                        printk(KERN_WARNING
@@ -3367,7 +3429,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                /* Account for reserved pages */
                if (j == 0 && realsize > dma_reserve) {
                        realsize -= dma_reserve;
-                       printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
+                       mminit_dprintk(MMINIT_TRACE, "memmap_init",
+                                       "%s zone: %lu pages reserved\n",
                                        zone_names[0], dma_reserve);
                }
 
@@ -3406,6 +3469,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                ret = init_currently_empty_zone(zone, zone_start_pfn,
                                                size, MEMMAP_EARLY);
                BUG_ON(ret);
+               memmap_init(size, nid, j, zone_start_pfn);
                zone_start_pfn += size;
        }
 }
@@ -3451,15 +3515,21 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
 }
 
-void __paginginit free_area_init_node(int nid, struct pglist_data *pgdat,
-               unsigned long *zones_size, unsigned long node_start_pfn,
-               unsigned long *zholes_size)
+void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
+               unsigned long node_start_pfn, unsigned long *zholes_size)
 {
+       pg_data_t *pgdat = NODE_DATA(nid);
+
        pgdat->node_id = nid;
        pgdat->node_start_pfn = node_start_pfn;
        calculate_node_totalpages(pgdat, zones_size, zholes_size);
 
        alloc_node_mem_map(pgdat);
+#ifdef CONFIG_FLAT_NODE_MEM_MAP
+       printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
+               nid, (unsigned long)pgdat,
+               (unsigned long)pgdat->node_mem_map);
+#endif
 
        free_area_init_core(pgdat, zones_size, zholes_size);
 }
@@ -3502,10 +3572,13 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 {
        int i;
 
-       printk(KERN_DEBUG "Entering add_active_range(%d, %lu, %lu) "
-                         "%d entries of %d used\n",
-                         nid, start_pfn, end_pfn,
-                         nr_nodemap_entries, MAX_ACTIVE_REGIONS);
+       mminit_dprintk(MMINIT_TRACE, "memory_register",
+                       "Entering add_active_range(%d, %#lx, %#lx) "
+                       "%d entries of %d used\n",
+                       nid, start_pfn, end_pfn,
+                       nr_nodemap_entries, MAX_ACTIVE_REGIONS);
+
+       mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
 
        /* Merge with existing active regions if possible */
        for (i = 0; i < nr_nodemap_entries; i++) {
@@ -3546,27 +3619,68 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 }
 
 /**
- * shrink_active_range - Shrink an existing registered range of PFNs
+ * remove_active_range - Shrink an existing registered range of PFNs
  * @nid: The node id the range is on that should be shrunk
- * @old_end_pfn: The old end PFN of the range
- * @new_end_pfn: The new PFN of the range
+ * @start_pfn: The start PFN of the range to remove
+ * @end_pfn: The end PFN of the range to remove
  *
  * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
- * The map is kept at the end physical page range that has already been
- * registered with add_active_range(). This function allows an arch to shrink
- * an existing registered range.
+ * The map is kept near the end of the physical page range that has
+ * already been registered. This function allows an arch to shrink an
+ * existing registered range.
  */
-void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
-                                               unsigned long new_end_pfn)
+void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
+                               unsigned long end_pfn)
 {
-       int i;
+       int i, j;
+       int removed = 0;
+
+       printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
+                         nid, start_pfn, end_pfn);
 
        /* Find the old active region end and shrink */
-       for_each_active_range_index_in_nid(i, nid)
-               if (early_node_map[i].end_pfn == old_end_pfn) {
-                       early_node_map[i].end_pfn = new_end_pfn;
-                       break;
+       for_each_active_range_index_in_nid(i, nid) {
+               if (early_node_map[i].start_pfn >= start_pfn &&
+                   early_node_map[i].end_pfn <= end_pfn) {
+                       /* clear it */
+                       early_node_map[i].start_pfn = 0;
+                       early_node_map[i].end_pfn = 0;
+                       removed = 1;
+                       continue;
+               }
+               if (early_node_map[i].start_pfn < start_pfn &&
+                   early_node_map[i].end_pfn > start_pfn) {
+                       unsigned long temp_end_pfn = early_node_map[i].end_pfn;
+                       early_node_map[i].end_pfn = start_pfn;
+                       if (temp_end_pfn > end_pfn)
+                               add_active_range(nid, end_pfn, temp_end_pfn);
+                       continue;
+               }
+               if (early_node_map[i].start_pfn >= start_pfn &&
+                   early_node_map[i].end_pfn > end_pfn &&
+                   early_node_map[i].start_pfn < end_pfn) {
+                       early_node_map[i].start_pfn = end_pfn;
+                       continue;
                }
+       }
+
+       if (!removed)
+               return;
+
+       /* remove the blank ones */
+       for (i = nr_nodemap_entries - 1; i > 0; i--) {
+               if (early_node_map[i].nid != nid)
+                       continue;
+               if (early_node_map[i].end_pfn)
+                       continue;
+               /* we found it, get rid of it */
+               for (j = i; j < nr_nodemap_entries - 1; j++)
+                       memcpy(&early_node_map[j], &early_node_map[j+1],
+                               sizeof(early_node_map[j]));
+               j = nr_nodemap_entries - 1;
+               memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
+               nr_nodemap_entries--;
+       }
 }
 
 /**
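
Worked examples of the overlap cases handled above (PFN values made up), for a call remove_active_range(nid, 100, 200) against previously registered ranges:

    [120, 180)  ->  cleared outright (fully inside the removed span)
    [ 50, 150)  ->  trimmed to [50, 100)
    [ 50, 250)  ->  trimmed to [50, 100); [200, 250) is re-registered
                    via add_active_range()
    [150, 250)  ->  trimmed to [200, 250)

The second pass then compacts early_node_map[], dropping any entry this node cleared to zero.
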
@@ -3610,7 +3724,7 @@ static void __init sort_node_map(void)
 }
 
 /* Find the lowest pfn for a node */
-unsigned long __init find_min_pfn_for_node(unsigned long nid)
+static unsigned long __init find_min_pfn_for_node(int nid)
 {
        int i;
        unsigned long min_pfn = ULONG_MAX;
@@ -3621,7 +3735,7 @@ unsigned long __init find_min_pfn_for_node(unsigned long nid)
 
        if (min_pfn == ULONG_MAX) {
                printk(KERN_WARNING
-                       "Could not find start_pfn for node %lu\n", nid);
+                       "Could not find start_pfn for node %d\n", nid);
                return 0;
        }
 
@@ -3682,7 +3796,7 @@ static unsigned long __init early_calculate_totalpages(void)
  * memory. When they don't, some nodes will have more kernelcore than
  * others
  */
-void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
+static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
 {
        int i, nid;
        unsigned long usable_startpfn;
@@ -3877,7 +3991,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
        for (i = 0; i < MAX_NR_ZONES; i++) {
                if (i == ZONE_MOVABLE)
                        continue;
-               printk("  %-8s %8lu -> %8lu\n",
+               printk("  %-8s %0#10lx -> %0#10lx\n",
                                zone_names[i],
                                arch_zone_lowest_possible_pfn[i],
                                arch_zone_highest_possible_pfn[i]);
@@ -3893,15 +4007,16 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
        /* Print out the early_node_map[] */
        printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
        for (i = 0; i < nr_nodemap_entries; i++)
-               printk("  %3d: %8lu -> %8lu\n", early_node_map[i].nid,
+               printk("  %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
                                                early_node_map[i].start_pfn,
                                                early_node_map[i].end_pfn);
 
        /* Initialise every node */
+       mminit_verify_pageflags_layout();
        setup_nr_node_ids();
        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
-               free_area_init_node(nid, pgdat, NULL,
+               free_area_init_node(nid, NULL,
                                find_min_pfn_for_node(nid), NULL);
 
                /* Any memory on that node */
@@ -3966,15 +4081,13 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-static bootmem_data_t contig_bootmem_data;
-struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
-
+struct pglist_data contig_page_data = { .bdata = &bootmem_node_data[0] };
 EXPORT_SYMBOL(contig_page_data);
 #endif
 
 void __init free_area_init(unsigned long *zones_size)
 {
-       free_area_init_node(0, NODE_DATA(0), zones_size,
+       free_area_init_node(0, zones_size,
                        __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
 }
 
@@ -4464,6 +4577,8 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
        pfn = page_to_pfn(page);
        bitmap = get_pageblock_bitmap(zone, pfn);
        bitidx = pfn_to_bitidx(zone, pfn);
+       VM_BUG_ON(pfn < zone->zone_start_pfn);
+       VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
 
        for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
                if (flags & value)