diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ebd425c..43cb3b3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -40,6 +40,7 @@
 #include <linux/sort.h>
 #include <linux/pfn.h>
 #include <linux/backing-dev.h>
+#include <linux/fault-inject.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -72,40 +73,39 @@ static void __free_pages_ok(struct page *page, unsigned int order);
  * don't need any ZONE_NORMAL reservation
  */
 int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
+#ifdef CONFIG_ZONE_DMA
         256,
+#endif
 #ifdef CONFIG_ZONE_DMA32
         256,
 #endif
 #ifdef CONFIG_HIGHMEM
-        32
+        32,
 #endif
+        32,
 };
 
 EXPORT_SYMBOL(totalram_pages);
 
-/*
- * Used by page_zone() to look up the address of the struct zone whose
- * id is encoded in the upper bits of page->flags
- */
-struct zone *zone_table[1 << ZONETABLE_SHIFT] __read_mostly;
-EXPORT_SYMBOL(zone_table);
-
-static char *zone_names[MAX_NR_ZONES] = {
+static char * const zone_names[MAX_NR_ZONES] = {
+#ifdef CONFIG_ZONE_DMA
         "DMA",
+#endif
 #ifdef CONFIG_ZONE_DMA32
         "DMA32",
 #endif
         "Normal",
 #ifdef CONFIG_HIGHMEM
-        "HighMem"
+        "HighMem",
 #endif
+        "Movable",
 };
 
 int min_free_kbytes = 1024;
 
 unsigned long __meminitdata nr_kernel_pages;
 unsigned long __meminitdata nr_all_pages;
-static unsigned long __initdata dma_reserve;
+static unsigned long __meminitdata dma_reserve;
 
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
   /*
@@ -128,16 +128,28 @@ static unsigned long __initdata dma_reserve;
     #endif
   #endif
 
-  struct node_active_region __initdata early_node_map[MAX_ACTIVE_REGIONS];
-  int __initdata nr_nodemap_entries;
-  unsigned long __initdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
-  unsigned long __initdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
+  static struct node_active_region __meminitdata early_node_map[MAX_ACTIVE_REGIONS];
+  static int __meminitdata nr_nodemap_entries;
+  static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
+  static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
 #ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
-  unsigned long __initdata node_boundary_start_pfn[MAX_NUMNODES];
-  unsigned long __initdata node_boundary_end_pfn[MAX_NUMNODES];
+  static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
+  static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
 #endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
+  unsigned long __initdata required_kernelcore;
+  unsigned long __initdata required_movablecore;
+  unsigned long __initdata zone_movable_pfn[MAX_NUMNODES];
+
+  /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
+  int movable_zone;
+  EXPORT_SYMBOL(movable_zone);
 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 
+#if MAX_NUMNODES > 1
+int nr_node_ids __read_mostly = MAX_NUMNODES;
+EXPORT_SYMBOL(nr_node_ids);
+#endif
+
 #ifdef CONFIG_DEBUG_VM
 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 {
@@ -158,10 +170,8 @@ static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 
 static int page_is_consistent(struct zone *zone, struct page *page)
 {
-#ifdef CONFIG_HOLES_IN_ZONE
-       if (!pfn_valid(page_to_pfn(page)))
+       if (!pfn_valid_within(page_to_pfn(page)))
                return 0;
-#endif
        if (zone != page_zone(page))
                return 0;
 
@@ -229,7 +239,7 @@ static void bad_page(struct page *page)
 
 static void free_compound_page(struct page *page)
 {
-       __free_pages_ok(page, (unsigned long)page[1].lru.prev);
+       __free_pages_ok(page, compound_order(page));
 }
 
 static void prep_compound_page(struct page *page, unsigned long order)
@@ -237,13 +247,14 @@ static void prep_compound_page(struct page *page, unsigned long order)
        int i;
        int nr_pages = 1 << order;
 
-       page[1].lru.next = (void *)free_compound_page;  /* set dtor */
-       page[1].lru.prev = (void *)order;
-       for (i = 0; i < nr_pages; i++) {
+       set_compound_page_dtor(page, free_compound_page);
+       set_compound_order(page, order);
+       __SetPageHead(page);
+       for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;
 
-               __SetPageCompound(p);
-               set_page_private(p, (unsigned long)page);
+               __SetPageTail(p);
+               p->first_page = page;
        }
 }
 
@@ -252,16 +263,19 @@ static void destroy_compound_page(struct page *page, unsigned long order)
        int i;
        int nr_pages = 1 << order;
 
-       if (unlikely((unsigned long)page[1].lru.prev != order))
+       if (unlikely(compound_order(page) != order))
                bad_page(page);
 
-       for (i = 0; i < nr_pages; i++) {
+       if (unlikely(!PageHead(page)))
+                       bad_page(page);
+       __ClearPageHead(page);
+       for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;
 
-               if (unlikely(!PageCompound(p) |
-                               (page_private(p) != (unsigned long)page)))
+               if (unlikely(!PageTail(p) |
+                               (p->first_page != page)))
                        bad_page(page);
-               __ClearPageCompound(p);
+               __ClearPageTail(p);
        }
 }
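
As an aside for readers of this hunk: the helpers used above are not defined in this file. Below is a minimal sketch of what they are assumed to look like, simply wrapping the same page[1] fields the removed open-coded version used; the real definitions live in include/linux/mm.h and may differ in detail.

typedef void compound_page_dtor(struct page *);

static inline void set_compound_page_dtor(struct page *page,
						compound_page_dtor *dtor)
{
	page[1].lru.next = (void *)dtor;	/* the old "set dtor" slot */
}

static inline unsigned long compound_order(struct page *page)
{
	return (unsigned long)page[1].lru.prev;	/* the old order slot */
}

static inline void set_compound_order(struct page *page, unsigned long order)
{
	page[1].lru.prev = (void *)order;
}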
 
@@ -348,10 +362,8 @@ __find_combined_index(unsigned long page_idx, unsigned int order)
 static inline int page_is_buddy(struct page *page, struct page *buddy,
                                                                int order)
 {
-#ifdef CONFIG_HOLES_IN_ZONE
-       if (!pfn_valid(page_to_pfn(buddy)))
+       if (!pfn_valid_within(page_to_pfn(buddy)))
                return 0;
-#endif
 
        if (page_zone_id(page) != page_zone_id(buddy))
                return 0;
@@ -401,7 +413,7 @@ static inline void __free_one_page(struct page *page,
        VM_BUG_ON(page_idx & (order_size - 1));
        VM_BUG_ON(bad_range(zone, page));
 
-       zone->free_pages += order_size;
+       __mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
        while (order < MAX_ORDER-1) {
                unsigned long combined_idx;
                struct free_area *area;
@@ -435,7 +447,6 @@ static inline int free_pages_check(struct page *page)
                        1 << PG_private |
                        1 << PG_locked  |
                        1 << PG_active  |
-                       1 << PG_reclaim |
                        1 << PG_slab    |
                        1 << PG_swapcache |
                        1 << PG_writeback |
@@ -486,7 +497,7 @@ static void free_one_page(struct zone *zone, struct page *page, int order)
        spin_lock(&zone->lock);
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
-       __free_one_page(page, zone ,order);
+       __free_one_page(page, zone, order);
        spin_unlock(&zone->lock);
 }
 
@@ -585,7 +596,6 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
                        1 << PG_locked  |
                        1 << PG_active  |
                        1 << PG_dirty   |
-                       1 << PG_reclaim |
                        1 << PG_slab    |
                        1 << PG_swapcache |
                        1 << PG_writeback |
@@ -600,11 +610,13 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
        if (PageReserved(page))
                return 1;
 
-       page->flags &= ~(1 << PG_uptodate | 1 << PG_error |
+       page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_readahead |
                        1 << PG_referenced | 1 << PG_arch_1 |
-                       1 << PG_checked | 1 << PG_mappedtodisk);
+                       1 << PG_owner_priv_1 | 1 << PG_mappedtodisk);
        set_page_private(page, 0);
        set_page_refcounted(page);
+
+       arch_alloc_page(page, order);
        kernel_map_pages(page, 1 << order, 1);
 
        if (gfp_flags & __GFP_ZERO)
@@ -635,7 +647,7 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order)
                list_del(&page->lru);
                rmv_page_order(page);
                area->nr_free--;
-               zone->free_pages -= 1UL << order;
+               __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
                expand(zone, page, order, current_order, area);
                return page;
        }
@@ -666,41 +678,29 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 
 #ifdef CONFIG_NUMA
 /*
- * Called from the slab reaper to drain pagesets on a particular node that
- * belongs to the currently executing processor.
+ * Called from the vmstat counter updater to drain pagesets of this
+ * currently executing processor on remote nodes after they have
+ * expired.
+ *
  * Note that this function must be called with the thread pinned to
  * a single processor.
  */
-void drain_node_pages(int nodeid)
+void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 {
-       int i;
-       enum zone_type z;
        unsigned long flags;
+       int to_drain;
 
-       for (z = 0; z < MAX_NR_ZONES; z++) {
-               struct zone *zone = NODE_DATA(nodeid)->node_zones + z;
-               struct per_cpu_pageset *pset;
-
-               if (!populated_zone(zone))
-                       continue;
-
-               pset = zone_pcp(zone, smp_processor_id());
-               for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
-                       struct per_cpu_pages *pcp;
-
-                       pcp = &pset->pcp[i];
-                       if (pcp->count) {
-                               local_irq_save(flags);
-                               free_pages_bulk(zone, pcp->count, &pcp->list, 0);
-                               pcp->count = 0;
-                               local_irq_restore(flags);
-                       }
-               }
-       }
+       local_irq_save(flags);
+       if (pcp->count >= pcp->batch)
+               to_drain = pcp->batch;
+       else
+               to_drain = pcp->count;
+       free_pages_bulk(zone, to_drain, &pcp->list, 0);
+       pcp->count -= to_drain;
+       local_irq_restore(flags);
 }
 #endif
 
-#if defined(CONFIG_PM) || defined(CONFIG_HOTPLUG_CPU)
 static void __drain_pages(unsigned int cpu)
 {
        unsigned long flags;
@@ -710,6 +710,9 @@ static void __drain_pages(unsigned int cpu)
        for_each_zone(zone) {
                struct per_cpu_pageset *pset;
 
+               if (!populated_zone(zone))
+                       continue;
+
                pset = zone_pcp(zone, cpu);
                for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
                        struct per_cpu_pages *pcp;
@@ -722,7 +725,6 @@ static void __drain_pages(unsigned int cpu)
                }
        }
 }
-#endif /* CONFIG_PM || CONFIG_HOTPLUG_CPU */
 
 #ifdef CONFIG_PM
 
@@ -743,8 +745,8 @@ void mark_free_pages(struct zone *zone)
                if (pfn_valid(pfn)) {
                        struct page *page = pfn_to_page(pfn);
 
-                       if (!PageNosave(page))
-                               ClearPageNosaveFree(page);
+                       if (!swsusp_page_is_forbidden(page))
+                               swsusp_unset_page_free(page);
                }
 
        for (order = MAX_ORDER - 1; order >= 0; --order)
@@ -753,7 +755,7 @@ void mark_free_pages(struct zone *zone)
 
                        pfn = page_to_pfn(list_entry(curr, struct page, lru));
                        for (i = 0; i < (1UL << order); i++)
-                               SetPageNosaveFree(pfn_to_page(pfn + i));
+                               swsusp_set_page_free(pfn_to_page(pfn + i));
                }
 
        spin_unlock_irqrestore(&zone->lock, flags);
@@ -853,7 +855,7 @@ again:
                pcp = &zone_pcp(zone, cpu)->pcp[cold];
                local_irq_save(flags);
                if (!pcp->count) {
-                       pcp->count += rmqueue_bulk(zone, 0,
+                       pcp->count = rmqueue_bulk(zone, 0,
                                                pcp->batch, &pcp->list);
                        if (unlikely(!pcp->count))
                                goto failed;
@@ -893,6 +895,101 @@ failed:
 #define ALLOC_HIGH             0x20 /* __GFP_HIGH set */
 #define ALLOC_CPUSET           0x40 /* check for correct cpuset */
 
+#ifdef CONFIG_FAIL_PAGE_ALLOC
+
+static struct fail_page_alloc_attr {
+       struct fault_attr attr;
+
+       u32 ignore_gfp_highmem;
+       u32 ignore_gfp_wait;
+       u32 min_order;
+
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+
+       struct dentry *ignore_gfp_highmem_file;
+       struct dentry *ignore_gfp_wait_file;
+       struct dentry *min_order_file;
+
+#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
+
+} fail_page_alloc = {
+       .attr = FAULT_ATTR_INITIALIZER,
+       .ignore_gfp_wait = 1,
+       .ignore_gfp_highmem = 1,
+       .min_order = 1,
+};
+
+static int __init setup_fail_page_alloc(char *str)
+{
+       return setup_fault_attr(&fail_page_alloc.attr, str);
+}
+__setup("fail_page_alloc=", setup_fail_page_alloc);
+
+static int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+{
+       if (order < fail_page_alloc.min_order)
+               return 0;
+       if (gfp_mask & __GFP_NOFAIL)
+               return 0;
+       if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
+               return 0;
+       if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
+               return 0;
+
+       return should_fail(&fail_page_alloc.attr, 1 << order);
+}
+
+#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
+
+static int __init fail_page_alloc_debugfs(void)
+{
+       mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+       struct dentry *dir;
+       int err;
+
+       err = init_fault_attr_dentries(&fail_page_alloc.attr,
+                                      "fail_page_alloc");
+       if (err)
+               return err;
+       dir = fail_page_alloc.attr.dentries.dir;
+
+       fail_page_alloc.ignore_gfp_wait_file =
+               debugfs_create_bool("ignore-gfp-wait", mode, dir,
+                                     &fail_page_alloc.ignore_gfp_wait);
+
+       fail_page_alloc.ignore_gfp_highmem_file =
+               debugfs_create_bool("ignore-gfp-highmem", mode, dir,
+                                     &fail_page_alloc.ignore_gfp_highmem);
+       fail_page_alloc.min_order_file =
+               debugfs_create_u32("min-order", mode, dir,
+                                  &fail_page_alloc.min_order);
+
+       if (!fail_page_alloc.ignore_gfp_wait_file ||
+            !fail_page_alloc.ignore_gfp_highmem_file ||
+            !fail_page_alloc.min_order_file) {
+               err = -ENOMEM;
+               debugfs_remove(fail_page_alloc.ignore_gfp_wait_file);
+               debugfs_remove(fail_page_alloc.ignore_gfp_highmem_file);
+               debugfs_remove(fail_page_alloc.min_order_file);
+               cleanup_fault_attr_dentries(&fail_page_alloc.attr);
+       }
+
+       return err;
+}
+
+late_initcall(fail_page_alloc_debugfs);
+
+#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
+
+#else /* CONFIG_FAIL_PAGE_ALLOC */
+
+static inline int should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
+{
+       return 0;
+}
+
+#endif /* CONFIG_FAIL_PAGE_ALLOC */
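
A brief, non-authoritative usage note for the block above: setup_fault_attr() takes the generic fault-injection string, and the debugfs files created by fail_page_alloc_debugfs() adjust the behaviour at run time. The field order and debugfs mount point below are assumptions based on the common fault-injection framework, not something this diff defines.

/*
 * Hypothetical example only:
 *
 *   boot:    fail_page_alloc=<interval>,<probability>,<space>,<times>
 *            e.g. fail_page_alloc=1,10,0,-1
 *   runtime: echo 0 > /sys/kernel/debug/fail_page_alloc/ignore-gfp-wait
 *            echo 2 > /sys/kernel/debug/fail_page_alloc/min-order
 */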
+
 /*
  * Return 1 if free pages are above 'mark'. This takes into account the order
  * of the allocation.
@@ -901,8 +998,8 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
                      int classzone_idx, int alloc_flags)
 {
        /* free_pages may go negative - that's OK */
-       unsigned long min = mark;
-       long free_pages = z->free_pages - (1 << order) + 1;
+       long min = mark;
+       long free_pages = zone_page_state(z, NR_FREE_PAGES) - (1 << order) + 1;
        int o;
 
        if (alloc_flags & ALLOC_HIGH)
@@ -925,31 +1022,160 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
        return 1;
 }
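
The order-aware part of zone_watermark_ok() falls outside this hunk; purely as an illustration of what "takes into account the order" means here, a sketch of that logic follows (an approximation, not the exact code elided by the hunk boundaries).

	/*
	 * Sketch: for each order below the one requested, discount the
	 * free pages sitting in that order's free lists and demand
	 * progressively less headroom before giving up.
	 */
	for (o = 0; o < order; o++) {
		free_pages -= z->free_area[o].nr_free << o;
		min >>= 1;
		if (free_pages <= min)
			return 0;
	}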
 
+#ifdef CONFIG_NUMA
+/*
+ * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
+ * skip over zones that are not allowed by the cpuset, or that have
+ * been recently (in last second) found to be nearly full.  See further
+ * comments in mmzone.h.  Reduces cache footprint of zonelist scans
+ * that have to skip over a lot of full or unallowed zones.
+ *
+ * If the zonelist cache is present in the passed in zonelist, then
+ * returns a pointer to the allowed node mask (either the current
+ * task's mems_allowed, or node_online_map.)
+ *
+ * If the zonelist cache is not available for this zonelist, does
+ * nothing and returns NULL.
+ *
+ * If the fullzones BITMAP in the zonelist cache is stale (more than
+ * a second since last zap'd) then we zap it out (clear its bits.)
+ *
+ * We hold off even calling zlc_setup, until after we've checked the
+ * first zone in the zonelist, on the theory that most allocations will
+ * be satisfied from that first zone, so best to examine that zone as
+ * quickly as we can.
+ */
+static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
+{
+       struct zonelist_cache *zlc;     /* cached zonelist speedup info */
+       nodemask_t *allowednodes;       /* zonelist_cache approximation */
+
+       zlc = zonelist->zlcache_ptr;
+       if (!zlc)
+               return NULL;
+
+       if (jiffies - zlc->last_full_zap > 1 * HZ) {
+               bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
+               zlc->last_full_zap = jiffies;
+       }
+
+       allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
+                                       &cpuset_current_mems_allowed :
+                                       &node_online_map;
+       return allowednodes;
+}
+
+/*
+ * Given 'z' scanning a zonelist, run a couple of quick checks to see
+ * if it is worth looking at further for free memory:
+ *  1) Check that the zone isn't thought to be full (doesn't have its
+ *     bit set in the zonelist_cache fullzones BITMAP).
+ *  2) Check that the zones node (obtained from the zonelist_cache
+ *     z_to_n[] mapping) is allowed in the passed in allowednodes mask.
+ * Return true (non-zero) if zone is worth looking at further, or
+ * else return false (zero) if it is not.
+ *
+ * This check -ignores- the distinction between various watermarks,
+ * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
+ * found to be full for any variation of these watermarks, it will
+ * be considered full for up to one second by all requests, unless
+ * we are so low on memory on all allowed nodes that we are forced
+ * into the second scan of the zonelist.
+ *
+ * In the second scan we ignore this zonelist cache and exactly
+ * apply the watermarks to all zones, even though it is slower to do so.
+ * We are low on memory in the second scan, and should leave no stone
+ * unturned looking for a free page.
+ */
+static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
+                                               nodemask_t *allowednodes)
+{
+       struct zonelist_cache *zlc;     /* cached zonelist speedup info */
+       int i;                          /* index of *z in zonelist zones */
+       int n;                          /* node that zone *z is on */
+
+       zlc = zonelist->zlcache_ptr;
+       if (!zlc)
+               return 1;
+
+       i = z - zonelist->zones;
+       n = zlc->z_to_n[i];
+
+       /* This zone is worth trying if it is allowed but not full */
+       return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
+}
+
+/*
+ * Given 'z' scanning a zonelist, set the corresponding bit in
+ * zlc->fullzones, so that subsequent attempts to allocate a page
+ * from that zone don't waste time re-examining it.
+ */
+static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
+{
+       struct zonelist_cache *zlc;     /* cached zonelist speedup info */
+       int i;                          /* index of *z in zonelist zones */
+
+       zlc = zonelist->zlcache_ptr;
+       if (!zlc)
+               return;
+
+       i = z - zonelist->zones;
+
+       set_bit(i, zlc->fullzones);
+}
+
+#else  /* CONFIG_NUMA */
+
+static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
+{
+       return NULL;
+}
+
+static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zone **z,
+                               nodemask_t *allowednodes)
+{
+       return 1;
+}
+
+static void zlc_mark_zone_full(struct zonelist *zonelist, struct zone **z)
+{
+}
+#endif /* CONFIG_NUMA */
+
 /*
- * get_page_from_freeliest goes through the zonelist trying to allocate
+ * get_page_from_freelist goes through the zonelist trying to allocate
  * a page.
  */
 static struct page *
 get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
                struct zonelist *zonelist, int alloc_flags)
 {
-       struct zone **z = zonelist->zones;
+       struct zone **z;
        struct page *page = NULL;
-       int classzone_idx = zone_idx(*z);
+       int classzone_idx = zone_idx(zonelist->zones[0]);
        struct zone *zone;
+       nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
+       int zlc_active = 0;             /* set if using zonelist_cache */
+       int did_zlc_setup = 0;          /* just call zlc_setup() one time */
 
+zonelist_scan:
        /*
-        * Go through the zonelist once, looking for a zone with enough free.
+        * Scan zonelist, looking for a zone with enough free.
         * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
         */
+       z = zonelist->zones;
+
        do {
+               if (NUMA_BUILD && zlc_active &&
+                       !zlc_zone_worth_trying(zonelist, z, allowednodes))
+                               continue;
                zone = *z;
                if (unlikely(NUMA_BUILD && (gfp_mask & __GFP_THISNODE) &&
                        zone->zone_pgdat != zonelist->zones[0]->zone_pgdat))
                                break;
                if ((alloc_flags & ALLOC_CPUSET) &&
-                               !cpuset_zone_allowed(zone, gfp_mask))
-                       continue;
+                       !cpuset_zone_allowed_softwall(zone, gfp_mask))
+                               goto try_next_zone;
 
                if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
                        unsigned long mark;
@@ -959,18 +1185,34 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order,
                                mark = zone->pages_low;
                        else
                                mark = zone->pages_high;
-                       if (!zone_watermark_ok(zone , order, mark,
-                                   classzone_idx, alloc_flags))
+                       if (!zone_watermark_ok(zone, order, mark,
+                                   classzone_idx, alloc_flags)) {
                                if (!zone_reclaim_mode ||
                                    !zone_reclaim(zone, gfp_mask, order))
-                                       continue;
+                                       goto this_zone_full;
+                       }
                }
 
                page = buffered_rmqueue(zonelist, zone, order, gfp_mask);
-               if (page) {
+               if (page)
                        break;
+this_zone_full:
+               if (NUMA_BUILD)
+                       zlc_mark_zone_full(zonelist, z);
+try_next_zone:
+               if (NUMA_BUILD && !did_zlc_setup) {
+                       /* we do zlc_setup after the first zone is tried */
+                       allowednodes = zlc_setup(zonelist, alloc_flags);
+                       zlc_active = 1;
+                       did_zlc_setup = 1;
                }
        } while (*(++z) != NULL);
+
+       if (unlikely(NUMA_BUILD && page == NULL && zlc_active)) {
+               /* Disable zlc cache for second zonelist scan */
+               zlc_active = 0;
+               goto zonelist_scan;
+       }
        return page;
 }
 
@@ -992,6 +1234,9 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order,
 
        might_sleep_if(wait);
 
+       if (should_fail_alloc_page(gfp_mask, order))
+               return NULL;
+
 restart:
        z = zonelist->zones;  /* the list of zones suitable for gfp_mask */
 
@@ -1005,9 +1250,19 @@ restart:
        if (page)
                goto got_pg;
 
-       do {
+       /*
+        * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
+        * __GFP_NOWARN set) should not cause reclaim since the subsystem
+        * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
+        * using a larger set of nodes after it has established that the
+        * allowed per node queues are empty and that nodes are
+        * over allocated.
+        */
+       if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
+               goto nopage;
+
+       for (z = zonelist->zones; *z; z++)
                wakeup_kswapd(*z, order);
-       } while (*(++z));
 
        /*
         * OK, we're below the kswapd watermark and have kicked background
@@ -1041,6 +1296,7 @@ restart:
 
        /* This allocation should allow future memory freeing. */
 
+rebalance:
        if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
                        && !in_interrupt()) {
                if (!(gfp_mask & __GFP_NOMEMALLOC)) {
@@ -1062,7 +1318,6 @@ nofail_alloc:
        if (!wait)
                goto nopage;
 
-rebalance:
        cond_resched();
 
        /* We now go into synchronous reclaim */
@@ -1071,7 +1326,7 @@ rebalance:
        reclaim_state.reclaimed_slab = 0;
        p->reclaim_state = &reclaim_state;
 
-       did_some_progress = try_to_free_pages(zonelist->zones, gfp_mask);
+       did_some_progress = try_to_free_pages(zonelist->zones, order, gfp_mask);
 
        p->reclaim_state = NULL;
        p->flags &= ~PF_MEMALLOC;
@@ -1108,7 +1363,8 @@ rebalance:
         */
        do_retry = 0;
        if (!(gfp_mask & __GFP_NORETRY)) {
-               if ((order <= 3) || (gfp_mask & __GFP_REPEAT))
+               if ((order <= PAGE_ALLOC_COSTLY_ORDER) ||
+                                               (gfp_mask & __GFP_REPEAT))
                        do_retry = 1;
                if (gfp_mask & __GFP_NOFAIL)
                        do_retry = 1;
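
PAGE_ALLOC_COSTLY_ORDER itself is not defined in this file; given the literal it replaces, it is presumably:

/* Presumed definition elsewhere (e.g. include/linux/mmzone.h), preserving "order <= 3" */
#define PAGE_ALLOC_COSTLY_ORDER 3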
@@ -1194,35 +1450,6 @@ fastcall void free_pages(unsigned long addr, unsigned int order)
 
 EXPORT_SYMBOL(free_pages);
 
-/*
- * Total amount of free (allocatable) RAM:
- */
-unsigned int nr_free_pages(void)
-{
-       unsigned int sum = 0;
-       struct zone *zone;
-
-       for_each_zone(zone)
-               sum += zone->free_pages;
-
-       return sum;
-}
-
-EXPORT_SYMBOL(nr_free_pages);
-
-#ifdef CONFIG_NUMA
-unsigned int nr_free_pages_pgdat(pg_data_t *pgdat)
-{
-       unsigned int sum = 0;
-       enum zone_type i;
-
-       for (i = 0; i < MAX_NR_ZONES; i++)
-               sum += pgdat->node_zones[i].free_pages;
-
-       return sum;
-}
-#endif
-
 static unsigned int nr_free_zone_pages(int offset)
 {
        /* Just pick one node, since fallback list is circular */
@@ -1250,26 +1477,27 @@ unsigned int nr_free_buffer_pages(void)
 {
        return nr_free_zone_pages(gfp_zone(GFP_USER));
 }
+EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
 
 /*
  * Amount of free RAM allocatable within all zones
  */
 unsigned int nr_free_pagecache_pages(void)
 {
-       return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER));
+       return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
 }
 
 static inline void show_node(struct zone *zone)
 {
        if (NUMA_BUILD)
-               printk("Node %ld ", zone_to_nid(zone));
+               printk("Node %d ", zone_to_nid(zone));
 }
 
 void si_meminfo(struct sysinfo *val)
 {
        val->totalram = totalram_pages;
        val->sharedram = 0;
-       val->freeram = nr_free_pages();
+       val->freeram = global_page_state(NR_FREE_PAGES);
        val->bufferram = nr_blockdev_pages();
        val->totalhigh = totalhigh_pages;
        val->freehigh = nr_free_highpages();
@@ -1284,10 +1512,11 @@ void si_meminfo_node(struct sysinfo *val, int nid)
        pg_data_t *pgdat = NODE_DATA(nid);
 
        val->totalram = pgdat->node_present_pages;
-       val->freeram = nr_free_pages_pgdat(pgdat);
+       val->freeram = node_page_state(nid, NR_FREE_PAGES);
 #ifdef CONFIG_HIGHMEM
        val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].present_pages;
-       val->freehigh = pgdat->node_zones[ZONE_HIGHMEM].free_pages;
+       val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
+                       NR_FREE_PAGES);
 #else
        val->totalhigh = 0;
        val->freehigh = 0;
@@ -1306,9 +1535,6 @@ void si_meminfo_node(struct sysinfo *val, int nid)
 void show_free_areas(void)
 {
        int cpu;
-       unsigned long active;
-       unsigned long inactive;
-       unsigned long free;
        struct zone *zone;
 
        for_each_zone(zone) {
@@ -1332,20 +1558,19 @@ void show_free_areas(void)
                }
        }
 
-       get_zone_counts(&active, &inactive, &free);
-
-       printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu "
-               "unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n",
-               active,
-               inactive,
+       printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n"
+               " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
+               global_page_state(NR_ACTIVE),
+               global_page_state(NR_INACTIVE),
                global_page_state(NR_FILE_DIRTY),
                global_page_state(NR_WRITEBACK),
                global_page_state(NR_UNSTABLE_NFS),
-               nr_free_pages(),
+               global_page_state(NR_FREE_PAGES),
                global_page_state(NR_SLAB_RECLAIMABLE) +
                        global_page_state(NR_SLAB_UNRECLAIMABLE),
                global_page_state(NR_FILE_MAPPED),
-               global_page_state(NR_PAGETABLE));
+               global_page_state(NR_PAGETABLE),
+               global_page_state(NR_BOUNCE));
 
        for_each_zone(zone) {
                int i;
@@ -1366,12 +1591,12 @@ void show_free_areas(void)
                        " all_unreclaimable? %s"
                        "\n",
                        zone->name,
-                       K(zone->free_pages),
+                       K(zone_page_state(zone, NR_FREE_PAGES)),
                        K(zone->pages_min),
                        K(zone->pages_low),
                        K(zone->pages_high),
-                       K(zone->nr_active),
-                       K(zone->nr_inactive),
+                       K(zone_page_state(zone, NR_ACTIVE)),
+                       K(zone_page_state(zone, NR_INACTIVE)),
                        K(zone->present_pages),
                        zone->pages_scanned,
                        (zone->all_unreclaimable ? "yes" : "no")
@@ -1410,8 +1635,8 @@ void show_free_areas(void)
  *
  * Add all populated zones of a node to the zonelist.
  */
-static int __meminit build_zonelists_node(pg_data_t *pgdat,
-                       struct zonelist *zonelist, int nr_zones, enum zone_type zone_type)
+static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
+                               int nr_zones, enum zone_type zone_type)
 {
        struct zone *zone;
 
@@ -1430,9 +1655,102 @@ static int __meminit build_zonelists_node(pg_data_t *pgdat,
        return nr_zones;
 }
 
+
+/*
+ *  zonelist_order:
+ *  0 = automatic detection of better ordering.
+ *  1 = order by ([node] distance, -zonetype)
+ *  2 = order by (-zonetype, [node] distance)
+ *
+ *  If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
+ *  the same zonelist. So only NUMA can configure this param.
+ */
+#define ZONELIST_ORDER_DEFAULT  0
+#define ZONELIST_ORDER_NODE     1
+#define ZONELIST_ORDER_ZONE     2
+
+/* zonelist order in the kernel.
+ * set_zonelist_order() will set this to NODE or ZONE.
+ */
+static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
+static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
+
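
To make the two orderings concrete, a hypothetical two-node machine (allocating from node 0, each node having a DMA and a Normal zone) would get fallback lists roughly like this:

/*
 * Node order: Normal(0), DMA(0), Normal(1), DMA(1)
 *             -- maximum locality, but node 0's DMA zone can be eaten
 *                by ordinary allocations.
 * Zone order: Normal(0), Normal(1), DMA(0), DMA(1)
 *             -- DMA zones are preserved until all Normal memory is
 *                exhausted, at the cost of going off-node earlier.
 */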
+
 #ifdef CONFIG_NUMA
+/* The value the user specified; may be changed via the boot option or sysctl */
+static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
+/* string for sysctl */
+#define NUMA_ZONELIST_ORDER_LEN        16
+char numa_zonelist_order[16] = "default";
+
+/*
+ * interface to configure zonelist ordering.
+ * command line option "numa_zonelist_order"
+ *     = "[dD]efault"  - default, automatic configuration.
+ *     = "[nN]ode"     - order by node locality, then by zone within node
+ *     = "[zZ]one"     - order by zone, then by locality within zone
+ */
+
+static int __parse_numa_zonelist_order(char *s)
+{
+       if (*s == 'd' || *s == 'D') {
+               user_zonelist_order = ZONELIST_ORDER_DEFAULT;
+       } else if (*s == 'n' || *s == 'N') {
+               user_zonelist_order = ZONELIST_ORDER_NODE;
+       } else if (*s == 'z' || *s == 'Z') {
+               user_zonelist_order = ZONELIST_ORDER_ZONE;
+       } else {
+               printk(KERN_WARNING
+                       "Ignoring invalid numa_zonelist_order value:  "
+                       "%s\n", s);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static __init int setup_numa_zonelist_order(char *s)
+{
+       if (s)
+               return __parse_numa_zonelist_order(s);
+       return 0;
+}
+early_param("numa_zonelist_order", setup_numa_zonelist_order);
+
+/*
+ * sysctl handler for numa_zonelist_order
+ */
+int numa_zonelist_order_handler(ctl_table *table, int write,
+               struct file *file, void __user *buffer, size_t *length,
+               loff_t *ppos)
+{
+       char saved_string[NUMA_ZONELIST_ORDER_LEN];
+       int ret;
+
+       if (write)
+               strncpy(saved_string, (char*)table->data,
+                       NUMA_ZONELIST_ORDER_LEN);
+       ret = proc_dostring(table, write, file, buffer, length, ppos);
+       if (ret)
+               return ret;
+       if (write) {
+               int oldval = user_zonelist_order;
+               if (__parse_numa_zonelist_order((char*)table->data)) {
+                       /*
+                        * bogus value.  restore saved string
+                        */
+                       strncpy((char*)table->data, saved_string,
+                               NUMA_ZONELIST_ORDER_LEN);
+                       user_zonelist_order = oldval;
+               } else if (oldval != user_zonelist_order)
+                       build_all_zonelists();
+       }
+       return 0;
+}
+
+
 #define MAX_NODE_LOAD (num_online_nodes())
-static int __meminitdata node_load[MAX_NUMNODES];
+static int node_load[MAX_NUMNODES];
+
 /**
  * find_next_best_node - find the next node that should appear in a given node's fallback list
  * @node: node whose fallback list we're appending
@@ -1447,7 +1765,7 @@ static int __meminitdata node_load[MAX_NUMNODES];
  * on them otherwise.
  * It returns -1 if no node is found.
  */
-static int __meminit find_next_best_node(int node, nodemask_t *used_node_mask)
+static int find_next_best_node(int node, nodemask_t *used_node_mask)
 {
        int n, val;
        int min_val = INT_MAX;
@@ -1493,13 +1811,129 @@ static int __meminit find_next_best_node(int node, nodemask_t *used_node_mask)
        return best_node;
 }
 
-static void __meminit build_zonelists(pg_data_t *pgdat)
+
+/*
+ * Build zonelists ordered by node and zones within node.
+ * This results in maximum locality--normal zone overflows into local
+ * DMA zone, if any--but risks exhausting DMA zone.
+ */
+static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
+{
+       enum zone_type i;
+       int j;
+       struct zonelist *zonelist;
+
+       for (i = 0; i < MAX_NR_ZONES; i++) {
+               zonelist = pgdat->node_zonelists + i;
+               for (j = 0; zonelist->zones[j] != NULL; j++)
+                       ;
+               j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
+               zonelist->zones[j] = NULL;
+       }
+}
+
+/*
+ * Build zonelists ordered by zone and nodes within zones.
+ * This results in conserving DMA zone[s] until all Normal memory is
+ * exhausted, but results in overflowing to remote node while memory
+ * may still exist in local DMA zone.
+ */
+static int node_order[MAX_NUMNODES];
+
+static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
 {
-       int j, node, local_node;
        enum zone_type i;
-       int prev_node, load;
+       int pos, j, node;
+       int zone_type;          /* needs to be signed */
+       struct zone *z;
        struct zonelist *zonelist;
+
+       for (i = 0; i < MAX_NR_ZONES; i++) {
+               zonelist = pgdat->node_zonelists + i;
+               pos = 0;
+               for (zone_type = i; zone_type >= 0; zone_type--) {
+                       for (j = 0; j < nr_nodes; j++) {
+                               node = node_order[j];
+                               z = &NODE_DATA(node)->node_zones[zone_type];
+                               if (populated_zone(z)) {
+                                       zonelist->zones[pos++] = z;
+                                       check_highest_zone(zone_type);
+                               }
+                       }
+               }
+               zonelist->zones[pos] = NULL;
+       }
+}
+
+static int default_zonelist_order(void)
+{
+       int nid, zone_type;
+       unsigned long low_kmem_size,total_size;
+       struct zone *z;
+       int average_size;
+       /*
+        * ZONE_DMA and ZONE_DMA32 can be very small areas in the system.
+        * If they are really small and used heavily, the system can fall
+        * into OOM very easily.
+        * This function detects ZONE_DMA/DMA32 size and configures zone order.
+        */
+       /* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */
+       low_kmem_size = 0;
+       total_size = 0;
+       for_each_online_node(nid) {
+               for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
+                       z = &NODE_DATA(nid)->node_zones[zone_type];
+                       if (populated_zone(z)) {
+                               if (zone_type < ZONE_NORMAL)
+                                       low_kmem_size += z->present_pages;
+                               total_size += z->present_pages;
+                       }
+               }
+       }
+       if (!low_kmem_size ||  /* there is no DMA area. */
+           low_kmem_size > total_size/2) /* DMA/DMA32 is big. */
+               return ZONELIST_ORDER_NODE;
+       /*
+        * look into each node's config.
+        * If there is a node whose DMA/DMA32 memory covers a very large
+        * part of its local memory, NODE_ORDER may be suitable.
+        */
+       average_size = total_size / (num_online_nodes() + 1);
+       for_each_online_node(nid) {
+               low_kmem_size = 0;
+               total_size = 0;
+               for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
+                       z = &NODE_DATA(nid)->node_zones[zone_type];
+                       if (populated_zone(z)) {
+                               if (zone_type < ZONE_NORMAL)
+                                       low_kmem_size += z->present_pages;
+                               total_size += z->present_pages;
+                       }
+               }
+               if (low_kmem_size &&
+                   total_size > average_size && /* ignore small node */
+                   low_kmem_size > total_size * 70/100)
+                       return ZONELIST_ORDER_NODE;
+       }
+       return ZONELIST_ORDER_ZONE;
+}
+
+static void set_zonelist_order(void)
+{
+       if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
+               current_zonelist_order = default_zonelist_order();
+       else
+               current_zonelist_order = user_zonelist_order;
+}
+
+static void build_zonelists(pg_data_t *pgdat)
+{
+       int j, node, load;
+       enum zone_type i;
        nodemask_t used_mask;
+       int local_node, prev_node;
+       struct zonelist *zonelist;
+       int order = current_zonelist_order;
 
        /* initialize zonelists */
        for (i = 0; i < MAX_NR_ZONES; i++) {
@@ -1512,6 +1946,11 @@ static void __meminit build_zonelists(pg_data_t *pgdat)
        load = num_online_nodes();
        prev_node = local_node;
        nodes_clear(used_mask);
+
+       memset(node_load, 0, sizeof(node_load));
+       memset(node_order, 0, sizeof(node_order));
+       j = 0;
+
        while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
                int distance = node_distance(local_node, node);
 
@@ -1527,24 +1966,50 @@ static void __meminit build_zonelists(pg_data_t *pgdat)
                 * So adding penalty to the first node in same
                 * distance group to make it round-robin.
                 */
-
                if (distance != node_distance(local_node, prev_node))
-                       node_load[node] += load;
+                       node_load[node] = load;
+
                prev_node = node;
                load--;
-               for (i = 0; i < MAX_NR_ZONES; i++) {
-                       zonelist = pgdat->node_zonelists + i;
-                       for (j = 0; zonelist->zones[j] != NULL; j++);
+               if (order == ZONELIST_ORDER_NODE)
+                       build_zonelists_in_node_order(pgdat, node);
+               else
+                       node_order[j++] = node; /* remember order */
+       }
 
-                       j = build_zonelists_node(NODE_DATA(node), zonelist, j, i);
-                       zonelist->zones[j] = NULL;
-               }
+       if (order == ZONELIST_ORDER_ZONE) {
+               /* calculate node order -- i.e., DMA last! */
+               build_zonelists_in_zone_order(pgdat, j);
+       }
+}
+
+/* Construct the zonelist performance cache - see further mmzone.h */
+static void build_zonelist_cache(pg_data_t *pgdat)
+{
+       int i;
+
+       for (i = 0; i < MAX_NR_ZONES; i++) {
+               struct zonelist *zonelist;
+               struct zonelist_cache *zlc;
+               struct zone **z;
+
+               zonelist = pgdat->node_zonelists + i;
+               zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
+               bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
+               for (z = zonelist->zones; *z; z++)
+                       zlc->z_to_n[z - zonelist->zones] = zone_to_nid(*z);
        }
 }
 
+
 #else  /* CONFIG_NUMA */
 
-static void __meminit build_zonelists(pg_data_t *pgdat)
+static void set_zonelist_order(void)
+{
+       current_zonelist_order = ZONELIST_ORDER_ZONE;
+}
+
+static void build_zonelists(pg_data_t *pgdat)
 {
        int node, local_node;
        enum zone_type i,j;
@@ -1579,19 +2044,33 @@ static void __meminit build_zonelists(pg_data_t *pgdat)
        }
 }
 
+/* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
+static void build_zonelist_cache(pg_data_t *pgdat)
+{
+       int i;
+
+       for (i = 0; i < MAX_NR_ZONES; i++)
+               pgdat->node_zonelists[i].zlcache_ptr = NULL;
+}
+
 #endif /* CONFIG_NUMA */
 
 /* return value is int ... just for stop_machine_run() */
-static int __meminit __build_all_zonelists(void *dummy)
+static int __build_all_zonelists(void *dummy)
 {
        int nid;
-       for_each_online_node(nid)
+
+       for_each_online_node(nid) {
                build_zonelists(NODE_DATA(nid));
+               build_zonelist_cache(NODE_DATA(nid));
+       }
        return 0;
 }
 
-void __meminit build_all_zonelists(void)
+void build_all_zonelists(void)
 {
+       set_zonelist_order();
+
        if (system_state == SYSTEM_BOOTING) {
                __build_all_zonelists(NULL);
                cpuset_init_current_mems_allowed();
@@ -1602,8 +2081,13 @@ void __meminit build_all_zonelists(void)
                /* cpuset refresh routine should be here */
        }
        vm_total_pages = nr_free_pagecache_pages();
-       printk("Built %i zonelists.  Total pages: %ld\n",
-                       num_online_nodes(), vm_total_pages);
+       printk("Built %i zonelists in %s order.  Total pages: %ld\n",
+                       num_online_nodes(),
+                       zonelist_order_name[current_zonelist_order],
+                       vm_total_pages);
+#ifdef CONFIG_NUMA
+       printk("Policy zone: %s\n", zone_names[policy_zone]);
+#endif
 }
 
 /*
@@ -1680,15 +2164,24 @@ static inline unsigned long wait_table_bits(unsigned long size)
  * done. Non-atomic initialization, single-pass.
  */
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-               unsigned long start_pfn)
+               unsigned long start_pfn, enum memmap_context context)
 {
        struct page *page;
        unsigned long end_pfn = start_pfn + size;
        unsigned long pfn;
 
        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
-               if (!early_pfn_valid(pfn))
-                       continue;
+               /*
+                * There can be holes in boot-time mem_map[]s
+                * handed to this function.  They do not
+                * exist on hotplugged memory.
+                */
+               if (context == MEMMAP_EARLY) {
+                       if (!early_pfn_valid(pfn))
+                               continue;
+                       if (!early_pfn_in_nid(pfn, nid))
+                               continue;
+               }
                page = pfn_to_page(pfn);
                set_page_links(page, zone, nid, pfn);
                init_page_count(page);
@@ -1703,8 +2196,8 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
        }
 }
 
-void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
-                               unsigned long size)
+static void __meminit zone_init_free_lists(struct pglist_data *pgdat,
+                               struct zone *zone, unsigned long size)
 {
        int order;
        for (order = 0; order < MAX_ORDER ; order++) {
@@ -1713,26 +2206,12 @@ void zone_init_free_lists(struct pglist_data *pgdat, struct zone *zone,
        }
 }
 
-#define ZONETABLE_INDEX(x, zone_nr)    ((x << ZONES_SHIFT) | zone_nr)
-void zonetable_add(struct zone *zone, int nid, enum zone_type zid,
-               unsigned long pfn, unsigned long size)
-{
-       unsigned long snum = pfn_to_section_nr(pfn);
-       unsigned long end = pfn_to_section_nr(pfn + size);
-
-       if (FLAGS_HAS_NODE)
-               zone_table[ZONETABLE_INDEX(nid, zid)] = zone;
-       else
-               for (; snum <= end; snum++)
-                       zone_table[ZONETABLE_INDEX(snum, zid)] = zone;
-}
-
 #ifndef __HAVE_ARCH_MEMMAP_INIT
 #define memmap_init(size, nid, zone, start_pfn) \
-       memmap_init_zone((size), (nid), (zone), (start_pfn))
+       memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
 #endif
 
-static int __cpuinit zone_batchsize(struct zone *zone)
+static int __devinit zone_batchsize(struct zone *zone)
 {
        int batch;
 
@@ -1879,16 +2358,19 @@ static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
        int ret = NOTIFY_OK;
 
        switch (action) {
-               case CPU_UP_PREPARE:
-                       if (process_zones(cpu))
-                               ret = NOTIFY_BAD;
-                       break;
-               case CPU_UP_CANCELED:
-               case CPU_DEAD:
-                       free_zone_pagesets(cpu);
-                       break;
-               default:
-                       break;
+       case CPU_UP_PREPARE:
+       case CPU_UP_PREPARE_FROZEN:
+               if (process_zones(cpu))
+                       ret = NOTIFY_BAD;
+               break;
+       case CPU_UP_CANCELED:
+       case CPU_UP_CANCELED_FROZEN:
+       case CPU_DEAD:
+       case CPU_DEAD_FROZEN:
+               free_zone_pagesets(cpu);
+               break;
+       default:
+               break;
        }
        return ret;
 }
@@ -1911,7 +2393,7 @@ void __init setup_per_cpu_pageset(void)
 
 #endif
 
-static __meminit
+static noinline __init_refok
 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 {
        int i;
@@ -1975,7 +2457,8 @@ static __meminit void zone_pcp_init(struct zone *zone)
 
 __meminit int init_currently_empty_zone(struct zone *zone,
                                        unsigned long zone_start_pfn,
-                                       unsigned long size)
+                                       unsigned long size,
+                                       enum memmap_context context)
 {
        struct pglist_data *pgdat = zone->zone_pgdat;
        int ret;
@@ -1998,7 +2481,7 @@ __meminit int init_currently_empty_zone(struct zone *zone,
  * Basic iterator support. Return the first range of PFNs for a node
  * Note: nid == MAX_NUMNODES returns first region regardless of node
  */
-static int __init first_active_region_index_in_nid(int nid)
+static int __meminit first_active_region_index_in_nid(int nid)
 {
        int i;
 
@@ -2013,7 +2496,7 @@ static int __init first_active_region_index_in_nid(int nid)
  * Basic iterator support. Return the next active range of PFNs for a node
  * Note: nid == MAX_NUMNODES returns next region regardless of node
  */
-static int __init next_active_region_index_in_nid(int index, int nid)
+static int __meminit next_active_region_index_in_nid(int index, int nid)
 {
        for (index = index + 1; index < nr_nodemap_entries; index++)
                if (nid == MAX_NUMNODES || early_node_map[index].nid == nid)
@@ -2029,7 +2512,7 @@ static int __init next_active_region_index_in_nid(int index, int nid)
  * was used and there are no special requirements, this is a convenient
  * alternative
  */
-int __init early_pfn_to_nid(unsigned long pfn)
+int __meminit early_pfn_to_nid(unsigned long pfn)
 {
        int i;
 
@@ -2130,7 +2613,7 @@ void __init push_node_boundaries(unsigned int nid,
 }
 
 /* If necessary, push the node boundary out for reserve hotadd */
-static void __init account_node_boundary(unsigned int nid,
+static void __meminit account_node_boundary(unsigned int nid,
                unsigned long *start_pfn, unsigned long *end_pfn)
 {
        printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n",
@@ -2150,7 +2633,7 @@ static void __init account_node_boundary(unsigned int nid,
 void __init push_node_boundaries(unsigned int nid,
                unsigned long start_pfn, unsigned long end_pfn) {}
 
-static void __init account_node_boundary(unsigned int nid,
+static void __meminit account_node_boundary(unsigned int nid,
                unsigned long *start_pfn, unsigned long *end_pfn) {}
 #endif
 
@@ -2166,7 +2649,7 @@ static void __init account_node_boundary(unsigned int nid,
  * with no available memory, a warning is printed and the start and end
  * PFNs will be 0.
  */
-void __init get_pfn_range_for_nid(unsigned int nid,
+void __meminit get_pfn_range_for_nid(unsigned int nid,
                        unsigned long *start_pfn, unsigned long *end_pfn)
 {
        int i;
@@ -2188,10 +2671,67 @@ void __init get_pfn_range_for_nid(unsigned int nid,
 }
 
 /*
+ * This finds a zone that can be used for ZONE_MOVABLE pages. The
+ * assumption is made that zones within a node are ordered in
+ * monotonically increasing memory addresses so that the "highest"
+ * populated zone is used.
+ */
+void __init find_usable_zone_for_movable(void)
+{
+       int zone_index;
+       for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
+               if (zone_index == ZONE_MOVABLE)
+                       continue;
+
+               if (arch_zone_highest_possible_pfn[zone_index] >
+                               arch_zone_lowest_possible_pfn[zone_index])
+                       break;
+       }
+
+       VM_BUG_ON(zone_index == -1);
+       movable_zone = zone_index;
+}
+
+/*
+ * The zone ranges provided by the architecture do not include ZONE_MOVABLE
+ * because it is sized independently of the architecture. Unlike the other zones,
+ * the starting point for ZONE_MOVABLE is not fixed. It may be different
+ * in each node depending on the size of each node and how evenly kernelcore
+ * is distributed. This helper function adjusts the zone ranges
+ * provided by the architecture for a given node by using the end of the
+ * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
+ * zones within a node are in order of monotonically increasing memory addresses.
+ */
+void __meminit adjust_zone_range_for_zone_movable(int nid,
+                                       unsigned long zone_type,
+                                       unsigned long node_start_pfn,
+                                       unsigned long node_end_pfn,
+                                       unsigned long *zone_start_pfn,
+                                       unsigned long *zone_end_pfn)
+{
+       /* Only adjust if ZONE_MOVABLE is on this node */
+       if (zone_movable_pfn[nid]) {
+               /* Size ZONE_MOVABLE */
+               if (zone_type == ZONE_MOVABLE) {
+                       *zone_start_pfn = zone_movable_pfn[nid];
+                       *zone_end_pfn = min(node_end_pfn,
+                               arch_zone_highest_possible_pfn[movable_zone]);
+
+               /* Adjust for ZONE_MOVABLE starting within this range */
+               } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
+                               *zone_end_pfn > zone_movable_pfn[nid]) {
+                       *zone_end_pfn = zone_movable_pfn[nid];
+
+               /* Check if this whole range is within ZONE_MOVABLE */
+               } else if (*zone_start_pfn >= zone_movable_pfn[nid])
+                       *zone_start_pfn = *zone_end_pfn;
+       }
+}
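
A worked example with made-up PFNs: if a node spans PFNs 0-1048575, its highest kernel zone also ends at PFN 1048576, and zone_movable_pfn[nid] is 786432, the clipping above yields:

/*
 *   ZONE_MOVABLE on this node          -> [786432, 1048576)
 *   the kernel zone straddling 786432  -> clipped to end at 786432
 *   any zone starting at or above it   -> becomes empty (start == end)
 */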
+
+/*
  * Return the number of pages a zone spans in a node, including holes
  * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
  */
-unsigned long __init zone_spanned_pages_in_node(int nid,
+static unsigned long __meminit zone_spanned_pages_in_node(int nid,
                                        unsigned long zone_type,
                                        unsigned long *ignored)
 {
@@ -2202,6 +2742,9 @@ unsigned long __init zone_spanned_pages_in_node(int nid,
        get_pfn_range_for_nid(nid, &node_start_pfn, &node_end_pfn);
        zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
        zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
+       adjust_zone_range_for_zone_movable(nid, zone_type,
+                               node_start_pfn, node_end_pfn,
+                               &zone_start_pfn, &zone_end_pfn);
 
        /* Check that this node has pages within the zone's required range */
        if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
@@ -2219,7 +2762,7 @@ unsigned long __init zone_spanned_pages_in_node(int nid,
  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
  * then all holes in the requested range will be accounted for.
  */
-unsigned long __init __absent_pages_in_range(int nid,
+unsigned long __meminit __absent_pages_in_range(int nid,
                                unsigned long range_start_pfn,
                                unsigned long range_end_pfn)
 {
@@ -2259,7 +2802,7 @@ unsigned long __init __absent_pages_in_range(int nid,
 
        /* Account for ranges past physical memory on this node */
        if (range_end_pfn > prev_end_pfn)
-               hole_pages = range_end_pfn -
+               hole_pages += range_end_pfn -
                                max(range_start_pfn, prev_end_pfn);
 
        return hole_pages;
@@ -2279,7 +2822,7 @@ unsigned long __init absent_pages_in_range(unsigned long start_pfn,
 }
 
 /* Return the number of page frames in holes in a zone on a node */
-unsigned long __init zone_absent_pages_in_node(int nid,
+static unsigned long __meminit zone_absent_pages_in_node(int nid,
                                        unsigned long zone_type,
                                        unsigned long *ignored)
 {
@@ -2292,18 +2835,21 @@ unsigned long __init zone_absent_pages_in_node(int nid,
        zone_end_pfn = min(arch_zone_highest_possible_pfn[zone_type],
                                                        node_end_pfn);
 
+       adjust_zone_range_for_zone_movable(nid, zone_type,
+                       node_start_pfn, node_end_pfn,
+                       &zone_start_pfn, &zone_end_pfn);
        return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
 }
 
 #else
-static inline unsigned long zone_spanned_pages_in_node(int nid,
+static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
                                        unsigned long zone_type,
                                        unsigned long *zones_size)
 {
        return zones_size[zone_type];
 }
 
-static inline unsigned long zone_absent_pages_in_node(int nid,
+static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
                                                unsigned long zone_type,
                                                unsigned long *zholes_size)
 {
@@ -2315,7 +2861,7 @@ static inline unsigned long zone_absent_pages_in_node(int nid,
 
 #endif
 
-static void __init calculate_node_totalpages(struct pglist_data *pgdat,
+static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
                unsigned long *zones_size, unsigned long *zholes_size)
 {
        unsigned long realtotalpages, totalpages = 0;
@@ -2379,11 +2925,11 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
                                "  %s zone: %lu pages exceeds realsize %lu\n",
                                zone_names[j], memmap_pages, realsize);
 
-               /* Account for reserved DMA pages */
-               if (j == ZONE_DMA && realsize > dma_reserve) {
+               /* Account for reserved pages */
+               if (j == 0 && realsize > dma_reserve) {
                        realsize -= dma_reserve;
-                       printk(KERN_DEBUG "  DMA zone: %lu pages reserved\n",
-                                                               dma_reserve);
+                       printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
+                                       zone_names[0], dma_reserve);
                }
 
                if (!is_highmem_idx(j))
@@ -2403,30 +2949,27 @@ static void __meminit free_area_init_core(struct pglist_data *pgdat,
                spin_lock_init(&zone->lru_lock);
                zone_seqlock_init(zone);
                zone->zone_pgdat = pgdat;
-               zone->free_pages = 0;
 
-               zone->temp_priority = zone->prev_priority = DEF_PRIORITY;
+               zone->prev_priority = DEF_PRIORITY;
 
                zone_pcp_init(zone);
                INIT_LIST_HEAD(&zone->active_list);
                INIT_LIST_HEAD(&zone->inactive_list);
                zone->nr_scan_active = 0;
                zone->nr_scan_inactive = 0;
-               zone->nr_active = 0;
-               zone->nr_inactive = 0;
                zap_zone_vm_stats(zone);
                atomic_set(&zone->reclaim_in_progress, 0);
                if (!size)
                        continue;
 
-               zonetable_add(zone, nid, j, zone_start_pfn, size);
-               ret = init_currently_empty_zone(zone, zone_start_pfn, size);
+               ret = init_currently_empty_zone(zone, zone_start_pfn,
+                                               size, MEMMAP_EARLY);
                BUG_ON(ret);
                zone_start_pfn += size;
        }
 }
 
-static void __init alloc_node_mem_map(struct pglist_data *pgdat)
+static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 {
        /* Skip empty nodes */
        if (!pgdat->node_spanned_pages)
@@ -2452,7 +2995,7 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat)
                        map = alloc_bootmem_node(pgdat, size);
                pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
        }
-#ifdef CONFIG_FLATMEM
+#ifndef CONFIG_NEED_MULTIPLE_NODES
        /*
         * With no DISCONTIG, the global mem_map is just set as node 0's
         */
@@ -2481,6 +3024,26 @@ void __meminit free_area_init_node(int nid, struct pglist_data *pgdat,
 }
 
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+
+#if MAX_NUMNODES > 1
+/*
+ * Figure out the number of possible node ids.
+ */
+static void __init setup_nr_node_ids(void)
+{
+       unsigned int node;
+       unsigned int highest = 0;
+
+       for_each_node_mask(node, node_possible_map)
+               highest = node;
+       nr_node_ids = highest + 1;
+}
+#else
+static inline void setup_nr_node_ids(void)
+{
+}
+#endif
+
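Worth noting: nr_node_ids ends up as the highest possible node id plus one, not as a count of possible nodes, so sparse node numbering leaves unused ids below it. A toy illustration with a hypothetical possible-node set {0, 2, 5} standing in for node_possible_map:

    #include <stdio.h>

    int main(void)
    {
            /* stand-in for for_each_node_mask(node, node_possible_map) */
            unsigned int possible[] = { 0, 2, 5 };
            unsigned int highest = 0;
            unsigned int i;

            for (i = 0; i < 3; i++)
                    highest = possible[i];

            /* prints "nr_node_ids = 6" (not 3) */
            printf("nr_node_ids = %u\n", highest + 1);
            return 0;
    }
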
 /**
  * add_active_range - Register a range of PFNs backed by physical memory
  * @nid: The node ID the range resides on
@@ -2605,17 +3168,23 @@ static void __init sort_node_map(void)
                        cmp_node_active_region, NULL);
 }
 
-/* Find the lowest pfn for a node. This depends on a sorted early_node_map */
+/* Find the lowest pfn for a node */
 unsigned long __init find_min_pfn_for_node(unsigned long nid)
 {
        int i;
+       unsigned long min_pfn = ULONG_MAX;
 
-       /* Assuming a sorted map, the first range found has the starting pfn */
+       /* Walk every range registered for this node and track the lowest pfn */
        for_each_active_range_index_in_nid(i, nid)
-               return early_node_map[i].start_pfn;
+               min_pfn = min(min_pfn, early_node_map[i].start_pfn);
 
-       printk(KERN_WARNING "Could not find start_pfn for node %lu\n", nid);
-       return 0;
+       if (min_pfn == ULONG_MAX) {
+               printk(KERN_WARNING
+                       "Could not find start_pfn for node %lu\n", nid);
+               return 0;
+       }
+
+       return min_pfn;
 }
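
Since the loop now takes a true minimum instead of returning the first match, the result no longer depends on early_node_map being sorted, and a node with no registered ranges falls through to the warning. A toy run with hypothetical, deliberately unsorted start PFNs:

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
            /* hypothetical start_pfn values for one node's ranges */
            unsigned long starts[] = { 100, 5, 50 };
            unsigned long min_pfn = ULONG_MAX;
            int i;

            for (i = 0; i < 3; i++)
                    if (starts[i] < min_pfn)
                            min_pfn = starts[i];

            /* prints "min_pfn = 5"; ULONG_MAX would mean no ranges were found */
            printf("min_pfn = %lu\n", min_pfn);
            return 0;
    }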
 
 /**
@@ -2646,6 +3215,157 @@ unsigned long __init find_max_pfn_with_active_regions(void)
        return max_pfn;
 }
 
+unsigned long __init early_calculate_totalpages(void)
+{
+       int i;
+       unsigned long totalpages = 0;
+
+       for (i = 0; i < nr_nodemap_entries; i++)
+               totalpages += early_node_map[i].end_pfn -
+                                               early_node_map[i].start_pfn;
+
+       return totalpages;
+}
+
+/*
+ * Find the PFN at which the Movable zone begins in each node. Kernel
+ * memory is spread evenly between nodes as long as the nodes have
+ * enough memory. When they don't, some nodes will have more kernelcore
+ * than others.
+ */
+void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
+{
+       int i, nid;
+       unsigned long usable_startpfn;
+       unsigned long kernelcore_node, kernelcore_remaining;
+       int usable_nodes = num_online_nodes();
+
+       /*
+        * If movablecore was specified, calculate the size of
+        * kernelcore that corresponds to it so that memory usable for
+        * any allocation type is evenly spread. If both kernelcore
+        * and movablecore are specified, then the value of kernelcore
+        * will be used for required_kernelcore if it's greater than
+        * what movablecore would have allowed.
+        */
+       if (required_movablecore) {
+               unsigned long totalpages = early_calculate_totalpages();
+               unsigned long corepages;
+
+               /*
+                * Round-up so that ZONE_MOVABLE is at least as large as what
+                * was requested by the user
+                */
+               required_movablecore =
+                       roundup(required_movablecore, MAX_ORDER_NR_PAGES);
+               corepages = totalpages - required_movablecore;
+
+               required_kernelcore = max(required_kernelcore, corepages);
+       }
+
+       /* If kernelcore was not specified, there is no ZONE_MOVABLE */
+       if (!required_kernelcore)
+               return;
+
+       /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
+       find_usable_zone_for_movable();
+       usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
+
+restart:
+       /* Spread kernelcore memory as evenly as possible throughout nodes */
+       kernelcore_node = required_kernelcore / usable_nodes;
+       for_each_online_node(nid) {
+               /*
+                * Recalculate kernelcore_node if the division per node
+                * now exceeds what is necessary to satisfy the requested
+                * amount of memory for the kernel
+                */
+               if (required_kernelcore < kernelcore_node)
+                       kernelcore_node = required_kernelcore / usable_nodes;
+
+               /*
+                * As the map is walked, we track how much memory is usable
+                * by the kernel using kernelcore_remaining. When it is
+                * 0, the rest of the node is usable by ZONE_MOVABLE
+                */
+               kernelcore_remaining = kernelcore_node;
+
+               /* Go through each range of PFNs within this node */
+               for_each_active_range_index_in_nid(i, nid) {
+                       unsigned long start_pfn, end_pfn;
+                       unsigned long size_pages;
+
+                       start_pfn = max(early_node_map[i].start_pfn,
+                                               zone_movable_pfn[nid]);
+                       end_pfn = early_node_map[i].end_pfn;
+                       if (start_pfn >= end_pfn)
+                               continue;
+
+                       /* Account for what is only usable for kernelcore */
+                       if (start_pfn < usable_startpfn) {
+                               unsigned long kernel_pages;
+                               kernel_pages = min(end_pfn, usable_startpfn)
+                                                               - start_pfn;
+
+                               kernelcore_remaining -= min(kernel_pages,
+                                                       kernelcore_remaining);
+                               required_kernelcore -= min(kernel_pages,
+                                                       required_kernelcore);
+
+                               /* Continue if range is now fully accounted */
+                               if (end_pfn <= usable_startpfn) {
+
+                                       /*
+                                        * Push zone_movable_pfn to the end so
+                                        * that if we have to rebalance
+                                        * kernelcore across nodes, we will
+                                        * not double account here
+                                        */
+                                       zone_movable_pfn[nid] = end_pfn;
+                                       continue;
+                               }
+                               start_pfn = usable_startpfn;
+                       }
+
+                       /*
+                        * The usable PFN range for ZONE_MOVABLE is from
+                        * start_pfn->end_pfn. Calculate size_pages as the
+                        * number of pages used as kernelcore
+                        */
+                       size_pages = end_pfn - start_pfn;
+                       if (size_pages > kernelcore_remaining)
+                               size_pages = kernelcore_remaining;
+                       zone_movable_pfn[nid] = start_pfn + size_pages;
+
+                       /*
+                        * Some kernelcore has been accounted for; update the
+                        * counts and break if the kernelcore for this node
+                        * has been satisfied
+                        */
+                       required_kernelcore -= min(required_kernelcore,
+                                                               size_pages);
+                       kernelcore_remaining -= size_pages;
+                       if (!kernelcore_remaining)
+                               break;
+               }
+       }
+
+       /*
+        * If there is still required_kernelcore, we do another pass with one
+        * less node in the count. This will push zone_movable_pfn[nid] further
+        * along on the nodes that still have memory until kernelcore is
+        * satisfied
+        */
+       usable_nodes--;
+       if (usable_nodes && required_kernelcore > usable_nodes)
+               goto restart;
+
+       /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
+       for (nid = 0; nid < MAX_NUMNODES; nid++)
+               zone_movable_pfn[nid] =
+                       roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
+}
+
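The restart loop above first splits kernelcore evenly across the online nodes, then redistributes whatever a small node could not absorb over the remaining ones. A simplified two-node model with hypothetical sizes, 4KiB pages assumed (a toy, not the kernel loop, which also walks early_node_map and tracks the ZONE_MOVABLE boundary):

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical nodes: 2GiB and 512MiB of memory, kernelcore=2G */
            unsigned long node_pages[2] = { 1UL << 19, 1UL << 17 };
            unsigned long kernel_on_node[2] = { 0, 0 };
            unsigned long required_kernelcore = 1UL << 19;
            int usable_nodes = 2, nid;

            while (required_kernelcore && usable_nodes) {
                    unsigned long kernelcore_node =
                                    required_kernelcore / usable_nodes;

                    for (nid = 0; nid < 2; nid++) {
                            unsigned long room = node_pages[nid] -
                                                    kernel_on_node[nid];
                            unsigned long take = room < kernelcore_node ?
                                                    room : kernelcore_node;

                            if (take > required_kernelcore)
                                    take = required_kernelcore;
                            kernel_on_node[nid] += take;
                            required_kernelcore -= take;
                    }
                    usable_nodes--; /* fewer nodes share any shortfall */
            }

            /* prints "node0=393216 node1=131072" (1.5GiB + 512MiB of kernelcore) */
            printf("node0=%lu node1=%lu\n", kernel_on_node[0], kernel_on_node[1]);
            return 0;
    }
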
 /**
  * free_area_init_nodes - Initialise all pg_data_t and zone data
  * @max_zone_pfn: an array of max PFNs for each zone
@@ -2664,6 +3384,9 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
        unsigned long nid;
        enum zone_type i;
 
+       /* Sort early_node_map as initialisation assumes it is sorted */
+       sort_node_map();
+
        /* Record where the zone boundaries are */
        memset(arch_zone_lowest_possible_pfn, 0,
                                sizeof(arch_zone_lowest_possible_pfn));
@@ -2672,22 +3395,37 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
        arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
        arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
        for (i = 1; i < MAX_NR_ZONES; i++) {
+               if (i == ZONE_MOVABLE)
+                       continue;
                arch_zone_lowest_possible_pfn[i] =
                        arch_zone_highest_possible_pfn[i-1];
                arch_zone_highest_possible_pfn[i] =
                        max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
        }
+       arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
+       arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
 
-       /* Regions in the early_node_map can be in any order */
-       sort_node_map();
+       /* Find the PFNs that ZONE_MOVABLE begins at in each node */
+       memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
+       find_zone_movable_pfns_for_nodes(zone_movable_pfn);
 
        /* Print out the zone ranges */
        printk("Zone PFN ranges:\n");
-       for (i = 0; i < MAX_NR_ZONES; i++)
+       for (i = 0; i < MAX_NR_ZONES; i++) {
+               if (i == ZONE_MOVABLE)
+                       continue;
                printk("  %-8s %8lu -> %8lu\n",
                                zone_names[i],
                                arch_zone_lowest_possible_pfn[i],
                                arch_zone_highest_possible_pfn[i]);
+       }
+
+       /* Print out the PFNs ZONE_MOVABLE begins at in each node */
+       printk("Movable zone start PFN for each node\n");
+       for (i = 0; i < MAX_NUMNODES; i++) {
+               if (zone_movable_pfn[i])
+                       printk("  Node %d: %lu\n", i, zone_movable_pfn[i]);
+       }
 
        /* Print out the early_node_map[] */
        printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
@@ -2697,12 +3435,50 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
                                                early_node_map[i].end_pfn);
 
        /* Initialise every node */
+       setup_nr_node_ids();
        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
                free_area_init_node(nid, pgdat, NULL,
                                find_min_pfn_for_node(nid), NULL);
        }
 }
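
A worked example of the zone-boundary stitching at the top of free_area_init_nodes(), using a hypothetical two-zone configuration with lowest active PFN 1 and max_zone_pfn = { 4096, 262144 }; the ZONE_MOVABLE slot is zeroed and filled in per node by find_zone_movable_pfns_for_nodes() afterwards:

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical max_zone_pfn[] for two zones */
            unsigned long max_zone_pfn[2] = { 4096, 262144 };
            unsigned long lowest[2], highest[2];
            int i;

            lowest[0] = 1; /* stand-in for find_min_pfn_with_active_regions() */
            highest[0] = max_zone_pfn[0];
            for (i = 1; i < 2; i++) {
                    lowest[i] = highest[i - 1];
                    highest[i] = max_zone_pfn[i] > lowest[i] ?
                                    max_zone_pfn[i] : lowest[i];
            }

            /* prints "zone 0: 1 -> 4096" and "zone 1: 4096 -> 262144" */
            for (i = 0; i < 2; i++)
                    printf("zone %d: %lu -> %lu\n", i, lowest[i], highest[i]);
            return 0;
    }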
+
+static int __init cmdline_parse_core(char *p, unsigned long *core)
+{
+       unsigned long long coremem;
+       if (!p)
+               return -EINVAL;
+
+       coremem = memparse(p, &p);
+       *core = coremem >> PAGE_SHIFT;
+
+       /* Paranoid check that UL is enough for the coremem value */
+       WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
+
+       return 0;
+}
+
+/*
+ * kernelcore=size sets the amount of memory to use for allocations that
+ * cannot be reclaimed or migrated.
+ */
+static int __init cmdline_parse_kernelcore(char *p)
+{
+       return cmdline_parse_core(p, &required_kernelcore);
+}
+
+/*
+ * movablecore=size sets the amount of memory to use for allocations that
+ * can be reclaimed or migrated.
+ */
+static int __init cmdline_parse_movablecore(char *p)
+{
+       return cmdline_parse_core(p, &required_movablecore);
+}
+
+early_param("kernelcore", cmdline_parse_kernelcore);
+early_param("movablecore", cmdline_parse_movablecore);
+
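For example, booting with kernelcore=512M on a machine with 4KiB pages should leave required_kernelcore at 131072 pages. A sketch of the size-to-pages conversion done in cmdline_parse_core() (the K/M/G suffix itself is handled by memparse()):

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumption: 4KiB pages */

    int main(void)
    {
            /* what memparse() would return for "512M" */
            unsigned long long coremem = 512ULL << 20;
            unsigned long required_kernelcore = coremem >> PAGE_SHIFT;

            /* prints "required_kernelcore = 131072 pages" */
            printf("required_kernelcore = %lu pages\n", required_kernelcore);
            return 0;
    }
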
 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 
 /**
@@ -2734,13 +3510,12 @@ void __init free_area_init(unsigned long *zones_size)
                        __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int page_alloc_cpu_notify(struct notifier_block *self,
                                 unsigned long action, void *hcpu)
 {
        int cpu = (unsigned long)hcpu;
 
-       if (action == CPU_DEAD) {
+       if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
                local_irq_disable();
                __drain_pages(cpu);
                vm_events_fold_cpu(cpu);
@@ -2749,7 +3524,6 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
        }
        return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 void __init page_alloc_init(void)
 {
@@ -2937,7 +3711,8 @@ int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
        struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
 {
        proc_dointvec(table, write, file, buffer, length, ppos);
-       setup_per_zone_pages_min();
+       if (write)
+               setup_per_zone_pages_min();
        return 0;
 }
 
@@ -3053,7 +3828,7 @@ void *__init alloc_large_system_hash(const char *tablename,
        /* allow the kernel cmdline to have a say */
        if (!numentries) {
                /* round applicable memory size up to nearest megabyte */
-               numentries = (flags & HASH_HIGHMEM) ? nr_all_pages : nr_kernel_pages;
+               numentries = nr_kernel_pages;
                numentries += (1UL << (20 - PAGE_SHIFT)) - 1;
                numentries >>= 20 - PAGE_SHIFT;
                numentries <<= 20 - PAGE_SHIFT;
@@ -3063,6 +3838,10 @@ void *__init alloc_large_system_hash(const char *tablename,
                        numentries >>= (scale - PAGE_SHIFT);
                else
                        numentries <<= (PAGE_SHIFT - scale);
+
+               /* Make sure we've got at least a 0-order allocation.. */
+               if (unlikely((numentries * bucketsize) < PAGE_SIZE))
+                       numentries = PAGE_SIZE / bucketsize;
        }
        numentries = roundup_pow_of_two(numentries);
 
@@ -3075,7 +3854,7 @@ void *__init alloc_large_system_hash(const char *tablename,
        if (numentries > max)
                numentries = max;
 
-       log2qty = long_log2(numentries);
+       log2qty = ilog2(numentries);
 
        do {
                size = bucketsize << log2qty;
@@ -3088,16 +3867,31 @@ void *__init alloc_large_system_hash(const char *tablename,
                        for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
                                ;
                        table = (void*) __get_free_pages(GFP_ATOMIC, order);
+                       /*
+                        * If bucketsize is not a power of two, we may free
+                        * some pages at the end of the hash table.
+                        */
+                       if (table) {
+                               unsigned long alloc_end = (unsigned long)table +
+                                               (PAGE_SIZE << order);
+                               unsigned long used = (unsigned long)table +
+                                               PAGE_ALIGN(size);
+                               split_page(virt_to_page(table), order);
+                               while (used < alloc_end) {
+                                       free_page(used);
+                                       used += PAGE_SIZE;
+                               }
+                       }
                }
        } while (!table && size > PAGE_SIZE && --log2qty);
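
The trimming added inside the loop matters when bucketsize is not a power of two, because the buddy allocator only returns power-of-two page counts. A worked example with hypothetical numbers (40-byte buckets, 2^15 entries, 4KiB pages): the table needs 320 pages, the smallest covering allocation is order 9 (512 pages), so 192 tail pages can be handed back after split_page().

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumption: 4KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
            unsigned long bucketsize = 40, log2qty = 15;
            unsigned long size = bucketsize << log2qty; /* 1310720 bytes */
            unsigned long need;
            unsigned int order;

            for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
                    ;
            need = (size + PAGE_SIZE - 1) / PAGE_SIZE; /* PAGE_ALIGN(size) in pages */

            /* prints "need 320 pages, alloc 512, free 192 tail pages" */
            printf("need %lu pages, alloc %lu, free %lu tail pages\n",
                   need, 1UL << order, (1UL << order) - need);
            return 0;
    }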
 
        if (!table)
                panic("Failed to allocate %s hash table\n", tablename);
 
-       printk("%s hash table entries: %d (order: %d, %lu bytes)\n",
+       printk(KERN_INFO "%s hash table entries: %d (order: %d, %lu bytes)\n",
               tablename,
               (1U << log2qty),
-              long_log2(size) - PAGE_SHIFT,
+              ilog2(size) - PAGE_SHIFT,
               size);
 
        if (_hash_shift)
@@ -3121,18 +3915,4 @@ EXPORT_SYMBOL(pfn_to_page);
 EXPORT_SYMBOL(page_to_pfn);
 #endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
 
-#if MAX_NUMNODES > 1
-/*
- * Find the highest possible node id.
- */
-int highest_possible_node_id(void)
-{
-       unsigned int node;
-       unsigned int highest = 0;
 
-       for_each_node_mask(node, node_possible_map)
-               highest = node;
-       return highest;
-}
-EXPORT_SYMBOL(highest_possible_node_id);
-#endif