diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 59d2e88..431214b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
 #include <linux/page_cgroup.h>
 #include <linux/debugobjects.h>
 #include <linux/kmemleak.h>
+#include <linux/memory.h>
+#include <linux/compaction.h>
 #include <trace/events/kmem.h>
+#include <linux/ftrace_event.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
 #include "internal.h"
 
+#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
+DEFINE_PER_CPU(int, numa_node);
+EXPORT_PER_CPU_SYMBOL(numa_node);
+#endif
+
+#ifdef CONFIG_HAVE_MEMORYLESS_NODES
+/*
+ * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
+ * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
+ * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
+ * defined in <linux/topology.h>.
+ */
+DEFINE_PER_CPU(int, _numa_mem_);               /* Kernel "local memory" node */
+EXPORT_PER_CPU_SYMBOL(_numa_mem_);
+#endif
+
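The _numa_mem_ variable above is only meant to be reached through the <linux/topology.h> accessors named in the comment. A rough sketch of what those accessors look like when CONFIG_HAVE_MEMORYLESS_NODES is set (the exact percpu operations used are an assumption here):

	/* Sketch of the <linux/topology.h> accessors; percpu ops are assumed. */
	DECLARE_PER_CPU(int, _numa_mem_);

	static inline void set_numa_mem(int node)
	{
		this_cpu_write(_numa_mem_, node);	/* record nearest node with memory */
	}

	static inline int numa_mem_id(void)
	{
		return this_cpu_read(_numa_mem_);	/* "local memory" node for allocations */
	}

	static inline int cpu_to_mem(int cpu)
	{
		return per_cpu(_numa_mem_, cpu);
	}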
 /*
  * Array of node states.
  */
@@ -75,6 +94,31 @@ unsigned long totalreserve_pages __read_mostly;
 int percpu_pagelist_fraction;
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 
+#ifdef CONFIG_PM_SLEEP
+/*
+ * The following functions are used by the suspend/hibernate code to temporarily
+ * change gfp_allowed_mask in order to avoid using I/O during memory allocations
+ * while devices are suspended.  To avoid races with the suspend/hibernate code,
+ * they should always be called with pm_mutex held (gfp_allowed_mask also should
+ * only be modified with pm_mutex held, unless the suspend/hibernate code is
+ * guaranteed not to run in parallel with that modification).
+ */
+void set_gfp_allowed_mask(gfp_t mask)
+{
+       WARN_ON(!mutex_is_locked(&pm_mutex));
+       gfp_allowed_mask = mask;
+}
+
+gfp_t clear_gfp_allowed_mask(gfp_t mask)
+{
+       gfp_t ret = gfp_allowed_mask;
+
+       WARN_ON(!mutex_is_locked(&pm_mutex));
+       gfp_allowed_mask &= ~mask;
+       return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
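The suspend/hibernate core is the intended caller of the two helpers above; a minimal sketch of that usage (the call site and the __GFP_IO | __GFP_FS mask are assumptions, not part of this patch):

	/* Sketch: expected PM-core usage of the gfp_allowed_mask helpers (assumed). */
	static gfp_t saved_gfp_mask;

	static int suspend_devices_sketch(void)
	{
		int error;

		mutex_lock(&pm_mutex);
		/* Forbid I/O and FS allocations while devices are suspended. */
		saved_gfp_mask = clear_gfp_allowed_mask(__GFP_IO | __GFP_FS);

		error = do_suspend_sketch();	/* hypothetical placeholder */

		/* Restore the previous mask once devices are resumed. */
		set_gfp_allowed_mask(saved_gfp_mask);
		mutex_unlock(&pm_mutex);
		return error;
	}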
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 int pageblock_order __read_mostly;
 #endif
@@ -262,10 +306,7 @@ static void bad_page(struct page *page)
 
        printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
                current->comm, page_to_pfn(page));
-       printk(KERN_ALERT
-               "page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
-               page, (void *)page->flags, page_count(page),
-               page_mapcount(page), page->mapping, page->index);
+       dump_page(page);
 
        dump_stack();
 out:
@@ -451,6 +492,8 @@ static inline void __free_one_page(struct page *page,
                int migratetype)
 {
        unsigned long page_idx;
+       unsigned long combined_idx;
+       struct page *buddy;
 
        if (unlikely(PageCompound(page)))
                if (unlikely(destroy_compound_page(page, order)))
@@ -464,9 +507,6 @@ static inline void __free_one_page(struct page *page,
        VM_BUG_ON(bad_range(zone, page));
 
        while (order < MAX_ORDER-1) {
-               unsigned long combined_idx;
-               struct page *buddy;
-
                buddy = __page_find_buddy(page, page_idx, order);
                if (!page_is_buddy(page, buddy, order))
                        break;
@@ -481,8 +521,29 @@ static inline void __free_one_page(struct page *page,
                order++;
        }
        set_page_order(page, order);
-       list_add(&page->lru,
-               &zone->free_area[order].free_list[migratetype]);
+
+       /*
+        * If this is not the largest possible page, check if the buddy
+        * of the next-highest order is free. If it is, it's possible
+        * that pages are being freed that will coalesce soon. In case,
+        * that pages are being freed that will coalesce soon. In case
+        * so it's less likely to be used soon and more likely to be merged
+        * as a higher order page
+        */
+       if ((order < MAX_ORDER-1) && pfn_valid_within(page_to_pfn(buddy))) {
+               struct page *higher_page, *higher_buddy;
+               combined_idx = __find_combined_index(page_idx, order);
+               higher_page = page + combined_idx - page_idx;
+               higher_buddy = __page_find_buddy(higher_page, combined_idx, order + 1);
+               if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
+                       list_add_tail(&page->lru,
+                               &zone->free_area[order].free_list[migratetype]);
+                       goto out;
+               }
+       }
+
+       list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
+out:
        zone->free_area[order].nr_free++;
 }
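The tail-placement check above leans on the standard buddy index arithmetic; a simplified sketch of the relations assumed by __page_find_buddy() and __find_combined_index():

	/* Simplified sketch of the buddy index math used by __free_one_page(). */
	static unsigned long buddy_index(unsigned long page_idx, unsigned int order)
	{
		/* Buddies of size 1 << order differ only in that bit of the index. */
		return page_idx ^ (1UL << order);
	}

	static unsigned long combined_index(unsigned long page_idx, unsigned int order)
	{
		/* The merged block starts at the lower of the two buddy indices. */
		return page_idx & ~(1UL << order);
	}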
 
@@ -529,7 +590,7 @@ static void free_pcppages_bulk(struct zone *zone, int count,
        int batch_free = 0;
 
        spin_lock(&zone->lock);
-       zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
+       zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
 
        __mod_zone_page_state(zone, NR_FREE_PAGES, count);
@@ -555,8 +616,9 @@ static void free_pcppages_bulk(struct zone *zone, int count,
                        page = list_entry(list->prev, struct page, lru);
                        /* must delete as __free_one_page list manipulates */
                        list_del(&page->lru);
-                       __free_one_page(page, zone, 0, migratetype);
-                       trace_mm_page_pcpu_drain(page, 0, migratetype);
+                       /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
+                       __free_one_page(page, zone, 0, page_private(page));
+                       trace_mm_page_pcpu_drain(page, 0, page_private(page));
                } while (--count && --batch_free && !list_empty(list));
        }
        spin_unlock(&zone->lock);
@@ -566,7 +628,7 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
                                int migratetype)
 {
        spin_lock(&zone->lock);
-       zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
+       zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
 
        __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
@@ -574,19 +636,23 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
        spin_unlock(&zone->lock);
 }
 
-static void __free_pages_ok(struct page *page, unsigned int order)
+static bool free_pages_prepare(struct page *page, unsigned int order)
 {
-       unsigned long flags;
        int i;
        int bad = 0;
-       int wasMlocked = __TestClearPageMlocked(page);
 
+       trace_mm_page_free_direct(page, order);
        kmemcheck_free_shadow(page, order);
 
-       for (i = 0 ; i < (1 << order) ; ++i)
-               bad += free_pages_check(page + i);
+       for (i = 0; i < (1 << order); i++) {
+               struct page *pg = page + i;
+
+               if (PageAnon(pg))
+                       pg->mapping = NULL;
+               bad += free_pages_check(pg);
+       }
        if (bad)
-               return;
+               return false;
 
        if (!PageHighMem(page)) {
                debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
@@ -596,6 +662,17 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        arch_free_page(page, order);
        kernel_map_pages(page, 1 << order, 0);
 
+       return true;
+}
+
+static void __free_pages_ok(struct page *page, unsigned int order)
+{
+       unsigned long flags;
+       int wasMlocked = __TestClearPageMlocked(page);
+
+       if (!free_pages_prepare(page, order))
+               return;
+
        local_irq_save(flags);
        if (unlikely(wasMlocked))
                free_page_mlock(page);
@@ -1007,10 +1084,10 @@ static void drain_pages(unsigned int cpu)
                struct per_cpu_pageset *pset;
                struct per_cpu_pages *pcp;
 
-               pset = zone_pcp(zone, cpu);
+               local_irq_save(flags);
+               pset = per_cpu_ptr(zone->pageset, cpu);
 
                pcp = &pset->pcp;
-               local_irq_save(flags);
                free_pcppages_bulk(zone, pcp->count, pcp);
                pcp->count = 0;
                local_irq_restore(flags);
@@ -1071,8 +1148,9 @@ void mark_free_pages(struct zone *zone)
 
 /*
  * Free a 0-order page
+ * cold == 1 ? free a cold page : free a hot page
  */
-static void free_hot_cold_page(struct page *page, int cold)
+void free_hot_cold_page(struct page *page, int cold)
 {
        struct zone *zone = page_zone(page);
        struct per_cpu_pages *pcp;
@@ -1080,21 +1158,9 @@ static void free_hot_cold_page(struct page *page, int cold)
        int migratetype;
        int wasMlocked = __TestClearPageMlocked(page);
 
-       kmemcheck_free_shadow(page, 0);
-
-       if (PageAnon(page))
-               page->mapping = NULL;
-       if (free_pages_check(page))
+       if (!free_pages_prepare(page, 0))
                return;
 
-       if (!PageHighMem(page)) {
-               debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
-               debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
-       }
-       arch_free_page(page, 0);
-       kernel_map_pages(page, 1, 0);
-
-       pcp = &zone_pcp(zone, get_cpu())->pcp;
        migratetype = get_pageblock_migratetype(page);
        set_page_private(page, migratetype);
        local_irq_save(flags);
@@ -1117,6 +1183,7 @@ static void free_hot_cold_page(struct page *page, int cold)
                migratetype = MIGRATE_MOVABLE;
        }
 
+       pcp = &this_cpu_ptr(zone->pageset)->pcp;
        if (cold)
                list_add_tail(&page->lru, &pcp->lists[migratetype]);
        else
@@ -1129,15 +1196,8 @@ static void free_hot_cold_page(struct page *page, int cold)
 
 out:
        local_irq_restore(flags);
-       put_cpu();
 }
 
-void free_hot_page(struct page *page)
-{
-       trace_mm_page_free_direct(page, 0);
-       free_hot_cold_page(page, 0);
-}
-       
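With free_hot_page() gone and free_hot_cold_page() exported, callers pass the hot/cold hint themselves; a sketch of a bulk-free caller choosing cold placement (the caller itself is hypothetical):

	/* Sketch: freeing a batch of pages as "cold" (hypothetical caller). */
	static void free_cold_pages_sketch(struct page **pages, int nr)
	{
		int i;

		for (i = 0; i < nr; i++)
			if (put_page_testzero(pages[i]))
				free_hot_cold_page(pages[i], 1);	/* cold: tail of pcp list */
	}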
 /*
  * split_page takes a non-compound higher-order page, and splits it into
  * n (1<<order) sub-pages: page[0..n]
@@ -1167,6 +1227,51 @@ void split_page(struct page *page, unsigned int order)
 }
 
 /*
+ * Similar to split_page except the page is already free. As this is only
+ * being used for migration, the migratetype of the block also changes.
+ * As this is called with interrupts disabled, the caller is responsible
+ * for calling arch_alloc_page() and kernel_map_page() after interrupts
+ * are enabled.
+ *
+ * Note: this is probably too low level an operation for use in drivers.
+ * Please consult with lkml before using this in your driver.
+ */
+int split_free_page(struct page *page)
+{
+       unsigned int order;
+       unsigned long watermark;
+       struct zone *zone;
+
+       BUG_ON(!PageBuddy(page));
+
+       zone = page_zone(page);
+       order = page_order(page);
+
+       /* Obey watermarks as if the page was being allocated */
+       watermark = low_wmark_pages(zone) + (1 << order);
+       if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
+               return 0;
+
+       /* Remove page from free list */
+       list_del(&page->lru);
+       zone->free_area[order].nr_free--;
+       rmv_page_order(page);
+       __mod_zone_page_state(zone, NR_FREE_PAGES, -(1UL << order));
+
+       /* Split into individual pages */
+       set_page_refcounted(page);
+       split_page(page, order);
+
+       if (order >= pageblock_order - 1) {
+               struct page *endpage = page + (1 << order) - 1;
+               for (; page < endpage; page += pageblock_nr_pages)
+                       set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+       }
+
+       return 1 << order;
+}
+
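split_free_page() above is aimed at the migration/compaction path; roughly how a scanner over a zone's free lists is expected to consume it (the surrounding loop is a sketch, not the real compaction code):

	/* Sketch: turning one free buddy page into migration targets (assumed caller). */
	static unsigned long take_free_page_sketch(struct page *page,
						   struct list_head *freelist)
	{
		unsigned long i, isolated;

		/* Returns 0 if watermarks forbid the split, else 1 << order pages. */
		isolated = split_free_page(page);
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}
		return isolated;
	}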
+/*
  * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
  * we cheat by calling it from here, in the order > 0 path.  Saves a branch
  * or two.
@@ -1179,17 +1284,15 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
        unsigned long flags;
        struct page *page;
        int cold = !!(gfp_flags & __GFP_COLD);
-       int cpu;
 
 again:
-       cpu  = get_cpu();
        if (likely(order == 0)) {
                struct per_cpu_pages *pcp;
                struct list_head *list;
 
-               pcp = &zone_pcp(zone, cpu)->pcp;
-               list = &pcp->lists[migratetype];
                local_irq_save(flags);
+               pcp = &this_cpu_ptr(zone->pageset)->pcp;
+               list = &pcp->lists[migratetype];
                if (list_empty(list)) {
                        pcp->count += rmqueue_bulk(zone, 0,
                                        pcp->batch, list,
@@ -1221,16 +1324,15 @@ again:
                }
                spin_lock_irqsave(&zone->lock, flags);
                page = __rmqueue(zone, order, migratetype);
-               __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
                spin_unlock(&zone->lock);
                if (!page)
                        goto failed;
+               __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
        }
 
        __count_zone_vm_events(PGALLOC, zone, 1 << order);
        zone_statistics(preferred_zone, zone);
        local_irq_restore(flags);
-       put_cpu();
 
        VM_BUG_ON(bad_range(zone, page));
        if (prep_new_page(page, order, gfp_flags))
@@ -1239,7 +1341,6 @@ again:
 
 failed:
        local_irq_restore(flags);
-       put_cpu();
        return NULL;
 }
 
@@ -1654,18 +1755,84 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
        if (page)
                goto out;
 
-       /* The OOM killer will not help higher order allocs */
-       if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_NOFAIL))
-               goto out;
-
+       if (!(gfp_mask & __GFP_NOFAIL)) {
+               /* The OOM killer will not help higher order allocs */
+               if (order > PAGE_ALLOC_COSTLY_ORDER)
+                       goto out;
+               /*
+                * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
+                * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
+                * The caller should handle page allocation failure by itself if
+                * it specifies __GFP_THISNODE.
+                * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
+                */
+               if (gfp_mask & __GFP_THISNODE)
+                       goto out;
+       }
        /* Exhausted what can be done so it's blamo time */
-       out_of_memory(zonelist, gfp_mask, order);
+       out_of_memory(zonelist, gfp_mask, order, nodemask);
 
 out:
        clear_zonelist_oom(zonelist, gfp_mask);
        return page;
 }
 
+#ifdef CONFIG_COMPACTION
+/* Try memory compaction for high-order allocations before reclaim */
+static struct page *
+__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
+       struct zonelist *zonelist, enum zone_type high_zoneidx,
+       nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+       int migratetype, unsigned long *did_some_progress)
+{
+       struct page *page;
+
+       if (!order || compaction_deferred(preferred_zone))
+               return NULL;
+
+       *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
+                                                               nodemask);
+       if (*did_some_progress != COMPACT_SKIPPED) {
+
+               /* Page migration frees to the PCP lists but we want merging */
+               drain_pages(get_cpu());
+               put_cpu();
+
+               page = get_page_from_freelist(gfp_mask, nodemask,
+                               order, zonelist, high_zoneidx,
+                               alloc_flags, preferred_zone,
+                               migratetype);
+               if (page) {
+                       preferred_zone->compact_considered = 0;
+                       preferred_zone->compact_defer_shift = 0;
+                       count_vm_event(COMPACTSUCCESS);
+                       return page;
+               }
+
+               /*
+                * It's bad if compaction run occurs and fails.
+                * The most likely reason is that pages exist,
+                * but not enough to satisfy watermarks.
+                */
+               count_vm_event(COMPACTFAIL);
+               defer_compaction(preferred_zone);
+
+               cond_resched();
+       }
+
+       return NULL;
+}
+#else
+static inline struct page *
+__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
+       struct zonelist *zonelist, enum zone_type high_zoneidx,
+       nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+       int migratetype, unsigned long *did_some_progress)
+{
+       return NULL;
+}
+#endif /* CONFIG_COMPACTION */
+
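compaction_deferred() and defer_compaction() used above implement an exponential backoff keyed on the compact_considered/compact_defer_shift fields that the success path resets; a sketch of that bookkeeping (the cap value is an assumption):

	/* Sketch of the compaction defer/backoff bookkeeping (details assumed). */
	#define COMPACT_MAX_DEFER_SHIFT_SKETCH	6	/* assumed cap */

	static void defer_compaction_sketch(struct zone *zone)
	{
		zone->compact_considered = 0;
		if (++zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT_SKETCH)
			zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT_SKETCH;
	}

	static bool compaction_deferred_sketch(struct zone *zone)
	{
		unsigned long defer_limit = 1UL << zone->compact_defer_shift;

		/* Skip compaction until enough failed attempts have accumulated. */
		return ++zone->compact_considered < defer_limit;
	}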
 /* The really slow allocator path where we enter direct reclaim */
 static inline struct page *
 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
@@ -1852,6 +2019,15 @@ rebalance:
        if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
                goto nopage;
 
+       /* Try direct compaction */
+       page = __alloc_pages_direct_compact(gfp_mask, order,
+                                       zonelist, high_zoneidx,
+                                       nodemask,
+                                       alloc_flags, preferred_zone,
+                                       migratetype, &did_some_progress);
+       if (page)
+               goto got_pg;
+
        /* Try direct reclaim and then allocating */
        page = __alloc_pages_direct_reclaim(gfp_mask, order,
                                        zonelist, high_zoneidx,
@@ -1943,10 +2119,13 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
        if (unlikely(!zonelist->_zonerefs->zone))
                return NULL;
 
+       get_mems_allowed();
        /* The preferred zone is used for statistics later */
        first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
-       if (!preferred_zone)
+       if (!preferred_zone) {
+               put_mems_allowed();
                return NULL;
+       }
 
        /* First allocation attempt */
        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
@@ -1956,6 +2135,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                page = __alloc_pages_slowpath(gfp_mask, order,
                                zonelist, high_zoneidx, nodemask,
                                preferred_zone, migratetype);
+       put_mems_allowed();
 
        trace_mm_page_alloc(page, order, gfp_mask, migratetype);
        return page;
@@ -2001,9 +2181,8 @@ void __pagevec_free(struct pagevec *pvec)
 void __free_pages(struct page *page, unsigned int order)
 {
        if (put_page_testzero(page)) {
-               trace_mm_page_free_direct(page, order);
                if (order == 0)
-                       free_hot_page(page);
+                       free_hot_cold_page(page, 0);
                else
                        __free_pages_ok(page, order);
        }
@@ -2168,7 +2347,7 @@ void show_free_areas(void)
                for_each_online_cpu(cpu) {
                        struct per_cpu_pageset *pageset;
 
-                       pageset = zone_pcp(zone, cpu);
+                       pageset = per_cpu_ptr(zone->pageset, cpu);
 
                        printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
                               cpu, pageset->pcp.high,
@@ -2259,7 +2438,7 @@ void show_free_areas(void)
                        K(zone_page_state(zone, NR_BOUNCE)),
                        K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
                        zone->pages_scanned,
-                       (zone_is_all_unreclaimable(zone) ? "yes" : "no")
+                       (zone->all_unreclaimable ? "yes" : "no")
                        );
                printk("lowmem_reserve[]:");
                for (i = 0; i < MAX_NR_ZONES; i++)
@@ -2391,13 +2570,14 @@ int numa_zonelist_order_handler(ctl_table *table, int write,
 {
        char saved_string[NUMA_ZONELIST_ORDER_LEN];
        int ret;
+       static DEFINE_MUTEX(zl_order_mutex);
 
+       mutex_lock(&zl_order_mutex);
        if (write)
-               strncpy(saved_string, (char*)table->data,
-                       NUMA_ZONELIST_ORDER_LEN);
+               strcpy(saved_string, (char*)table->data);
        ret = proc_dostring(table, write, buffer, length, ppos);
        if (ret)
-               return ret;
+               goto out;
        if (write) {
                int oldval = user_zonelist_order;
                if (__parse_numa_zonelist_order((char*)table->data)) {
@@ -2407,10 +2587,15 @@ int numa_zonelist_order_handler(ctl_table *table, int write,
                        strncpy((char*)table->data, saved_string,
                                NUMA_ZONELIST_ORDER_LEN);
                        user_zonelist_order = oldval;
-               } else if (oldval != user_zonelist_order)
-                       build_all_zonelists();
+               } else if (oldval != user_zonelist_order) {
+                       mutex_lock(&zonelists_mutex);
+                       build_all_zonelists(NULL);
+                       mutex_unlock(&zonelists_mutex);
+               }
        }
-       return 0;
+out:
+       mutex_unlock(&zl_order_mutex);
+       return ret;
 }
 
 
@@ -2550,10 +2735,10 @@ static int default_zonelist_order(void)
        struct zone *z;
        int average_size;
        /*
-         * ZONE_DMA and ZONE_DMA32 can be very small area in the sytem.
+         * ZONE_DMA and ZONE_DMA32 can be very small area in the system.
         * If they are really small and used heavily, the system can fall
         * into OOM very easily.
-        * This function detect ZONE_DMA/DMA32 size and confgigures zone order.
+        * This function detects ZONE_DMA/DMA32 size and configures zone order.
         */
        /* Is there ZONE_NORMAL ? (ex. ppc has only DMA zone..) */
        low_kmem_size = 0;
@@ -2565,6 +2750,15 @@ static int default_zonelist_order(void)
                                if (zone_type < ZONE_NORMAL)
                                        low_kmem_size += z->present_pages;
                                total_size += z->present_pages;
+                       } else if (zone_type == ZONE_NORMAL) {
+                               /*
+                                * If any node has only lowmem, then node order
+                                * is preferred to allow kernel allocations
+                                * locally; otherwise, they can easily infringe
+                                * on other nodes when there is an abundance of
+                                * lowmem available to allocate from.
+                                */
+                               return ZONELIST_ORDER_NODE;
                        }
                }
        }
@@ -2678,6 +2872,24 @@ static void build_zonelist_cache(pg_data_t *pgdat)
                zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
 }
 
+#ifdef CONFIG_HAVE_MEMORYLESS_NODES
+/*
+ * Return node id of node used for "local" allocations.
+ * I.e., first node id of first zone in arg node's generic zonelist.
+ * Used for initializing percpu 'numa_mem', which is used primarily
+ * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
+ */
+int local_memory_node(int node)
+{
+       struct zone *zone;
+
+       (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
+                                  gfp_zone(GFP_KERNEL),
+                                  NULL,
+                                  &zone);
+       return zone->node;
+}
+#endif
 
 #else  /* CONFIG_NUMA */
 
@@ -2730,10 +2942,36 @@ static void build_zonelist_cache(pg_data_t *pgdat)
 
 #endif /* CONFIG_NUMA */
 
+/*
+ * Boot pageset table. One per cpu which is going to be used for all
+ * zones and all nodes. The parameters will be set in such a way
+ * that an item put on a list will immediately be handed over to
+ * the buddy list. This is safe since pageset manipulation is done
+ * with interrupts disabled.
+ *
+ * The boot_pagesets must be kept even after bootup is complete for
+ * unused processors and/or zones. They do play a role for bootstrapping
+ * hotplugged processors.
+ *
+ * zoneinfo_show() and maybe other functions do
+ * not check if the processor is online before following the pageset pointer.
+ * Other parts of the kernel may not check if the zone is available.
+ */
+static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
+static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
+static void setup_zone_pageset(struct zone *zone);
+
+/*
+ * Global mutex to protect against size modification of zonelists
+ * as well as to serialize pageset setup for the new populated zone.
+ */
+DEFINE_MUTEX(zonelists_mutex);
+
 /* return values int ....just for stop_machine() */
-static int __build_all_zonelists(void *dummy)
+static __init_refok int __build_all_zonelists(void *data)
 {
        int nid;
+       int cpu;
 
 #ifdef CONFIG_NUMA
        memset(node_load, 0, sizeof(node_load));
@@ -2744,10 +2982,53 @@ static int __build_all_zonelists(void *dummy)
                build_zonelists(pgdat);
                build_zonelist_cache(pgdat);
        }
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+       /* Setup real pagesets for the new zone */
+       if (data) {
+               struct zone *zone = data;
+               setup_zone_pageset(zone);
+       }
+#endif
+
+       /*
+        * Initialize the boot_pagesets that are going to be used
+        * for bootstrapping processors. The real pagesets for
+        * each zone will be allocated later when the per cpu
+        * allocator is available.
+        *
+        * boot_pagesets are used also for bootstrapping offline
+        * cpus if the system is already booted because the pagesets
+        * are needed to initialize allocators on a specific cpu too.
+        * F.e. the percpu allocator needs the page allocator which
+        * needs the percpu allocator in order to allocate its pagesets
+        * (a chicken-egg dilemma).
+        */
+       for_each_possible_cpu(cpu) {
+               setup_pageset(&per_cpu(boot_pageset, cpu), 0);
+
+#ifdef CONFIG_HAVE_MEMORYLESS_NODES
+               /*
+                * We now know the "local memory node" for each node--
+                * i.e., the node of the first zone in the generic zonelist.
+                * Set up numa_mem percpu variable for on-line cpus.  During
+                * boot, only the boot cpu should be on-line;  we'll init the
+                * secondary cpus' numa_mem as they come on-line.  During
+                * node/memory hotplug, we'll fixup all on-line cpus.
+                */
+               if (cpu_online(cpu))
+                       set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
+#endif
+       }
+
        return 0;
 }
 
-void build_all_zonelists(void)
+/*
+ * Called with zonelists_mutex held always
+ * unless system_state == SYSTEM_BOOTING.
+ */
+void build_all_zonelists(void *data)
 {
        set_zonelist_order();
 
@@ -2758,7 +3039,7 @@ void build_all_zonelists(void)
        } else {
                /* we have to stop all cpus to guarantee there is no user
                   of zonelist */
-               stop_machine(__build_all_zonelists, NULL, NULL);
+               stop_machine(__build_all_zonelists, data, NULL);
                /* cpuset refresh routine should be here */
        }
        vm_total_pages = nr_free_pagecache_pages();
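The zonelists_mutex and the new data argument exist for memory hotplug; a sketch of the expected caller when pages are onlined into a zone (the hotplug details are assumptions):

	/* Sketch: memory hotplug onlining pages into a possibly empty zone (assumed). */
	static void online_zone_sketch(struct zone *zone)
	{
		mutex_lock(&zonelists_mutex);
		if (!populated_zone(zone))
			/* Newly populated zone: rebuild zonelists, set up its pagesets. */
			build_all_zonelists(zone);
		else
			/* Already populated: just refresh the per-cpu batch sizes. */
			zone_pcp_update(zone);
		mutex_unlock(&zonelists_mutex);
	}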
@@ -3081,121 +3362,36 @@ static void setup_pagelist_highmark(struct per_cpu_pageset *p,
                pcp->batch = PAGE_SHIFT * 8;
 }
 
-
-#ifdef CONFIG_NUMA
-/*
- * Boot pageset table. One per cpu which is going to be used for all
- * zones and all nodes. The parameters will be set in such a way
- * that an item put on a list will immediately be handed over to
- * the buddy list. This is safe since pageset manipulation is done
- * with interrupts disabled.
- *
- * Some NUMA counter updates may also be caught by the boot pagesets.
- *
- * The boot_pagesets must be kept even after bootup is complete for
- * unused processors and/or zones. They do play a role for bootstrapping
- * hotplugged processors.
- *
- * zoneinfo_show() and maybe other functions do
- * not check if the processor is online before following the pageset pointer.
- * Other parts of the kernel may not check if the zone is available.
- */
-static struct per_cpu_pageset boot_pageset[NR_CPUS];
-
-/*
- * Dynamically allocate memory for the
- * per cpu pageset array in struct zone.
- */
-static int __cpuinit process_zones(int cpu)
+static __meminit void setup_zone_pageset(struct zone *zone)
 {
-       struct zone *zone, *dzone;
-       int node = cpu_to_node(cpu);
+       int cpu;
 
-       node_set_state(node, N_CPU);    /* this node has a cpu */
+       zone->pageset = alloc_percpu(struct per_cpu_pageset);
 
-       for_each_populated_zone(zone) {
-               zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
-                                        GFP_KERNEL, node);
-               if (!zone_pcp(zone, cpu))
-                       goto bad;
+       for_each_possible_cpu(cpu) {
+               struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
 
-               setup_pageset(zone_pcp(zone, cpu), zone_batchsize(zone));
+               setup_pageset(pcp, zone_batchsize(zone));
 
                if (percpu_pagelist_fraction)
-                       setup_pagelist_highmark(zone_pcp(zone, cpu),
-                               (zone->present_pages / percpu_pagelist_fraction));
+                       setup_pagelist_highmark(pcp,
+                               (zone->present_pages /
+                                       percpu_pagelist_fraction));
        }
-
-       return 0;
-bad:
-       for_each_zone(dzone) {
-               if (!populated_zone(dzone))
-                       continue;
-               if (dzone == zone)
-                       break;
-               kfree(zone_pcp(dzone, cpu));
-               zone_pcp(dzone, cpu) = &boot_pageset[cpu];
-       }
-       return -ENOMEM;
 }
 
-static inline void free_zone_pagesets(int cpu)
-{
-       struct zone *zone;
-
-       for_each_zone(zone) {
-               struct per_cpu_pageset *pset = zone_pcp(zone, cpu);
-
-               /* Free per_cpu_pageset if it is slab allocated */
-               if (pset != &boot_pageset[cpu])
-                       kfree(pset);
-               zone_pcp(zone, cpu) = &boot_pageset[cpu];
-       }
-}
-
-static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
-               unsigned long action,
-               void *hcpu)
-{
-       int cpu = (long)hcpu;
-       int ret = NOTIFY_OK;
-
-       switch (action) {
-       case CPU_UP_PREPARE:
-       case CPU_UP_PREPARE_FROZEN:
-               if (process_zones(cpu))
-                       ret = NOTIFY_BAD;
-               break;
-       case CPU_UP_CANCELED:
-       case CPU_UP_CANCELED_FROZEN:
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
-               free_zone_pagesets(cpu);
-               break;
-       default:
-               break;
-       }
-       return ret;
-}
-
-static struct notifier_block __cpuinitdata pageset_notifier =
-       { &pageset_cpuup_callback, NULL, 0 };
-
+/*
+ * Allocate per cpu pagesets and initialize them.
+ * Before this call only boot pagesets were available.
+ */
 void __init setup_per_cpu_pageset(void)
 {
-       int err;
+       struct zone *zone;
 
-       /* Initialize per_cpu_pageset for cpu 0.
-        * A cpuup callback will do this for every cpu
-        * as it comes online
-        */
-       err = process_zones(smp_processor_id());
-       BUG_ON(err);
-       register_cpu_notifier(&pageset_notifier);
+       for_each_populated_zone(zone)
+               setup_zone_pageset(zone);
 }
 
-#endif
-
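With zone->pageset now a real percpu pointer, the access idiom throughout the file changes from zone_pcp(zone, cpu) to the generic percpu accessors:

	/* Sketch of the new pageset access pattern used throughout this patch. */
	static void pageset_access_sketch(struct zone *zone, int cpu)
	{
		struct per_cpu_pageset *pset;

		/* A specific cpu's pageset, e.g. for draining or statistics: */
		pset = per_cpu_ptr(zone->pageset, cpu);

		/* The local cpu's pageset, with interrupts already disabled: */
		pset = this_cpu_ptr(zone->pageset);

		(void)pset;
	}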
 static noinline __init_refok
 int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
 {
@@ -3245,11 +3441,11 @@ static int __zone_pcp_update(void *data)
        int cpu;
        unsigned long batch = zone_batchsize(zone), flags;
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
+       for_each_possible_cpu(cpu) {
                struct per_cpu_pageset *pset;
                struct per_cpu_pages *pcp;
 
-               pset = zone_pcp(zone, cpu);
+               pset = per_cpu_ptr(zone->pageset, cpu);
                pcp = &pset->pcp;
 
                local_irq_save(flags);
@@ -3267,21 +3463,17 @@ void zone_pcp_update(struct zone *zone)
 
 static __meminit void zone_pcp_init(struct zone *zone)
 {
-       int cpu;
-       unsigned long batch = zone_batchsize(zone);
+       /*
+        * per cpu subsystem is not up at this point. The following code
+        * relies on the ability of the linker to provide the
+        * offset of a (static) per cpu variable into the per cpu area.
+        */
+       zone->pageset = &boot_pageset;
 
-       for (cpu = 0; cpu < NR_CPUS; cpu++) {
-#ifdef CONFIG_NUMA
-               /* Early boot. Slab allocator not functional yet */
-               zone_pcp(zone, cpu) = &boot_pageset[cpu];
-               setup_pageset(&boot_pageset[cpu],0);
-#else
-               setup_pageset(zone_pcp(zone,cpu), batch);
-#endif
-       }
        if (zone->present_pages)
-               printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%lu\n",
-                       zone->name, zone->present_pages, batch);
+               printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
+                       zone->name, zone->present_pages,
+                                        zone_batchsize(zone));
 }
 
 __meminit int init_currently_empty_zone(struct zone *zone,
@@ -3420,6 +3612,61 @@ void __init free_bootmem_with_active_regions(int nid,
        }
 }
 
+int __init add_from_early_node_map(struct range *range, int az,
+                                  int nr_range, int nid)
+{
+       int i;
+       u64 start, end;
+
+       /* need to go over early_node_map to find out good range for node */
+       for_each_active_range_index_in_nid(i, nid) {
+               start = early_node_map[i].start_pfn;
+               end = early_node_map[i].end_pfn;
+               nr_range = add_range(range, az, nr_range, start, end);
+       }
+       return nr_range;
+}
+
+#ifdef CONFIG_NO_BOOTMEM
+void * __init __alloc_memory_core_early(int nid, u64 size, u64 align,
+                                       u64 goal, u64 limit)
+{
+       int i;
+       void *ptr;
+
+       /* need to go over early_node_map to find out good range for node */
+       for_each_active_range_index_in_nid(i, nid) {
+               u64 addr;
+               u64 ei_start, ei_last;
+
+               ei_last = early_node_map[i].end_pfn;
+               ei_last <<= PAGE_SHIFT;
+               ei_start = early_node_map[i].start_pfn;
+               ei_start <<= PAGE_SHIFT;
+               addr = find_early_area(ei_start, ei_last,
+                                        goal, limit, size, align);
+
+               if (addr == -1ULL)
+                       continue;
+
+#if 0
+               printk(KERN_DEBUG "alloc (nid=%d %llx - %llx) (%llx - %llx) %llx %llx => %llx\n",
+                               nid,
+                               ei_start, ei_last, goal, limit, size,
+                               align, addr);
+#endif
+
+               ptr = phys_to_virt(addr);
+               memset(ptr, 0, size);
+               reserve_early_without_check(addr, addr + size, "BOOTMEM");
+               return ptr;
+       }
+
+       return NULL;
+}
+#endif
+
+
 void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
 {
        int i;
@@ -3569,7 +3816,7 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
  * then all holes in the requested range will be accounted for.
  */
-static unsigned long __meminit __absent_pages_in_range(int nid,
+unsigned long __meminit __absent_pages_in_range(int nid,
                                unsigned long range_start_pfn,
                                unsigned long range_end_pfn)
 {
@@ -3984,7 +4231,7 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
                }
 
                /* Merge backward if suitable */
-               if (start_pfn < early_node_map[i].end_pfn &&
+               if (start_pfn < early_node_map[i].start_pfn &&
                                end_pfn >= early_node_map[i].start_pfn) {
                        early_node_map[i].start_pfn = start_pfn;
                        return;
@@ -4098,7 +4345,7 @@ static int __init cmp_node_active_region(const void *a, const void *b)
 }
 
 /* sort the node_map by start_pfn */
-static void __init sort_node_map(void)
+void __init sort_node_map(void)
 {
        sort(early_node_map, (size_t)nr_nodemap_entries,
                        sizeof(struct node_active_region),
@@ -4362,8 +4609,12 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
        for (i = 0; i < MAX_NR_ZONES; i++) {
                if (i == ZONE_MOVABLE)
                        continue;
-               printk("  %-8s %0#10lx -> %0#10lx\n",
-                               zone_names[i],
+               printk("  %-8s ", zone_names[i]);
+               if (arch_zone_lowest_possible_pfn[i] ==
+                               arch_zone_highest_possible_pfn[i])
+                       printk("empty\n");
+               else
+                       printk("%0#10lx -> %0#10lx\n",
                                arch_zone_lowest_possible_pfn[i],
                                arch_zone_highest_possible_pfn[i]);
        }
@@ -4452,7 +4703,11 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
+struct pglist_data __refdata contig_page_data = {
+#ifndef CONFIG_NO_BOOTMEM
+ .bdata = &bootmem_node_data[0]
+#endif
+ };
 EXPORT_SYMBOL(contig_page_data);
 #endif
 
@@ -4795,10 +5050,11 @@ int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
        if (!write || (ret == -EINVAL))
                return ret;
        for_each_populated_zone(zone) {
-               for_each_online_cpu(cpu) {
+               for_each_possible_cpu(cpu) {
                        unsigned long  high;
                        high = zone->present_pages / percpu_pagelist_fraction;
-                       setup_pagelist_highmark(zone_pcp(zone, cpu), high);
+                       setup_pagelist_highmark(
+                               per_cpu_ptr(zone->pageset, cpu), high);
                }
        }
        return 0;
@@ -4998,23 +5254,65 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
 int set_migratetype_isolate(struct page *page)
 {
        struct zone *zone;
-       unsigned long flags;
+       struct page *curr_page;
+       unsigned long flags, pfn, iter;
+       unsigned long immobile = 0;
+       struct memory_isolate_notify arg;
+       int notifier_ret;
        int ret = -EBUSY;
        int zone_idx;
 
        zone = page_zone(page);
        zone_idx = zone_idx(zone);
+
        spin_lock_irqsave(&zone->lock, flags);
+       if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE ||
+           zone_idx == ZONE_MOVABLE) {
+               ret = 0;
+               goto out;
+       }
+
+       pfn = page_to_pfn(page);
+       arg.start_pfn = pfn;
+       arg.nr_pages = pageblock_nr_pages;
+       arg.pages_found = 0;
+
        /*
-        * In future, more migrate types will be able to be isolation target.
+        * It may be possible to isolate a pageblock even if the
+        * migratetype is not MIGRATE_MOVABLE. The memory isolation
+        * notifier chain is used by balloon drivers to return the
+        * number of pages in a range that are held by the balloon
+        * driver to shrink memory. If all the pages are accounted for
+        * by balloons, are free, or on the LRU, isolation can continue.
+        * Later, for example, when memory hotplug notifier runs, these
+        * pages reported as "can be isolated" should be isolated(freed)
+        * by the balloon driver through the memory notifier chain.
         */
-       if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE &&
-           zone_idx != ZONE_MOVABLE)
+       notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
+       notifier_ret = notifier_to_errno(notifier_ret);
+       if (notifier_ret || !arg.pages_found)
                goto out;
-       set_pageblock_migratetype(page, MIGRATE_ISOLATE);
-       move_freepages_block(zone, page, MIGRATE_ISOLATE);
-       ret = 0;
+
+       for (iter = pfn; iter < (pfn + pageblock_nr_pages); iter++) {
+               if (!pfn_valid_within(pfn))
+                       continue;
+
+               curr_page = pfn_to_page(iter);
+               if (!page_count(curr_page) || PageLRU(curr_page))
+                       continue;
+
+               immobile++;
+       }
+
+       if (arg.pages_found == immobile)
+               ret = 0;
+
 out:
+       if (!ret) {
+               set_pageblock_migratetype(page, MIGRATE_ISOLATE);
+               move_freepages_block(zone, page, MIGRATE_ISOLATE);
+       }
+
        spin_unlock_irqrestore(&zone->lock, flags);
        if (!ret)
                drain_all_pages();
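The MEM_ISOLATE_COUNT notification above is answered by balloon drivers; a rough sketch of such a callback, with the notify structure taken from this patch and the helper count_balloon_pages() purely hypothetical:

	/* Sketch: a balloon driver reporting pages it holds in the block (assumed). */
	static int balloon_isolate_notify(struct notifier_block *nb,
					  unsigned long action, void *data)
	{
		struct memory_isolate_notify *arg = data;

		if (action != MEM_ISOLATE_COUNT)
			return NOTIFY_OK;

		/* count_balloon_pages() is a hypothetical driver-internal helper. */
		arg->pages_found += count_balloon_pages(arg->start_pfn, arg->nr_pages);
		return NOTIFY_OK;
	}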
@@ -5081,3 +5379,101 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
        spin_unlock_irqrestore(&zone->lock, flags);
 }
 #endif
+
+#ifdef CONFIG_MEMORY_FAILURE
+bool is_free_buddy_page(struct page *page)
+{
+       struct zone *zone = page_zone(page);
+       unsigned long pfn = page_to_pfn(page);
+       unsigned long flags;
+       int order;
+
+       spin_lock_irqsave(&zone->lock, flags);
+       for (order = 0; order < MAX_ORDER; order++) {
+               struct page *page_head = page - (pfn & ((1 << order) - 1));
+
+               if (PageBuddy(page_head) && page_order(page_head) >= order)
+                       break;
+       }
+       spin_unlock_irqrestore(&zone->lock, flags);
+
+       return order < MAX_ORDER;
+}
+#endif
+
+static struct trace_print_flags pageflag_names[] = {
+       {1UL << PG_locked,              "locked"        },
+       {1UL << PG_error,               "error"         },
+       {1UL << PG_referenced,          "referenced"    },
+       {1UL << PG_uptodate,            "uptodate"      },
+       {1UL << PG_dirty,               "dirty"         },
+       {1UL << PG_lru,                 "lru"           },
+       {1UL << PG_active,              "active"        },
+       {1UL << PG_slab,                "slab"          },
+       {1UL << PG_owner_priv_1,        "owner_priv_1"  },
+       {1UL << PG_arch_1,              "arch_1"        },
+       {1UL << PG_reserved,            "reserved"      },
+       {1UL << PG_private,             "private"       },
+       {1UL << PG_private_2,           "private_2"     },
+       {1UL << PG_writeback,           "writeback"     },
+#ifdef CONFIG_PAGEFLAGS_EXTENDED
+       {1UL << PG_head,                "head"          },
+       {1UL << PG_tail,                "tail"          },
+#else
+       {1UL << PG_compound,            "compound"      },
+#endif
+       {1UL << PG_swapcache,           "swapcache"     },
+       {1UL << PG_mappedtodisk,        "mappedtodisk"  },
+       {1UL << PG_reclaim,             "reclaim"       },
+       {1UL << PG_buddy,               "buddy"         },
+       {1UL << PG_swapbacked,          "swapbacked"    },
+       {1UL << PG_unevictable,         "unevictable"   },
+#ifdef CONFIG_MMU
+       {1UL << PG_mlocked,             "mlocked"       },
+#endif
+#ifdef CONFIG_ARCH_USES_PG_UNCACHED
+       {1UL << PG_uncached,            "uncached"      },
+#endif
+#ifdef CONFIG_MEMORY_FAILURE
+       {1UL << PG_hwpoison,            "hwpoison"      },
+#endif
+       {-1UL,                          NULL            },
+};
+
+static void dump_page_flags(unsigned long flags)
+{
+       const char *delim = "";
+       unsigned long mask;
+       int i;
+
+       printk(KERN_ALERT "page flags: %#lx(", flags);
+
+       /* remove zone id */
+       flags &= (1UL << NR_PAGEFLAGS) - 1;
+
+       for (i = 0; pageflag_names[i].name && flags; i++) {
+
+               mask = pageflag_names[i].mask;
+               if ((flags & mask) != mask)
+                       continue;
+
+               flags &= ~mask;
+               printk("%s%s", delim, pageflag_names[i].name);
+               delim = "|";
+       }
+
+       /* check for left over flags */
+       if (flags)
+               printk("%s%#lx", delim, flags);
+
+       printk(")\n");
+}
+
+void dump_page(struct page *page)
+{
+       printk(KERN_ALERT
+              "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n",
+               page, page_count(page), page_mapcount(page),
+               page->mapping, page->index);
+       dump_page_flags(page->flags);
+       dump_page_flags(page->flags);
+}
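dump_page() can now be called from any site that holds a struct page; a minimal usage sketch (the surrounding check is illustrative only):

	/* Sketch: calling the new helper from a debugging check (illustrative). */
	static void check_page_sketch(struct page *page)
	{
		if (unlikely(page_mapcount(page) < 0)) {
			dump_page(page);	/* count/mapcount/mapping/index + decoded flags */
			dump_stack();
		}
	}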