NOMMU: Optimise away the {dac_,}mmap_min_addr tests
[safe/jmp/linux-2.6] mm/page_alloc.c
index af28e2c..74af449 100644
@@ -23,6 +23,7 @@
 #include <linux/bootmem.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <linux/pagevec.h>
 #include <linux/backing-dev.h>
 #include <linux/fault-inject.h>
 #include <linux/page-isolation.h>
-#include <linux/memcontrol.h>
+#include <linux/page_cgroup.h>
+#include <linux/debugobjects.h>
+#include <linux/kmemleak.h>
+#include <trace/events/kmem.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -68,8 +72,8 @@ EXPORT_SYMBOL(node_states);
 
 unsigned long totalram_pages __read_mostly;
 unsigned long totalreserve_pages __read_mostly;
-long nr_swap_pages;
 int percpu_pagelist_fraction;
+gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 int pageblock_order __read_mostly;
@@ -119,8 +123,8 @@ static char * const zone_names[MAX_NR_ZONES] = {
 
 int min_free_kbytes = 1024;
 
-unsigned long __meminitdata nr_kernel_pages;
-unsigned long __meminitdata nr_all_pages;
+static unsigned long __meminitdata nr_kernel_pages;
+static unsigned long __meminitdata nr_all_pages;
 static unsigned long __meminitdata dma_reserve;
 
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
@@ -148,13 +152,9 @@ static unsigned long __meminitdata dma_reserve;
   static int __meminitdata nr_nodemap_entries;
   static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
   static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
-#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
-  static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
-  static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
-#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
-  unsigned long __initdata required_kernelcore;
+  static unsigned long __initdata required_kernelcore;
   static unsigned long __initdata required_movablecore;
-  unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
+  static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
 
   /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
   int movable_zone;
@@ -163,17 +163,25 @@ static unsigned long __meminitdata dma_reserve;
 
 #if MAX_NUMNODES > 1
 int nr_node_ids __read_mostly = MAX_NUMNODES;
+int nr_online_nodes __read_mostly = 1;
 EXPORT_SYMBOL(nr_node_ids);
+EXPORT_SYMBOL(nr_online_nodes);
 #endif
 
 int page_group_by_mobility_disabled __read_mostly;
 
 static void set_pageblock_migratetype(struct page *page, int migratetype)
 {
+
+       if (unlikely(page_group_by_mobility_disabled))
+               migratetype = MIGRATE_UNMOVABLE;
+
        set_pageblock_flags_group(page, (unsigned long)migratetype,
                                        PB_migrate, PB_migrate_end);
 }
 
+bool oom_killer_disabled __read_mostly;
+
 #ifdef CONFIG_DEBUG_VM
 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 {
@@ -222,33 +230,47 @@ static inline int bad_range(struct zone *zone, struct page *page)
 
 static void bad_page(struct page *page)
 {
-       void *pc = page_get_page_cgroup(page);
+       static unsigned long resume;
+       static unsigned long nr_shown;
+       static unsigned long nr_unshown;
+
+       /* Don't complain about poisoned pages */
+       if (PageHWPoison(page)) {
+               __ClearPageBuddy(page);
+               return;
+       }
 
-       printk(KERN_EMERG "Bad page state in process '%s'\n" KERN_EMERG
-               "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n",
-               current->comm, page, (int)(2*sizeof(unsigned long)),
-               (unsigned long)page->flags, page->mapping,
-               page_mapcount(page), page_count(page));
-       if (pc) {
-               printk(KERN_EMERG "cgroup:%p\n", pc);
-               page_reset_bad_cgroup(page);
+       /*
+        * Allow a burst of 60 reports, then keep quiet for that minute;
+        * or allow a steady drip of one report per second.
+        */
+       if (nr_shown == 60) {
+               if (time_before(jiffies, resume)) {
+                       nr_unshown++;
+                       goto out;
+               }
+               if (nr_unshown) {
+                       printk(KERN_ALERT
+                             "BUG: Bad page state: %lu messages suppressed\n",
+                               nr_unshown);
+                       nr_unshown = 0;
+               }
+               nr_shown = 0;
        }
-       printk(KERN_EMERG "Trying to fix it up, but a reboot is needed\n"
-               KERN_EMERG "Backtrace:\n");
+       if (nr_shown++ == 0)
+               resume = jiffies + 60 * HZ;
+
+       printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
+               current->comm, page_to_pfn(page));
+       printk(KERN_ALERT
+               "page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
+               page, (void *)page->flags, page_count(page),
+               page_mapcount(page), page->mapping, page->index);
+
        dump_stack();
-       page->flags &= ~(1 << PG_lru    |
-                       1 << PG_private |
-                       1 << PG_locked  |
-                       1 << PG_active  |
-                       1 << PG_dirty   |
-                       1 << PG_reclaim |
-                       1 << PG_slab    |
-                       1 << PG_swapcache |
-                       1 << PG_writeback |
-                       1 << PG_buddy );
-       set_page_count(page, 0);
-       reset_page_mapcount(page);
-       page->mapping = NULL;
+out:
+       /* Leave bad fields for debug, except PageBuddy could make trouble */
+       __ClearPageBuddy(page);
        add_taint(TAINT_BAD_PAGE);
 }
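
For reference, the burst-then-suppress reporting scheme that the new bad_page()
adopts can be sketched in plain userspace C (a minimal sketch, not part of the
patch; time(NULL) stands in for jiffies and all names are illustrative, not
kernel API): allow a burst of 60 reports, then stay quiet for the rest of that
minute while counting what was suppressed.

#include <stdio.h>
#include <time.h>

static time_t resume;
static unsigned long nr_shown, nr_unshown;

static void report_bad_page(unsigned long pfn)
{
        time_t now = time(NULL);

        if (nr_shown == 60) {
                if (now < resume) {
                        nr_unshown++;           /* suppress this report */
                        return;
                }
                if (nr_unshown) {
                        printf("BUG: Bad page state: %lu messages suppressed\n",
                               nr_unshown);
                        nr_unshown = 0;
                }
                nr_shown = 0;                   /* start a new burst */
        }
        if (nr_shown++ == 0)
                resume = now + 60;              /* quiet until the minute is up */

        printf("BUG: Bad page state pfn:%05lx\n", pfn);
}

int main(void)
{
        unsigned long pfn;

        for (pfn = 0; pfn < 200; pfn++)
                report_bad_page(pfn);
        return 0;
}
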
 
@@ -272,7 +294,7 @@ static void free_compound_page(struct page *page)
        __free_pages_ok(page, compound_order(page));
 }
 
-static void prep_compound_page(struct page *page, unsigned long order)
+void prep_compound_page(struct page *page, unsigned long order)
 {
        int i;
        int nr_pages = 1 << order;
@@ -288,25 +310,31 @@ static void prep_compound_page(struct page *page, unsigned long order)
        }
 }
 
-static void destroy_compound_page(struct page *page, unsigned long order)
+static int destroy_compound_page(struct page *page, unsigned long order)
 {
        int i;
        int nr_pages = 1 << order;
+       int bad = 0;
 
-       if (unlikely(compound_order(page) != order))
+       if (unlikely(compound_order(page) != order) ||
+           unlikely(!PageHead(page))) {
                bad_page(page);
+               bad++;
+       }
 
-       if (unlikely(!PageHead(page)))
-                       bad_page(page);
        __ClearPageHead(page);
+
        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;
 
-               if (unlikely(!PageTail(p) |
-                               (p->first_page != page)))
+               if (unlikely(!PageTail(p) || (p->first_page != page))) {
                        bad_page(page);
+                       bad++;
+               }
                __ClearPageTail(p);
        }
+
+       return bad;
 }
 
 static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
@@ -388,7 +416,7 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
                return 0;
 
        if (PageBuddy(buddy) && page_order(buddy) == order) {
-               BUG_ON(page_count(buddy) != 0);
+               VM_BUG_ON(page_count(buddy) != 0);
                return 1;
        }
        return 0;
@@ -419,29 +447,31 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
  */
 
 static inline void __free_one_page(struct page *page,
-               struct zone *zone, unsigned int order)
+               struct zone *zone, unsigned int order,
+               int migratetype)
 {
        unsigned long page_idx;
-       int order_size = 1 << order;
-       int migratetype = get_pageblock_migratetype(page);
 
        if (unlikely(PageCompound(page)))
-               destroy_compound_page(page, order);
+               if (unlikely(destroy_compound_page(page, order)))
+                       return;
+
+       VM_BUG_ON(migratetype == -1);
 
        page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
 
-       VM_BUG_ON(page_idx & (order_size - 1));
+       VM_BUG_ON(page_idx & ((1 << order) - 1));
        VM_BUG_ON(bad_range(zone, page));
 
-       __mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
        while (order < MAX_ORDER-1) {
                unsigned long combined_idx;
                struct page *buddy;
 
                buddy = __page_find_buddy(page, page_idx, order);
                if (!page_is_buddy(page, buddy, order))
-                       break;          /* Move the buddy up one level. */
+                       break;
 
+               /* Our buddy is free, merge with it and move up one order. */
                list_del(&buddy->lru);
                zone->free_area[order].nr_free--;
                rmv_page_order(buddy);
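
The merge loop above relies on the classic buddy-index computation: the buddy of
a 2^order-aligned block is found by flipping bit 'order' of its index, and the
merged block starts at the index with that bit cleared. A standalone
illustration with plain integers (no struct page; purely illustrative, not part
of the patch):

#include <stdio.h>

int main(void)
{
        unsigned long page_idx = 12;    /* block starting at offset 12 in the MAX_ORDER area */
        unsigned int order = 2;         /* 2^2 = 4 pages */

        while (order < 10) {
                unsigned long buddy_idx = page_idx ^ (1UL << order);
                unsigned long combined_idx = page_idx & buddy_idx;

                printf("order %u: block %lu, buddy %lu, merged block starts at %lu\n",
                       order, page_idx, buddy_idx, combined_idx);

                /* The kernel only merges when the buddy is actually free. */
                page_idx = combined_idx;
                order++;
        }
        return 0;
}
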
@@ -456,35 +486,33 @@ static inline void __free_one_page(struct page *page,
        zone->free_area[order].nr_free++;
 }
 
+/*
+ * free_page_mlock() -- clean up attempts to free an mlocked() page.
+ * Page should not be on lru, so no need to fix that up.
+ * free_pages_check() will verify...
+ */
+static inline void free_page_mlock(struct page *page)
+{
+       __dec_zone_page_state(page, NR_MLOCK);
+       __count_vm_event(UNEVICTABLE_MLOCKFREED);
+}
+
 static inline int free_pages_check(struct page *page)
 {
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
-               (page_get_page_cgroup(page) != NULL) |
-               (page_count(page) != 0)  |
-               (page->flags & (
-                       1 << PG_lru     |
-                       1 << PG_private |
-                       1 << PG_locked  |
-                       1 << PG_active  |
-                       1 << PG_slab    |
-                       1 << PG_swapcache |
-                       1 << PG_writeback |
-                       1 << PG_reserved |
-                       1 << PG_buddy ))))
+               (atomic_read(&page->_count) != 0) |
+               (page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
                bad_page(page);
-       if (PageDirty(page))
-               __ClearPageDirty(page);
-       /*
-        * For now, we report if PG_reserved was found set, but do not
-        * clear it, and do not free the page.  But we shall soon need
-        * to do more, for when the ZERO_PAGE count wraps negative.
-        */
-       return PageReserved(page);
+               return 1;
+       }
+       if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
+               page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
+       return 0;
 }
 
 /*
- * Frees a list of pages. 
+ * Frees a number of pages from the PCP lists
  * Assumes all pages on list are in same zone, and of same order.
  * count is the number of pages to free.
  *
@@ -494,30 +522,55 @@ static inline int free_pages_check(struct page *page)
  * And clear the zone's pages_scanned counter, to hold off the "all pages are
  * pinned" detection logic.
  */
-static void free_pages_bulk(struct zone *zone, int count,
-                                       struct list_head *list, int order)
+static void free_pcppages_bulk(struct zone *zone, int count,
+                                       struct per_cpu_pages *pcp)
 {
+       int migratetype = 0;
+       int batch_free = 0;
+
        spin_lock(&zone->lock);
        zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
        zone->pages_scanned = 0;
-       while (count--) {
+
+       __mod_zone_page_state(zone, NR_FREE_PAGES, count);
+       while (count) {
                struct page *page;
+               struct list_head *list;
 
-               VM_BUG_ON(list_empty(list));
-               page = list_entry(list->prev, struct page, lru);
-               /* have to delete it as __free_one_page list manipulates */
-               list_del(&page->lru);
-               __free_one_page(page, zone, order);
+               /*
+                * Remove pages from lists in a round-robin fashion. A
+                * batch_free count is maintained that is incremented when an
+                * empty list is encountered.  This is so more pages are freed
+                * off fuller lists instead of spinning excessively around empty
+                * lists
+                */
+               do {
+                       batch_free++;
+                       if (++migratetype == MIGRATE_PCPTYPES)
+                               migratetype = 0;
+                       list = &pcp->lists[migratetype];
+               } while (list_empty(list));
+
+               do {
+                       page = list_entry(list->prev, struct page, lru);
+                       /* must delete as __free_one_page list manipulates */
+                       list_del(&page->lru);
+                       __free_one_page(page, zone, 0, migratetype);
+                       trace_mm_page_pcpu_drain(page, 0, migratetype);
+               } while (--count && --batch_free && !list_empty(list));
        }
        spin_unlock(&zone->lock);
 }
 
-static void free_one_page(struct zone *zone, struct page *page, int order)
+static void free_one_page(struct zone *zone, struct page *page, int order,
+                               int migratetype)
 {
        spin_lock(&zone->lock);
        zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
        zone->pages_scanned = 0;
-       __free_one_page(page, zone, order);
+
+       __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
+       __free_one_page(page, zone, order, migratetype);
        spin_unlock(&zone->lock);
 }
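
The round-robin drain introduced in free_pcppages_bulk() above can be shown with
plain arrays standing in for the per-migratetype lists (an illustrative sketch
only, not part of the patch): each pass skips to the next non-empty list, and
batch_free grows with every empty list skipped, so fuller lists give up more
pages per pass.

#include <stdio.h>

#define MIGRATE_PCPTYPES 3

int main(void)
{
        int lists[MIGRATE_PCPTYPES] = { 5, 0, 9 };      /* pages on each pcp list */
        int count = 8;                                  /* pages to free this call */
        int migratetype = 0;
        int batch_free = 0;

        while (count) {
                /* pick the next non-empty list, round-robin */
                do {
                        batch_free++;
                        if (++migratetype == MIGRATE_PCPTYPES)
                                migratetype = 0;
                } while (lists[migratetype] == 0);

                /* free up to batch_free pages from that list */
                do {
                        lists[migratetype]--;
                        printf("freed one page from list %d\n", migratetype);
                } while (--count && --batch_free && lists[migratetype]);
        }
        return 0;
}
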
 
@@ -525,28 +578,37 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 {
        unsigned long flags;
        int i;
-       int reserved = 0;
+       int bad = 0;
+       int wasMlocked = __TestClearPageMlocked(page);
+
+       kmemcheck_free_shadow(page, order);
 
        for (i = 0 ; i < (1 << order) ; ++i)
-               reserved += free_pages_check(page + i);
-       if (reserved)
+               bad += free_pages_check(page + i);
+       if (bad)
                return;
 
-       if (!PageHighMem(page))
+       if (!PageHighMem(page)) {
                debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
+               debug_check_no_obj_freed(page_address(page),
+                                          PAGE_SIZE << order);
+       }
        arch_free_page(page, order);
        kernel_map_pages(page, 1 << order, 0);
 
        local_irq_save(flags);
+       if (unlikely(wasMlocked))
+               free_page_mlock(page);
        __count_vm_events(PGFREE, 1 << order);
-       free_one_page(page_zone(page), page, order);
+       free_one_page(page_zone(page), page, order,
+                                       get_pageblock_migratetype(page));
        local_irq_restore(flags);
 }
 
 /*
  * permit the bootmem allocator to evade page validation on high-order frees
  */
-void __init __free_pages_bootmem(struct page *page, unsigned int order)
+void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
 {
        if (order == 0) {
                __ClearPageReserved(page);
@@ -606,35 +668,28 @@ static inline void expand(struct zone *zone, struct page *page,
 /*
  * This page is about to be returned from the page allocator
  */
-static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
+static inline int check_new_page(struct page *page)
 {
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
-               (page_get_page_cgroup(page) != NULL) |
-               (page_count(page) != 0)  |
-               (page->flags & (
-                       1 << PG_lru     |
-                       1 << PG_private |
-                       1 << PG_locked  |
-                       1 << PG_active  |
-                       1 << PG_dirty   |
-                       1 << PG_slab    |
-                       1 << PG_swapcache |
-                       1 << PG_writeback |
-                       1 << PG_reserved |
-                       1 << PG_buddy ))))
+               (atomic_read(&page->_count) != 0)  |
+               (page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
                bad_page(page);
-
-       /*
-        * For now, we report if PG_reserved was found set, but do not
-        * clear it, and do not allocate the page: as a safety net.
-        */
-       if (PageReserved(page))
                return 1;
+       }
+       return 0;
+}
+
+static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
+{
+       int i;
+
+       for (i = 0; i < (1 << order); i++) {
+               struct page *p = page + i;
+               if (unlikely(check_new_page(p)))
+                       return 1;
+       }
 
-       page->flags &= ~(1 << PG_uptodate | 1 << PG_error | 1 << PG_readahead |
-                       1 << PG_referenced | 1 << PG_arch_1 |
-                       1 << PG_owner_priv_1 | 1 << PG_mappedtodisk);
        set_page_private(page, 0);
        set_page_refcounted(page);
 
@@ -654,7 +709,8 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
  * Go through the free lists for the given migratetype and remove
  * the smallest available page from the freelists
  */
-static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
+static inline
+struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
                                                int migratetype)
 {
        unsigned int current_order;
@@ -672,7 +728,6 @@ static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
                list_del(&page->lru);
                rmv_page_order(page);
                area->nr_free--;
-               __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
                expand(zone, page, order, current_order, area, migratetype);
                return page;
        }
@@ -697,9 +752,9 @@ static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
  * Note that start_page and end_pages are not aligned on a pageblock
  * boundary. If alignment is required, use move_freepages_block()
  */
-int move_freepages(struct zone *zone,
-                       struct page *start_page, struct page *end_page,
-                       int migratetype)
+static int move_freepages(struct zone *zone,
+                         struct page *start_page, struct page *end_page,
+                         int migratetype)
 {
        struct page *page;
        unsigned long order;
@@ -717,6 +772,9 @@ int move_freepages(struct zone *zone,
 #endif
 
        for (page = start_page; page <= end_page;) {
+               /* Make sure we are not inadvertently changing nodes */
+               VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone));
+
                if (!pfn_valid_within(page_to_pfn(page))) {
                        page++;
                        continue;
@@ -738,7 +796,8 @@ int move_freepages(struct zone *zone,
        return pages_moved;
 }
 
-int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
+static int move_freepages_block(struct zone *zone, struct page *page,
+                               int migratetype)
 {
        unsigned long start_pfn, end_pfn;
        struct page *start_page, *end_page;
@@ -758,9 +817,20 @@ int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
        return move_freepages(zone, start_page, end_page, migratetype);
 }
 
+static void change_pageblock_range(struct page *pageblock_page,
+                                       int start_order, int migratetype)
+{
+       int nr_pageblocks = 1 << (start_order - pageblock_order);
+
+       while (nr_pageblocks--) {
+               set_pageblock_migratetype(pageblock_page, migratetype);
+               pageblock_page += pageblock_nr_pages;
+       }
+}
+
 /* Remove an element from the buddy allocator from the fallback list */
-static struct page *__rmqueue_fallback(struct zone *zone, int order,
-                                               int start_migratetype)
+static inline struct page *
+__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 {
        struct free_area * area;
        int current_order;
@@ -792,13 +862,15 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
                         * aggressive about taking ownership of free pages
                         */
                        if (unlikely(current_order >= (pageblock_order >> 1)) ||
-                                       start_migratetype == MIGRATE_RECLAIMABLE) {
+                                       start_migratetype == MIGRATE_RECLAIMABLE ||
+                                       page_group_by_mobility_disabled) {
                                unsigned long pages;
                                pages = move_freepages_block(zone, page,
                                                                start_migratetype);
 
                                /* Claim the whole block if over half of it is free */
-                               if (pages >= (1 << (pageblock_order-1)))
+                               if (pages >= (1 << (pageblock_order-1)) ||
+                                               page_group_by_mobility_disabled)
                                        set_pageblock_migratetype(page,
                                                                start_migratetype);
 
@@ -808,20 +880,22 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
                        /* Remove the page from the freelists */
                        list_del(&page->lru);
                        rmv_page_order(page);
-                       __mod_zone_page_state(zone, NR_FREE_PAGES,
-                                                       -(1UL << order));
 
-                       if (current_order == pageblock_order)
-                               set_pageblock_migratetype(page,
+                       /* Take ownership for orders >= pageblock_order */
+                       if (current_order >= pageblock_order)
+                               change_pageblock_range(page, current_order,
                                                        start_migratetype);
 
                        expand(zone, page, order, current_order, area, migratetype);
+
+                       trace_mm_page_alloc_extfrag(page, order, current_order,
+                               start_migratetype, migratetype);
+
                        return page;
                }
        }
 
-       /* Use MIGRATE_RESERVE rather than fail an allocation */
-       return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
+       return NULL;
 }
 
 /*
@@ -833,11 +907,24 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
 {
        struct page *page;
 
+retry_reserve:
        page = __rmqueue_smallest(zone, order, migratetype);
 
-       if (unlikely(!page))
+       if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
                page = __rmqueue_fallback(zone, order, migratetype);
 
+               /*
+                * Use MIGRATE_RESERVE rather than fail an allocation. goto
+                * is used because __rmqueue_smallest is an inline function
+                * and we want just one call site
+                */
+               if (!page) {
+                       migratetype = MIGRATE_RESERVE;
+                       goto retry_reserve;
+               }
+       }
+
+       trace_mm_page_alloc_zone_locked(page, order, migratetype);
        return page;
 }
 
@@ -848,7 +935,7 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
  */
 static int rmqueue_bulk(struct zone *zone, unsigned int order, 
                        unsigned long count, struct list_head *list,
-                       int migratetype)
+                       int migratetype, int cold)
 {
        int i;
        
@@ -867,10 +954,14 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                 * merge IO requests if the physical pages are ordered
                 * properly.
                 */
-               list_add(&page->lru, list);
+               if (likely(cold == 0))
+                       list_add(&page->lru, list);
+               else
+                       list_add_tail(&page->lru, list);
                set_page_private(page, migratetype);
                list = &page->lru;
        }
+       __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
        spin_unlock(&zone->lock);
        return i;
 }
@@ -894,7 +985,7 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
                to_drain = pcp->batch;
        else
                to_drain = pcp->count;
-       free_pages_bulk(zone, to_drain, &pcp->list, 0);
+       free_pcppages_bulk(zone, to_drain, pcp);
        pcp->count -= to_drain;
        local_irq_restore(flags);
 }
@@ -912,18 +1003,15 @@ static void drain_pages(unsigned int cpu)
        unsigned long flags;
        struct zone *zone;
 
-       for_each_zone(zone) {
+       for_each_populated_zone(zone) {
                struct per_cpu_pageset *pset;
                struct per_cpu_pages *pcp;
 
-               if (!populated_zone(zone))
-                       continue;
-
                pset = zone_pcp(zone, cpu);
 
                pcp = &pset->pcp;
                local_irq_save(flags);
-               free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+               free_pcppages_bulk(zone, pcp->count, pcp);
                pcp->count = 0;
                local_irq_restore(flags);
        }
@@ -942,7 +1030,7 @@ void drain_local_pages(void *arg)
  */
 void drain_all_pages(void)
 {
-       on_each_cpu(drain_local_pages, NULL, 0, 1);
+       on_each_cpu(drain_local_pages, NULL, 1);
 }
 
 #ifdef CONFIG_HIBERNATION
@@ -989,44 +1077,67 @@ static void free_hot_cold_page(struct page *page, int cold)
        struct zone *zone = page_zone(page);
        struct per_cpu_pages *pcp;
        unsigned long flags;
+       int migratetype;
+       int wasMlocked = __TestClearPageMlocked(page);
+
+       kmemcheck_free_shadow(page, 0);
 
        if (PageAnon(page))
                page->mapping = NULL;
        if (free_pages_check(page))
                return;
 
-       if (!PageHighMem(page))
+       if (!PageHighMem(page)) {
                debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
+               debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
+       }
        arch_free_page(page, 0);
        kernel_map_pages(page, 1, 0);
 
        pcp = &zone_pcp(zone, get_cpu())->pcp;
+       migratetype = get_pageblock_migratetype(page);
+       set_page_private(page, migratetype);
        local_irq_save(flags);
+       if (unlikely(wasMlocked))
+               free_page_mlock(page);
        __count_vm_event(PGFREE);
+
+       /*
+        * We only track unmovable, reclaimable and movable on pcp lists.
+        * Free ISOLATE pages back to the allocator because they are being
+        * offlined but treat RESERVE as movable pages so we can get those
+        * areas back if necessary. Otherwise, we may have to free
+        * excessively into the page allocator
+        */
+       if (migratetype >= MIGRATE_PCPTYPES) {
+               if (unlikely(migratetype == MIGRATE_ISOLATE)) {
+                       free_one_page(zone, page, 0, migratetype);
+                       goto out;
+               }
+               migratetype = MIGRATE_MOVABLE;
+       }
+
        if (cold)
-               list_add_tail(&page->lru, &pcp->list);
+               list_add_tail(&page->lru, &pcp->lists[migratetype]);
        else
-               list_add(&page->lru, &pcp->list);
-       set_page_private(page, get_pageblock_migratetype(page));
+               list_add(&page->lru, &pcp->lists[migratetype]);
        pcp->count++;
        if (pcp->count >= pcp->high) {
-               free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
+               free_pcppages_bulk(zone, pcp->batch, pcp);
                pcp->count -= pcp->batch;
        }
+
+out:
        local_irq_restore(flags);
        put_cpu();
 }
 
 void free_hot_page(struct page *page)
 {
+       trace_mm_page_free_direct(page, 0);
        free_hot_cold_page(page, 0);
 }
        
-void free_cold_page(struct page *page)
-{
-       free_hot_cold_page(page, 1);
-}
-
 /*
  * split_page takes a non-compound higher-order page, and splits it into
  * n (1<<order) sub-pages: page[0..n]
@@ -1041,6 +1152,16 @@ void split_page(struct page *page, unsigned int order)
 
        VM_BUG_ON(PageCompound(page));
        VM_BUG_ON(!page_count(page));
+
+#ifdef CONFIG_KMEMCHECK
+       /*
+        * Split shadow pages too, because free(page[0]) would
+        * otherwise free the whole shadow.
+        */
+       if (kmemcheck_page_is_tracked(page))
+               split_page(virt_to_page(page[0].shadow), order);
+#endif
+
        for (i = 1; i < (1 << order); i++)
                set_page_refcounted(page + i);
 }
@@ -1050,52 +1171,57 @@ void split_page(struct page *page, unsigned int order)
  * we cheat by calling it from here, in the order > 0 path.  Saves a branch
  * or two.
  */
-static struct page *buffered_rmqueue(struct zone *preferred_zone,
-                       struct zone *zone, int order, gfp_t gfp_flags)
+static inline
+struct page *buffered_rmqueue(struct zone *preferred_zone,
+                       struct zone *zone, int order, gfp_t gfp_flags,
+                       int migratetype)
 {
        unsigned long flags;
        struct page *page;
        int cold = !!(gfp_flags & __GFP_COLD);
        int cpu;
-       int migratetype = allocflags_to_migratetype(gfp_flags);
 
 again:
        cpu  = get_cpu();
        if (likely(order == 0)) {
                struct per_cpu_pages *pcp;
+               struct list_head *list;
 
                pcp = &zone_pcp(zone, cpu)->pcp;
+               list = &pcp->lists[migratetype];
                local_irq_save(flags);
-               if (!pcp->count) {
-                       pcp->count = rmqueue_bulk(zone, 0,
-                                       pcp->batch, &pcp->list, migratetype);
-                       if (unlikely(!pcp->count))
+               if (list_empty(list)) {
+                       pcp->count += rmqueue_bulk(zone, 0,
+                                       pcp->batch, list,
+                                       migratetype, cold);
+                       if (unlikely(list_empty(list)))
                                goto failed;
                }
 
-               /* Find a page of the appropriate migrate type */
-               if (cold) {
-                       list_for_each_entry_reverse(page, &pcp->list, lru)
-                               if (page_private(page) == migratetype)
-                                       break;
-               } else {
-                       list_for_each_entry(page, &pcp->list, lru)
-                               if (page_private(page) == migratetype)
-                                       break;
-               }
-
-               /* Allocate more to the pcp list if necessary */
-               if (unlikely(&page->lru == &pcp->list)) {
-                       pcp->count += rmqueue_bulk(zone, 0,
-                                       pcp->batch, &pcp->list, migratetype);
-                       page = list_entry(pcp->list.next, struct page, lru);
-               }
+               if (cold)
+                       page = list_entry(list->prev, struct page, lru);
+               else
+                       page = list_entry(list->next, struct page, lru);
 
                list_del(&page->lru);
                pcp->count--;
        } else {
+               if (unlikely(gfp_flags & __GFP_NOFAIL)) {
+                       /*
+                        * __GFP_NOFAIL is not to be used in new code.
+                        *
+                        * All __GFP_NOFAIL callers should be fixed so that they
+                        * properly detect and handle allocation failures.
+                        *
+                        * We most definitely don't want callers attempting to
+                        * allocate greater than order-1 page units with
+                        * __GFP_NOFAIL.
+                        */
+                       WARN_ON_ONCE(order > 1);
+               }
                spin_lock_irqsave(&zone->lock, flags);
                page = __rmqueue(zone, order, migratetype);
+               __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
                spin_unlock(&zone->lock);
                if (!page)
                        goto failed;
@@ -1117,10 +1243,15 @@ failed:
        return NULL;
 }
 
-#define ALLOC_NO_WATERMARKS    0x01 /* don't check watermarks at all */
-#define ALLOC_WMARK_MIN                0x02 /* use pages_min watermark */
-#define ALLOC_WMARK_LOW                0x04 /* use pages_low watermark */
-#define ALLOC_WMARK_HIGH       0x08 /* use pages_high watermark */
+/* The ALLOC_WMARK bits are used as an index to zone->watermark */
+#define ALLOC_WMARK_MIN                WMARK_MIN
+#define ALLOC_WMARK_LOW                WMARK_LOW
+#define ALLOC_WMARK_HIGH       WMARK_HIGH
+#define ALLOC_NO_WATERMARKS    0x04 /* don't check watermarks at all */
+
+/* Mask to get the watermark bits */
+#define ALLOC_WMARK_MASK       (ALLOC_NO_WATERMARKS-1)
+
 #define ALLOC_HARDER           0x10 /* try to alloc harder */
 #define ALLOC_HIGH             0x20 /* __GFP_HIGH set */
 #define ALLOC_CPUSET           0x40 /* check for correct cpuset */
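
The ALLOC_WMARK_* redefinition above turns the old flag-per-watermark scheme
into an array index: the low bits of alloc_flags name the watermark directly,
and ALLOC_WMARK_MASK extracts them, which is what get_page_from_freelist()
relies on below. A compilable sketch of the idea (the zone structure and
watermark values here are stand-ins, not kernel code):

#include <stdio.h>

enum zone_watermarks { WMARK_MIN, WMARK_LOW, WMARK_HIGH, NR_WMARK };

#define ALLOC_WMARK_MIN         WMARK_MIN
#define ALLOC_WMARK_LOW         WMARK_LOW
#define ALLOC_WMARK_HIGH        WMARK_HIGH
#define ALLOC_NO_WATERMARKS     0x04    /* don't check watermarks at all */
#define ALLOC_WMARK_MASK        (ALLOC_NO_WATERMARKS-1)
#define ALLOC_HIGH              0x20

struct zone_stub {
        unsigned long watermark[NR_WMARK];
};

int main(void)
{
        struct zone_stub zone = { .watermark = { 128, 160, 192 } };
        int alloc_flags = ALLOC_WMARK_LOW | ALLOC_HIGH;

        if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
                unsigned long mark = zone.watermark[alloc_flags & ALLOC_WMARK_MASK];
                printf("checking against watermark %lu\n", mark);       /* prints 160 */
        }
        return 0;
}
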
@@ -1378,20 +1509,18 @@ static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
  */
 static struct page *
 get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
-               struct zonelist *zonelist, int high_zoneidx, int alloc_flags)
+               struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
+               struct zone *preferred_zone, int migratetype)
 {
        struct zoneref *z;
        struct page *page = NULL;
        int classzone_idx;
-       struct zone *zone, *preferred_zone;
+       struct zone *zone;
        nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
        int zlc_active = 0;             /* set if using zonelist_cache */
        int did_zlc_setup = 0;          /* just call zlc_setup() one time */
 
-       (void)first_zones_zonelist(zonelist, high_zoneidx, nodemask,
-                                                       &preferred_zone);
        classzone_idx = zone_idx(preferred_zone);
-
 zonelist_scan:
        /*
         * Scan zonelist, looking for a zone with enough free.
@@ -1406,31 +1535,49 @@ zonelist_scan:
                        !cpuset_zone_allowed_softwall(zone, gfp_mask))
                                goto try_next_zone;
 
+               BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
                if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
                        unsigned long mark;
-                       if (alloc_flags & ALLOC_WMARK_MIN)
-                               mark = zone->pages_min;
-                       else if (alloc_flags & ALLOC_WMARK_LOW)
-                               mark = zone->pages_low;
-                       else
-                               mark = zone->pages_high;
-                       if (!zone_watermark_ok(zone, order, mark,
-                                   classzone_idx, alloc_flags)) {
-                               if (!zone_reclaim_mode ||
-                                   !zone_reclaim(zone, gfp_mask, order))
+                       int ret;
+
+                       mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
+                       if (zone_watermark_ok(zone, order, mark,
+                                   classzone_idx, alloc_flags))
+                               goto try_this_zone;
+
+                       if (zone_reclaim_mode == 0)
+                               goto this_zone_full;
+
+                       ret = zone_reclaim(zone, gfp_mask, order);
+                       switch (ret) {
+                       case ZONE_RECLAIM_NOSCAN:
+                               /* did not scan */
+                               goto try_next_zone;
+                       case ZONE_RECLAIM_FULL:
+                               /* scanned but unreclaimable */
+                               goto this_zone_full;
+                       default:
+                               /* did we reclaim enough */
+                               if (!zone_watermark_ok(zone, order, mark,
+                                               classzone_idx, alloc_flags))
                                        goto this_zone_full;
                        }
                }
 
-               page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask);
+try_this_zone:
+               page = buffered_rmqueue(preferred_zone, zone, order,
+                                               gfp_mask, migratetype);
                if (page)
                        break;
 this_zone_full:
                if (NUMA_BUILD)
                        zlc_mark_zone_full(zonelist, z);
 try_next_zone:
-               if (NUMA_BUILD && !did_zlc_setup) {
-                       /* we do zlc_setup after the first zone is tried */
+               if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
+                       /*
+                        * We do zlc_setup after the first zone is tried but only
+                        * if there are multiple nodes to make it worthwhile.
+                        */
                        allowednodes = zlc_setup(zonelist, alloc_flags);
                        zlc_active = 1;
                        did_zlc_setup = 1;
@@ -1445,44 +1592,225 @@ try_next_zone:
        return page;
 }
 
+static inline int
+should_alloc_retry(gfp_t gfp_mask, unsigned int order,
+                               unsigned long pages_reclaimed)
+{
+       /* Do not loop if specifically requested */
+       if (gfp_mask & __GFP_NORETRY)
+               return 0;
+
+       /*
+        * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
+        * means __GFP_NOFAIL, but that may not be true in other
+        * implementations.
+        */
+       if (order <= PAGE_ALLOC_COSTLY_ORDER)
+               return 1;
+
+       /*
+        * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
+        * specified, then we retry until we no longer reclaim any pages
+        * (above), or we've reclaimed an order of pages at least as
+        * large as the allocation's order. In both cases, if the
+        * allocation still fails, we stop retrying.
+        */
+       if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
+               return 1;
+
+       /*
+        * Don't let big-order allocations loop unless the caller
+        * explicitly requests that.
+        */
+       if (gfp_mask & __GFP_NOFAIL)
+               return 1;
+
+       return 0;
+}
+
+static inline struct page *
+__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
+       struct zonelist *zonelist, enum zone_type high_zoneidx,
+       nodemask_t *nodemask, struct zone *preferred_zone,
+       int migratetype)
+{
+       struct page *page;
+
+       /* Acquire the OOM killer lock for the zones in zonelist */
+       if (!try_set_zone_oom(zonelist, gfp_mask)) {
+               schedule_timeout_uninterruptible(1);
+               return NULL;
+       }
+
+       /*
+        * Go through the zonelist yet one more time, keep very high watermark
+        * here, this is only to catch a parallel oom killing, we must fail if
+        * we're still under heavy pressure.
+        */
+       page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
+               order, zonelist, high_zoneidx,
+               ALLOC_WMARK_HIGH|ALLOC_CPUSET,
+               preferred_zone, migratetype);
+       if (page)
+               goto out;
+
+       if (!(gfp_mask & __GFP_NOFAIL)) {
+               /* The OOM killer will not help higher order allocs */
+               if (order > PAGE_ALLOC_COSTLY_ORDER)
+                       goto out;
+               /*
+                * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
+                * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
+                * The caller should handle page allocation failure by itself if
+                * it specifies __GFP_THISNODE.
+                * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
+                */
+               if (gfp_mask & __GFP_THISNODE)
+                       goto out;
+       }
+       /* Exhausted what can be done so it's blamo time */
+       out_of_memory(zonelist, gfp_mask, order, nodemask);
+
+out:
+       clear_zonelist_oom(zonelist, gfp_mask);
+       return page;
+}
+
+/* The really slow allocator path where we enter direct reclaim */
+static inline struct page *
+__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
+       struct zonelist *zonelist, enum zone_type high_zoneidx,
+       nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+       int migratetype, unsigned long *did_some_progress)
+{
+       struct page *page = NULL;
+       struct reclaim_state reclaim_state;
+       struct task_struct *p = current;
+
+       cond_resched();
+
+       /* We now go into synchronous reclaim */
+       cpuset_memory_pressure_bump();
+       p->flags |= PF_MEMALLOC;
+       lockdep_set_current_reclaim_state(gfp_mask);
+       reclaim_state.reclaimed_slab = 0;
+       p->reclaim_state = &reclaim_state;
+
+       *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
+
+       p->reclaim_state = NULL;
+       lockdep_clear_current_reclaim_state();
+       p->flags &= ~PF_MEMALLOC;
+
+       cond_resched();
+
+       if (order != 0)
+               drain_all_pages();
+
+       if (likely(*did_some_progress))
+               page = get_page_from_freelist(gfp_mask, nodemask, order,
+                                       zonelist, high_zoneidx,
+                                       alloc_flags, preferred_zone,
+                                       migratetype);
+       return page;
+}
+
 /*
- * This is the 'heart' of the zoned buddy allocator.
+ * This is called in the allocator slow-path if the allocation request is of
+ * sufficient urgency to ignore watermarks and take other desperate measures
  */
-static struct page *
-__alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
-                       struct zonelist *zonelist, nodemask_t *nodemask)
+static inline struct page *
+__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
+       struct zonelist *zonelist, enum zone_type high_zoneidx,
+       nodemask_t *nodemask, struct zone *preferred_zone,
+       int migratetype)
+{
+       struct page *page;
+
+       do {
+               page = get_page_from_freelist(gfp_mask, nodemask, order,
+                       zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
+                       preferred_zone, migratetype);
+
+               if (!page && gfp_mask & __GFP_NOFAIL)
+                       congestion_wait(BLK_RW_ASYNC, HZ/50);
+       } while (!page && (gfp_mask & __GFP_NOFAIL));
+
+       return page;
+}
+
+static inline
+void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
+                                               enum zone_type high_zoneidx)
 {
-       const gfp_t wait = gfp_mask & __GFP_WAIT;
-       enum zone_type high_zoneidx = gfp_zone(gfp_mask);
        struct zoneref *z;
        struct zone *zone;
-       struct page *page;
-       struct reclaim_state reclaim_state;
-       struct task_struct *p = current;
-       int do_retry;
-       int alloc_flags;
-       int did_some_progress;
 
-       might_sleep_if(wait);
+       for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
+               wakeup_kswapd(zone, order);
+}
 
-       if (should_fail_alloc_page(gfp_mask, order))
-               return NULL;
+static inline int
+gfp_to_alloc_flags(gfp_t gfp_mask)
+{
+       struct task_struct *p = current;
+       int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
+       const gfp_t wait = gfp_mask & __GFP_WAIT;
 
-restart:
-       z = zonelist->_zonerefs;  /* the list of zones suitable for gfp_mask */
+       /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
+       BUILD_BUG_ON(__GFP_HIGH != ALLOC_HIGH);
 
-       if (unlikely(!z->zone)) {
+       /*
+        * The caller may dip into page reserves a bit more if the caller
+        * cannot run direct reclaim, or if the caller has realtime scheduling
+        * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
+        * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
+        */
+       alloc_flags |= (gfp_mask & __GFP_HIGH);
+
+       if (!wait) {
+               alloc_flags |= ALLOC_HARDER;
                /*
-                * Happens if we have an empty zonelist as a result of
-                * GFP_THISNODE being used on a memoryless node
+                * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
+                * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
                 */
-               return NULL;
+               alloc_flags &= ~ALLOC_CPUSET;
+       } else if (unlikely(rt_task(p)) && !in_interrupt())
+               alloc_flags |= ALLOC_HARDER;
+
+       if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
+               if (!in_interrupt() &&
+                   ((p->flags & PF_MEMALLOC) ||
+                    unlikely(test_thread_flag(TIF_MEMDIE))))
+                       alloc_flags |= ALLOC_NO_WATERMARKS;
        }
 
-       page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
-                       zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET);
-       if (page)
-               goto got_pg;
+       return alloc_flags;
+}
+
+static inline struct page *
+__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
+       struct zonelist *zonelist, enum zone_type high_zoneidx,
+       nodemask_t *nodemask, struct zone *preferred_zone,
+       int migratetype)
+{
+       const gfp_t wait = gfp_mask & __GFP_WAIT;
+       struct page *page = NULL;
+       int alloc_flags;
+       unsigned long pages_reclaimed = 0;
+       unsigned long did_some_progress;
+       struct task_struct *p = current;
+
+       /*
+        * In the slowpath, we sanity check order to avoid ever trying to
+        * reclaim >= MAX_ORDER areas which will never succeed. Callers may
+        * be using allocators in order of preference for an area that is
+        * too large.
+        */
+       if (order >= MAX_ORDER) {
+               WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
+               return NULL;
+       }
 
        /*
         * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
@@ -1495,135 +1823,88 @@ restart:
        if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
                goto nopage;
 
-       for_each_zone_zonelist(zone, z, zonelist, high_zoneidx)
-               wakeup_kswapd(zone, order);
+restart:
+       wake_all_kswapd(order, zonelist, high_zoneidx);
 
        /*
         * OK, we're below the kswapd watermark and have kicked background
         * reclaim. Now things get more complex, so set up alloc_flags according
         * to how we want to proceed.
-        *
-        * The caller may dip into page reserves a bit more if the caller
-        * cannot run direct reclaim, or if the caller has realtime scheduling
-        * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
-        * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
         */
-       alloc_flags = ALLOC_WMARK_MIN;
-       if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
-               alloc_flags |= ALLOC_HARDER;
-       if (gfp_mask & __GFP_HIGH)
-               alloc_flags |= ALLOC_HIGH;
-       if (wait)
-               alloc_flags |= ALLOC_CPUSET;
+       alloc_flags = gfp_to_alloc_flags(gfp_mask);
 
-       /*
-        * Go through the zonelist again. Let __GFP_HIGH and allocations
-        * coming from realtime tasks go deeper into reserves.
-        *
-        * This is the last chance, in general, before the goto nopage.
-        * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
-        * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
-        */
+       /* This is the last chance, in general, before the goto nopage. */
        page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
-                                               high_zoneidx, alloc_flags);
+                       high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
+                       preferred_zone, migratetype);
        if (page)
                goto got_pg;
 
-       /* This allocation should allow future memory freeing. */
-
 rebalance:
-       if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
-                       && !in_interrupt()) {
-               if (!(gfp_mask & __GFP_NOMEMALLOC)) {
-nofail_alloc:
-                       /* go through the zonelist yet again, ignoring mins */
-                       page = get_page_from_freelist(gfp_mask, nodemask, order,
-                               zonelist, high_zoneidx, ALLOC_NO_WATERMARKS);
-                       if (page)
-                               goto got_pg;
-                       if (gfp_mask & __GFP_NOFAIL) {
-                               congestion_wait(WRITE, HZ/50);
-                               goto nofail_alloc;
-                       }
-               }
-               goto nopage;
+       /* Allocate without watermarks if the context allows */
+       if (alloc_flags & ALLOC_NO_WATERMARKS) {
+               page = __alloc_pages_high_priority(gfp_mask, order,
+                               zonelist, high_zoneidx, nodemask,
+                               preferred_zone, migratetype);
+               if (page)
+                       goto got_pg;
        }
 
        /* Atomic allocations - we can't balance anything */
        if (!wait)
                goto nopage;
 
-       cond_resched();
-
-       /* We now go into synchronous reclaim */
-       cpuset_memory_pressure_bump();
-       p->flags |= PF_MEMALLOC;
-       reclaim_state.reclaimed_slab = 0;
-       p->reclaim_state = &reclaim_state;
-
-       did_some_progress = try_to_free_pages(zonelist, order, gfp_mask);
-
-       p->reclaim_state = NULL;
-       p->flags &= ~PF_MEMALLOC;
-
-       cond_resched();
-
-       if (order != 0)
-               drain_all_pages();
-
-       if (likely(did_some_progress)) {
-               page = get_page_from_freelist(gfp_mask, nodemask, order,
-                                       zonelist, high_zoneidx, alloc_flags);
-               if (page)
-                       goto got_pg;
-       } else if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
-               if (!try_set_zone_oom(zonelist, gfp_mask)) {
-                       schedule_timeout_uninterruptible(1);
-                       goto restart;
-               }
-
-               /*
-                * Go through the zonelist yet one more time, keep
-                * very high watermark here, this is only to catch
-                * a parallel oom killing, we must fail if we're still
-                * under heavy pressure.
-                */
-               page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
-                       order, zonelist, high_zoneidx,
-                       ALLOC_WMARK_HIGH|ALLOC_CPUSET);
-               if (page) {
-                       clear_zonelist_oom(zonelist, gfp_mask);
-                       goto got_pg;
-               }
+       /* Avoid recursion of direct reclaim */
+       if (p->flags & PF_MEMALLOC)
+               goto nopage;
 
-               /* The OOM killer will not help higher order allocs so fail */
-               if (order > PAGE_ALLOC_COSTLY_ORDER) {
-                       clear_zonelist_oom(zonelist, gfp_mask);
-                       goto nopage;
-               }
+       /* Avoid allocations with no watermarks from looping endlessly */
+       if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
+               goto nopage;
 
-               out_of_memory(zonelist, gfp_mask, order);
-               clear_zonelist_oom(zonelist, gfp_mask);
-               goto restart;
-       }
+       /* Try direct reclaim and then allocating */
+       page = __alloc_pages_direct_reclaim(gfp_mask, order,
+                                       zonelist, high_zoneidx,
+                                       nodemask,
+                                       alloc_flags, preferred_zone,
+                                       migratetype, &did_some_progress);
+       if (page)
+               goto got_pg;
 
        /*
-        * Don't let big-order allocations loop unless the caller explicitly
-        * requests that.  Wait for some write requests to complete then retry.
-        *
-        * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order
-        * <= 3, but that may not be true in other implementations.
+        * If we failed to make any progress reclaiming, then we are
+        * running out of options and have to consider going OOM
         */
-       do_retry = 0;
-       if (!(gfp_mask & __GFP_NORETRY)) {
-               if ((order <= PAGE_ALLOC_COSTLY_ORDER) ||
-                                               (gfp_mask & __GFP_REPEAT))
-                       do_retry = 1;
-               if (gfp_mask & __GFP_NOFAIL)
-                       do_retry = 1;
+       if (!did_some_progress) {
+               if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
+                       if (oom_killer_disabled)
+                               goto nopage;
+                       page = __alloc_pages_may_oom(gfp_mask, order,
+                                       zonelist, high_zoneidx,
+                                       nodemask, preferred_zone,
+                                       migratetype);
+                       if (page)
+                               goto got_pg;
+
+                       /*
+                        * The OOM killer does not trigger for high-order
+                        * ~__GFP_NOFAIL allocations so if no progress is being
+                        * made, there are no other options and retrying is
+                        * unlikely to help.
+                        */
+                       if (order > PAGE_ALLOC_COSTLY_ORDER &&
+                                               !(gfp_mask & __GFP_NOFAIL))
+                               goto nopage;
+
+                       goto restart;
+               }
        }
-       if (do_retry) {
-               congestion_wait(WRITE, HZ/50);
+
+       /* Check if we should retry the allocation */
+       pages_reclaimed += did_some_progress;
+       if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
+               /* Wait for some write requests to complete then retry */
+               congestion_wait(BLK_RW_ASYNC, HZ/50);
                goto rebalance;
        }
 
@@ -1635,69 +1916,102 @@ nopage:
                dump_stack();
                show_mem();
        }
+       return page;
 got_pg:
+       if (kmemcheck_enabled)
+               kmemcheck_pagealloc_alloc(page, order, gfp_mask);
        return page;
-}
 
-struct page *
-__alloc_pages(gfp_t gfp_mask, unsigned int order,
-               struct zonelist *zonelist)
-{
-       return __alloc_pages_internal(gfp_mask, order, zonelist, NULL);
 }
 
+/*
+ * This is the 'heart' of the zoned buddy allocator.
+ */
 struct page *
 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
-               struct zonelist *zonelist, nodemask_t *nodemask)
+                       struct zonelist *zonelist, nodemask_t *nodemask)
 {
-       return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask);
-}
+       enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+       struct zone *preferred_zone;
+       struct page *page;
+       int migratetype = allocflags_to_migratetype(gfp_mask);
+
+       gfp_mask &= gfp_allowed_mask;
+
+       lockdep_trace_alloc(gfp_mask);
+
+       might_sleep_if(gfp_mask & __GFP_WAIT);
+
+       if (should_fail_alloc_page(gfp_mask, order))
+               return NULL;
+
+       /*
+        * Check the zones suitable for the gfp_mask contain at least one
+        * valid zone. It's possible to have an empty zonelist as a result
+        * of GFP_THISNODE and a memoryless node
+        */
+       if (unlikely(!zonelist->_zonerefs->zone))
+               return NULL;
 
-EXPORT_SYMBOL(__alloc_pages);
+       /* The preferred zone is used for statistics later */
+       first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
+       if (!preferred_zone)
+               return NULL;
+
+       /* First allocation attempt */
+       page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
+                       zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
+                       preferred_zone, migratetype);
+       if (unlikely(!page))
+               page = __alloc_pages_slowpath(gfp_mask, order,
+                               zonelist, high_zoneidx, nodemask,
+                               preferred_zone, migratetype);
+
+       trace_mm_page_alloc(page, order, gfp_mask, migratetype);
+       return page;
+}
+EXPORT_SYMBOL(__alloc_pages_nodemask);
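Most callers do not invoke __alloc_pages_nodemask() directly; they go through the alloc_pages()/alloc_pages_node() wrappers, which end up here. A minimal, illustrative caller (the function name and the choice of flags are made up for the example):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Illustrative only: allocate and release an order-2 (4-page) block. */
static int example_order2_alloc(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return -ENOMEM;

	/* For lowmem pages, page_address(page) gives the mapped address. */

	__free_pages(page, 2);
	return 0;
}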
 
 /*
  * Common helper functions.
  */
 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
 {
-       struct page * page;
+       struct page *page;
+
+       /*
+        * __get_free_pages() returns a kernel virtual address, which cannot
+        * represent a highmem page
+        */
+       VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
+
        page = alloc_pages(gfp_mask, order);
        if (!page)
                return 0;
        return (unsigned long) page_address(page);
 }
-
 EXPORT_SYMBOL(__get_free_pages);
 
 unsigned long get_zeroed_page(gfp_t gfp_mask)
 {
-       struct page * page;
-
-       /*
-        * get_zeroed_page() returns a 32-bit address, which cannot represent
-        * a highmem page
-        */
-       VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
-
-       page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
-       if (page)
-               return (unsigned long) page_address(page);
-       return 0;
+       return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
 }
-
 EXPORT_SYMBOL(get_zeroed_page);
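With this change get_zeroed_page() is just __get_free_pages() plus __GFP_ZERO. These address-based helpers pair with free_page()/free_pages(); a small illustrative sketch:

/* Illustrative only. */
static int example_zeroed_page(void)
{
	unsigned long addr = get_zeroed_page(GFP_KERNEL);

	if (!addr)
		return -ENOMEM;

	/* The page contents are guaranteed to be zero here. */
	free_page(addr);
	return 0;
}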
 
 void __pagevec_free(struct pagevec *pvec)
 {
        int i = pagevec_count(pvec);
 
-       while (--i >= 0)
+       while (--i >= 0) {
+               trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
                free_hot_cold_page(pvec->pages[i], pvec->cold);
+       }
 }
 
 void __free_pages(struct page *page, unsigned int order)
 {
        if (put_page_testzero(page)) {
+               trace_mm_page_free_direct(page, order);
                if (order == 0)
                        free_hot_page(page);
                else
@@ -1717,6 +2031,59 @@ void free_pages(unsigned long addr, unsigned int order)
 
 EXPORT_SYMBOL(free_pages);
 
+/**
+ * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
+ * @size: the number of bytes to allocate
+ * @gfp_mask: GFP flags for the allocation
+ *
+ * This function is similar to alloc_pages(), except that it allocates the
+ * minimum number of pages to satisfy the request.  alloc_pages() can only
+ * allocate memory in blocks whose size is a power-of-two number of pages.
+ *
+ * This function is also limited by MAX_ORDER.
+ *
+ * Memory allocated by this function must be released by free_pages_exact().
+ */
+void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
+{
+       unsigned int order = get_order(size);
+       unsigned long addr;
+
+       addr = __get_free_pages(gfp_mask, order);
+       if (addr) {
+               unsigned long alloc_end = addr + (PAGE_SIZE << order);
+               unsigned long used = addr + PAGE_ALIGN(size);
+
+               split_page(virt_to_page((void *)addr), order);
+               while (used < alloc_end) {
+                       free_page(used);
+                       used += PAGE_SIZE;
+               }
+       }
+
+       return (void *)addr;
+}
+EXPORT_SYMBOL(alloc_pages_exact);
+
+/**
+ * free_pages_exact - release memory allocated via alloc_pages_exact()
+ * @virt: the value returned by alloc_pages_exact.
+ * @size: size of allocation, same value as passed to alloc_pages_exact().
+ *
+ * Release the memory allocated by a previous call to alloc_pages_exact.
+ */
+void free_pages_exact(void *virt, size_t size)
+{
+       unsigned long addr = (unsigned long)virt;
+       unsigned long end = addr + PAGE_ALIGN(size);
+
+       while (addr < end) {
+               free_page(addr);
+               addr += PAGE_SIZE;
+       }
+}
+EXPORT_SYMBOL(free_pages_exact);
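Because alloc_pages_exact() splits the rounded-up allocation and returns the tail pages to the allocator, a caller can ask for a size that is not a power-of-two number of pages without wasting the remainder. A minimal usage sketch (the 20KB figure is illustrative and assumes 4KB pages):

/* Illustrative only: 20KB is five pages, where alloc_pages() would need order-3 (32KB). */
static int example_exact_buffer(void)
{
	void *buf = alloc_pages_exact(20 * 1024, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* ... use the physically contiguous buffer ... */

	free_pages_exact(buf, 20 * 1024);
	return 0;
}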
+
 static unsigned int nr_free_zone_pages(int offset)
 {
        struct zoneref *z;
@@ -1729,7 +2096,7 @@ static unsigned int nr_free_zone_pages(int offset)
 
        for_each_zone_zonelist(zone, z, zonelist, offset) {
                unsigned long size = zone->present_pages;
-               unsigned long high = zone->pages_high;
+               unsigned long high = high_wmark_pages(zone);
                if (size > high)
                        sum += size - high;
        }
@@ -1804,10 +2171,7 @@ void show_free_areas(void)
        int cpu;
        struct zone *zone;
 
-       for_each_zone(zone) {
-               if (!populated_zone(zone))
-                       continue;
-
+       for_each_populated_zone(zone) {
                show_node(zone);
                printk("%s per-cpu:\n", zone->name);
 
@@ -1822,46 +2186,88 @@ void show_free_areas(void)
                }
        }
 
-       printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n"
-               " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
-               global_page_state(NR_ACTIVE),
-               global_page_state(NR_INACTIVE),
+       printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
+               " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
+               " unevictable:%lu"
+               " dirty:%lu writeback:%lu unstable:%lu\n"
+               " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
+               " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
+               global_page_state(NR_ACTIVE_ANON),
+               global_page_state(NR_INACTIVE_ANON),
+               global_page_state(NR_ISOLATED_ANON),
+               global_page_state(NR_ACTIVE_FILE),
+               global_page_state(NR_INACTIVE_FILE),
+               global_page_state(NR_ISOLATED_FILE),
+               global_page_state(NR_UNEVICTABLE),
                global_page_state(NR_FILE_DIRTY),
                global_page_state(NR_WRITEBACK),
                global_page_state(NR_UNSTABLE_NFS),
                global_page_state(NR_FREE_PAGES),
-               global_page_state(NR_SLAB_RECLAIMABLE) +
-                       global_page_state(NR_SLAB_UNRECLAIMABLE),
+               global_page_state(NR_SLAB_RECLAIMABLE),
+               global_page_state(NR_SLAB_UNRECLAIMABLE),
                global_page_state(NR_FILE_MAPPED),
+               global_page_state(NR_SHMEM),
                global_page_state(NR_PAGETABLE),
                global_page_state(NR_BOUNCE));
 
-       for_each_zone(zone) {
+       for_each_populated_zone(zone) {
                int i;
 
-               if (!populated_zone(zone))
-                       continue;
-
                show_node(zone);
                printk("%s"
                        " free:%lukB"
                        " min:%lukB"
                        " low:%lukB"
                        " high:%lukB"
-                       " active:%lukB"
-                       " inactive:%lukB"
+                       " active_anon:%lukB"
+                       " inactive_anon:%lukB"
+                       " active_file:%lukB"
+                       " inactive_file:%lukB"
+                       " unevictable:%lukB"
+                       " isolated(anon):%lukB"
+                       " isolated(file):%lukB"
                        " present:%lukB"
+                       " mlocked:%lukB"
+                       " dirty:%lukB"
+                       " writeback:%lukB"
+                       " mapped:%lukB"
+                       " shmem:%lukB"
+                       " slab_reclaimable:%lukB"
+                       " slab_unreclaimable:%lukB"
+                       " kernel_stack:%lukB"
+                       " pagetables:%lukB"
+                       " unstable:%lukB"
+                       " bounce:%lukB"
+                       " writeback_tmp:%lukB"
                        " pages_scanned:%lu"
                        " all_unreclaimable? %s"
                        "\n",
                        zone->name,
                        K(zone_page_state(zone, NR_FREE_PAGES)),
-                       K(zone->pages_min),
-                       K(zone->pages_low),
-                       K(zone->pages_high),
-                       K(zone_page_state(zone, NR_ACTIVE)),
-                       K(zone_page_state(zone, NR_INACTIVE)),
+                       K(min_wmark_pages(zone)),
+                       K(low_wmark_pages(zone)),
+                       K(high_wmark_pages(zone)),
+                       K(zone_page_state(zone, NR_ACTIVE_ANON)),
+                       K(zone_page_state(zone, NR_INACTIVE_ANON)),
+                       K(zone_page_state(zone, NR_ACTIVE_FILE)),
+                       K(zone_page_state(zone, NR_INACTIVE_FILE)),
+                       K(zone_page_state(zone, NR_UNEVICTABLE)),
+                       K(zone_page_state(zone, NR_ISOLATED_ANON)),
+                       K(zone_page_state(zone, NR_ISOLATED_FILE)),
                        K(zone->present_pages),
+                       K(zone_page_state(zone, NR_MLOCK)),
+                       K(zone_page_state(zone, NR_FILE_DIRTY)),
+                       K(zone_page_state(zone, NR_WRITEBACK)),
+                       K(zone_page_state(zone, NR_FILE_MAPPED)),
+                       K(zone_page_state(zone, NR_SHMEM)),
+                       K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
+                       K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
+                       zone_page_state(zone, NR_KERNEL_STACK) *
+                               THREAD_SIZE / 1024,
+                       K(zone_page_state(zone, NR_PAGETABLE)),
+                       K(zone_page_state(zone, NR_UNSTABLE_NFS)),
+                       K(zone_page_state(zone, NR_BOUNCE)),
+                       K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
                        zone->pages_scanned,
                        (zone_is_all_unreclaimable(zone) ? "yes" : "no")
                        );
@@ -1871,12 +2277,9 @@ void show_free_areas(void)
                printk("\n");
        }
 
-       for_each_zone(zone) {
+       for_each_populated_zone(zone) {
                unsigned long nr[MAX_ORDER], flags, order, total = 0;
 
-               if (!populated_zone(zone))
-                       continue;
-
                show_node(zone);
                printk("%s: ", zone->name);
 
@@ -1993,7 +2396,7 @@ early_param("numa_zonelist_order", setup_numa_zonelist_order);
  * sysctl handler for numa_zonelist_order
  */
 int numa_zonelist_order_handler(ctl_table *table, int write,
-               struct file *file, void __user *buffer, size_t *length,
+               void __user *buffer, size_t *length,
                loff_t *ppos)
 {
        char saved_string[NUMA_ZONELIST_ORDER_LEN];
@@ -2002,7 +2405,7 @@ int numa_zonelist_order_handler(ctl_table *table, int write,
        if (write)
                strncpy(saved_string, (char*)table->data,
                        NUMA_ZONELIST_ORDER_LEN);
-       ret = proc_dostring(table, write, file, buffer, length, ppos);
+       ret = proc_dostring(table, write, buffer, length, ppos);
        if (ret)
                return ret;
        if (write) {
@@ -2021,7 +2424,7 @@ int numa_zonelist_order_handler(ctl_table *table, int write,
 }
 
 
-#define MAX_NODE_LOAD (num_online_nodes())
+#define MAX_NODE_LOAD (nr_online_nodes)
 static int node_load[MAX_NUMNODES];
 
 /**
@@ -2043,7 +2446,7 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
        int n, val;
        int min_val = INT_MAX;
        int best_node = -1;
-       node_to_cpumask_ptr(tmp, 0);
+       const struct cpumask *tmp = cpumask_of_node(0);
 
        /* Use the local node if we haven't already */
        if (!node_isset(node, *used_node_mask)) {
@@ -2064,8 +2467,8 @@ static int find_next_best_node(int node, nodemask_t *used_node_mask)
                val += (n < node);
 
                /* Give preference to headless and unused nodes */
-               node_to_cpumask_ptr_next(tmp, n);
-               if (!cpus_empty(*tmp))
+               tmp = cpumask_of_node(n);
+               if (!cpumask_empty(tmp))
                        val += PENALTY_FOR_NODE_WITH_CPUS;
 
                /* Slight preference for less loaded node */
@@ -2230,11 +2633,10 @@ static void build_zonelists(pg_data_t *pgdat)
 
        /* NUMA-aware ordering of nodes */
        local_node = pgdat->node_id;
-       load = num_online_nodes();
+       load = nr_online_nodes;
        prev_node = local_node;
        nodes_clear(used_mask);
 
-       memset(node_load, 0, sizeof(node_load));
        memset(node_order, 0, sizeof(node_order));
        j = 0;
 
@@ -2334,16 +2736,18 @@ static void build_zonelists(pg_data_t *pgdat)
 static void build_zonelist_cache(pg_data_t *pgdat)
 {
        pgdat->node_zonelists[0].zlcache_ptr = NULL;
-       pgdat->node_zonelists[1].zlcache_ptr = NULL;
 }
 
 #endif /* CONFIG_NUMA */
 
-/* return values int ....just for stop_machine_run() */
+/* The return value is int only because stop_machine() expects it */
 static int __build_all_zonelists(void *dummy)
 {
        int nid;
 
+#ifdef CONFIG_NUMA
+       memset(node_load, 0, sizeof(node_load));
+#endif
        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
 
@@ -2359,11 +2763,12 @@ void build_all_zonelists(void)
 
        if (system_state == SYSTEM_BOOTING) {
                __build_all_zonelists(NULL);
+               mminit_verify_zonelist();
                cpuset_init_current_mems_allowed();
        } else {
                /* we have to stop all cpus to guarantee there is no user
                   of zonelist */
-               stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
+               stop_machine(__build_all_zonelists, NULL, NULL);
                /* cpuset refresh routine should be here */
        }
        vm_total_pages = nr_free_pagecache_pages();
@@ -2381,7 +2786,7 @@ void build_all_zonelists(void)
 
        printk("Built %i zonelists in %s order, mobility grouping %s.  "
                "Total pages: %ld\n",
-                       num_online_nodes(),
+                       nr_online_nodes,
                        zonelist_order_name[current_zonelist_order],
                        page_group_by_mobility_disabled ? "off" : "on",
                        vm_total_pages);
@@ -2460,8 +2865,8 @@ static inline unsigned long wait_table_bits(unsigned long size)
 
 /*
  * Mark a number of pageblocks as MIGRATE_RESERVE. The number
- * of blocks reserved is based on zone->pages_min. The memory within the
- * reserve will tend to store contiguous free pages. Setting min_free_kbytes
+ * of blocks reserved is based on min_wmark_pages(zone). The memory within
+ * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
  * higher will lead to a bigger reserve which will get freed as contiguous
  * blocks as reclaim kicks in
  */
@@ -2469,19 +2874,33 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 {
        unsigned long start_pfn, pfn, end_pfn;
        struct page *page;
-       unsigned long reserve, block_migratetype;
+       unsigned long block_migratetype;
+       int reserve;
 
        /* Get the start pfn, end pfn and the number of blocks to reserve */
        start_pfn = zone->zone_start_pfn;
        end_pfn = start_pfn + zone->spanned_pages;
-       reserve = roundup(zone->pages_min, pageblock_nr_pages) >>
+       reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
                                                        pageblock_order;
 
+       /*
+        * Reserve blocks are generally in place to help high-order atomic
+        * allocations that are short-lived. A min_free_kbytes value that
+        * would result in more than 2 reserve blocks for atomic allocations
+        * is assumed to be in place to help anti-fragmentation for the
+        * future allocation of hugepages at runtime.
+        */
+       reserve = min(2, reserve);
+
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                if (!pfn_valid(pfn))
                        continue;
                page = pfn_to_page(pfn);
 
+               /* Watch out for overlapping nodes */
+               if (page_to_nid(page) != zone_to_nid(zone))
+                       continue;
+
                /* Blocks with reserved pages will never free, skip them. */
                if (PageReserved(page))
                        continue;
@@ -2524,7 +2943,12 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
        struct page *page;
        unsigned long end_pfn = start_pfn + size;
        unsigned long pfn;
+       struct zone *z;
+
+       if (highest_memmap_pfn < end_pfn - 1)
+               highest_memmap_pfn = end_pfn - 1;
 
+       z = &NODE_DATA(nid)->node_zones[zone];
        for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                /*
                 * There can be holes in boot-time mem_map[]s
@@ -2539,10 +2963,10 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                }
                page = pfn_to_page(pfn);
                set_page_links(page, zone, nid, pfn);
+               mminit_verify_page_links(page, zone, nid, pfn);
                init_page_count(page);
                reset_page_mapcount(page);
                SetPageReserved(page);
-
                /*
                 * Mark the block movable so that blocks are reserved for
                 * movable at startup. This will force kernel allocations
@@ -2551,8 +2975,15 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                 * kernel allocations are made. Later some blocks near
                 * the start are marked MIGRATE_RESERVE by
                 * setup_zone_migrate_reserve()
+                *
+                * The pageblock bitmap only covers the zone's valid pfn range,
+                * but the memmap may also cover invalid pages (for alignment).
+                * Check here so that set_pageblock_migratetype() is never
+                * called for a pfn outside the zone.
                 */
-               if ((pfn & (pageblock_nr_pages-1)))
+               if ((z->zone_start_pfn <= pfn)
+                   && (pfn < z->zone_start_pfn + z->spanned_pages)
+                   && !(pfn & (pageblock_nr_pages - 1)))
                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 
                INIT_LIST_HEAD(&page->lru);
@@ -2580,6 +3011,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
 
 static int zone_batchsize(struct zone *zone)
 {
+#ifdef CONFIG_MMU
        int batch;
 
        /*
@@ -2605,14 +3037,32 @@ static int zone_batchsize(struct zone *zone)
         * of pages of one half of the possible page colors
         * and the other with pages of the other colors.
         */
-       batch = (1 << (fls(batch + batch/2)-1)) - 1;
+       batch = rounddown_pow_of_two(batch + batch/2) - 1;
 
        return batch;
+
+#else
+       /* The deferral and batching of frees should be suppressed under NOMMU
+        * conditions.
+        *
+        * The problem is that NOMMU needs to be able to allocate large chunks
+        * of contiguous memory as there's no hardware page translation to
+        * assemble apparent contiguous memory from discontiguous pages.
+        *
+        * Queueing large contiguous runs of pages for batching, however,
+        * causes the pages to actually be freed in smaller chunks.  As there
+        * can be a significant delay between the individual batches being
+        * recycled, this leads to the once large chunks of space being
+        * fragmented and becoming unavailable for high-order allocations.
+        */
+       return 0;
+#endif
 }
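As a rough worked example of the MMU case, assuming the usual sizing steps earlier in this function (present_pages/1024, capped at 512KB worth of pages, then divided by four; those lines are outside this hunk): a 1GB zone with 4KB pages has 262144 pages, giving 256, capped to 128, then 32 after the divide; the final clamp shown above yields rounddown_pow_of_two(32 + 16) - 1 = 31, so the per-CPU lists are drained in batches of 31 pages.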
 
-inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
+static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
 {
        struct per_cpu_pages *pcp;
+       int migratetype;
 
        memset(p, 0, sizeof(*p));
 
@@ -2620,7 +3070,8 @@ inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
        pcp->count = 0;
        pcp->high = 6 * batch;
        pcp->batch = max(1UL, 1 * batch);
-       INIT_LIST_HEAD(&pcp->list);
+       for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
+               INIT_LIST_HEAD(&pcp->lists[migratetype]);
 }
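With per-migratetype per-CPU lists, the freeing side is expected to index the list by the page's pageblock migratetype rather than using one shared list. A hedged sketch of that idea (the real free path also handles the hot/cold distinction and MIGRATE_ISOLATE, which are omitted here):

/* Sketch (assumption), not the patch's actual free path. */
static void example_pcp_queue(struct zone *zone, struct page *page)
{
	/* Caller is assumed to have preemption and interrupts disabled. */
	struct per_cpu_pages *pcp = &zone_pcp(zone, smp_processor_id())->pcp;
	int migratetype = get_pageblock_migratetype(page);

	list_add(&page->lru, &pcp->lists[migratetype]);
	pcp->count++;
}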
 
 /*
@@ -2672,11 +3123,7 @@ static int __cpuinit process_zones(int cpu)
 
        node_set_state(node, N_CPU);    /* this node has a cpu */
 
-       for_each_zone(zone) {
-
-               if (!populated_zone(zone))
-                       continue;
-
+       for_each_populated_zone(zone) {
                zone_pcp(zone, cpu) = kmalloc_node(sizeof(struct per_cpu_pageset),
                                         GFP_KERNEL, node);
                if (!zone_pcp(zone, cpu))
@@ -2686,7 +3133,7 @@ static int __cpuinit process_zones(int cpu)
 
                if (percpu_pagelist_fraction)
                        setup_pagelist_highmark(zone_pcp(zone, cpu),
-                               (zone->present_pages / percpu_pagelist_fraction));
+                           (zone->present_pages / percpu_pagelist_fraction));
        }
 
        return 0;
@@ -2697,7 +3144,7 @@ bad:
                if (dzone == zone)
                        break;
                kfree(zone_pcp(dzone, cpu));
-               zone_pcp(dzone, cpu) = NULL;
+               zone_pcp(dzone, cpu) = &boot_pageset[cpu];
        }
        return -ENOMEM;
 }
@@ -2712,7 +3159,7 @@ static inline void free_zone_pagesets(int cpu)
                /* Free per_cpu_pageset if it is slab allocated */
                if (pset != &boot_pageset[cpu])
                        kfree(pset);
-               zone_pcp(zone, cpu) = NULL;
+               zone_pcp(zone, cpu) = &boot_pageset[cpu];
        }
 }
 
@@ -2777,7 +3224,7 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
        alloc_size = zone->wait_table_hash_nr_entries
                                        * sizeof(wait_queue_head_t);
 
-       if (system_state == SYSTEM_BOOTING) {
+       if (!slab_is_available()) {
                zone->wait_table = (wait_queue_head_t *)
                        alloc_bootmem_node(pgdat, alloc_size);
        } else {
@@ -2802,6 +3249,32 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
        return 0;
 }
 
+static int __zone_pcp_update(void *data)
+{
+       struct zone *zone = data;
+       int cpu;
+       unsigned long batch = zone_batchsize(zone), flags;
+
+       for (cpu = 0; cpu < NR_CPUS; cpu++) {
+               struct per_cpu_pageset *pset;
+               struct per_cpu_pages *pcp;
+
+               pset = zone_pcp(zone, cpu);
+               pcp = &pset->pcp;
+
+               local_irq_save(flags);
+               free_pcppages_bulk(zone, pcp->count, pcp);
+               setup_pageset(pset, batch);
+               local_irq_restore(flags);
+       }
+       return 0;
+}
+
+void zone_pcp_update(struct zone *zone)
+{
+       stop_machine(__zone_pcp_update, zone, NULL);
+}
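zone_pcp_update() gives code outside the allocator a way to rebuild a zone's per-CPU pagesets after the zone changes size; memory hotplug is the natural caller. An illustrative sketch (the caller name is made up, and whether hotplug also refreshes the watermarks at this point is an assumption):

/* Illustrative only. */
static void example_after_zone_resize(struct zone *zone)
{
	setup_per_zone_wmarks();	/* recompute min/low/high watermarks */
	zone_pcp_update(zone);		/* rebuild per-CPU pagesets under stop_machine() */
}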
+
 static __meminit void zone_pcp_init(struct zone *zone)
 {
        int cpu;
@@ -2835,7 +3308,11 @@ __meminit int init_currently_empty_zone(struct zone *zone,
 
        zone->zone_start_pfn = zone_start_pfn;
 
-       memmap_init(size, pgdat->node_id, zone_idx(zone), zone_start_pfn);
+       mminit_dprintk(MMINIT_TRACE, "memmap_init",
+                       "Initialising map node %d zone %lu pfns %lu -> %lu\n",
+                       pgdat->node_id,
+                       (unsigned long)zone_idx(zone),
+                       zone_start_pfn, (zone_start_pfn + size));
 
        zone_init_free_lists(zone);
 
@@ -2878,7 +3355,7 @@ static int __meminit next_active_region_index_in_nid(int index, int nid)
  * was used and there are no special requirements, this is a convenient
  * alternative
  */
-int __meminit early_pfn_to_nid(unsigned long pfn)
+int __meminit __early_pfn_to_nid(unsigned long pfn)
 {
        int i;
 
@@ -2889,10 +3366,33 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
                if (start_pfn <= pfn && pfn < end_pfn)
                        return early_node_map[i].nid;
        }
+       /* This is a memory hole */
+       return -1;
+}
+#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+
+int __meminit early_pfn_to_nid(unsigned long pfn)
+{
+       int nid;
 
+       nid = __early_pfn_to_nid(pfn);
+       if (nid >= 0)
+               return nid;
+       /* treat memory holes as belonging to node 0 */
        return 0;
 }
-#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+
+#ifdef CONFIG_NODES_SPAN_OTHER_NODES
+bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
+{
+       int nid;
+
+       nid = __early_pfn_to_nid(pfn);
+       if (nid >= 0 && nid != node)
+               return false;
+       return true;
+}
+#endif
 
 /* Basic iterator support to walk early_node_map[] */
 #define for_each_active_range_index_in_nid(i, nid) \
@@ -2930,6 +3430,18 @@ void __init free_bootmem_with_active_regions(int nid,
        }
 }
 
+void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
+{
+       int i;
+       int ret;
+
+       for_each_active_range_index_in_nid(i, nid) {
+               ret = work_fn(early_node_map[i].start_pfn,
+                             early_node_map[i].end_pfn, data);
+               if (ret)
+                       break;
+       }
+}
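The callback passed to work_with_active_regions() receives each range's start and end PFN plus the opaque data pointer, and a non-zero return stops the walk, as the loop above shows. A minimal illustrative callback:

/* Illustrative only: sum the pages of every active range on a node. */
static int count_range_pages(unsigned long start_pfn, unsigned long end_pfn,
			     void *data)
{
	unsigned long *total = data;

	*total += end_pfn - start_pfn;
	return 0;	/* non-zero would terminate the walk early */
}

/*
 * Usage:
 *	unsigned long pages = 0;
 *	work_with_active_regions(nid, count_range_pages, &pages);
 */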
 /**
  * sparse_memory_present_with_active_regions - Call memory_present for each active range
  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
@@ -2949,62 +3461,6 @@ void __init sparse_memory_present_with_active_regions(int nid)
 }
 
 /**
- * push_node_boundaries - Push node boundaries to at least the requested boundary
- * @nid: The nid of the node to push the boundary for
- * @start_pfn: The start pfn of the node
- * @end_pfn: The end pfn of the node
- *
- * In reserve-based hot-add, mem_map is allocated that is unused until hotadd
- * time. Specifically, on x86_64, SRAT will report ranges that can potentially
- * be hotplugged even though no physical memory exists. This function allows
- * an arch to push out the node boundaries so mem_map is allocated that can
- * be used later.
- */
-#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
-void __init push_node_boundaries(unsigned int nid,
-               unsigned long start_pfn, unsigned long end_pfn)
-{
-       printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n",
-                       nid, start_pfn, end_pfn);
-
-       /* Initialise the boundary for this node if necessary */
-       if (node_boundary_end_pfn[nid] == 0)
-               node_boundary_start_pfn[nid] = -1UL;
-
-       /* Update the boundaries */
-       if (node_boundary_start_pfn[nid] > start_pfn)
-               node_boundary_start_pfn[nid] = start_pfn;
-       if (node_boundary_end_pfn[nid] < end_pfn)
-               node_boundary_end_pfn[nid] = end_pfn;
-}
-
-/* If necessary, push the node boundary out for reserve hotadd */
-static void __meminit account_node_boundary(unsigned int nid,
-               unsigned long *start_pfn, unsigned long *end_pfn)
-{
-       printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n",
-                       nid, *start_pfn, *end_pfn);
-
-       /* Return if boundary information has not been provided */
-       if (node_boundary_end_pfn[nid] == 0)
-               return;
-
-       /* Check the boundaries and update if necessary */
-       if (node_boundary_start_pfn[nid] < *start_pfn)
-               *start_pfn = node_boundary_start_pfn[nid];
-       if (node_boundary_end_pfn[nid] > *end_pfn)
-               *end_pfn = node_boundary_end_pfn[nid];
-}
-#else
-void __init push_node_boundaries(unsigned int nid,
-               unsigned long start_pfn, unsigned long end_pfn) {}
-
-static void __meminit account_node_boundary(unsigned int nid,
-               unsigned long *start_pfn, unsigned long *end_pfn) {}
-#endif
-
-
-/**
  * get_pfn_range_for_nid - Return the start and end page frames for a node
  * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
  * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
@@ -3029,9 +3485,6 @@ void __meminit get_pfn_range_for_nid(unsigned int nid,
 
        if (*start_pfn == -1UL)
                *start_pfn = 0;
-
-       /* Push the node boundaries out if requested */
-       account_node_boundary(nid, start_pfn, end_pfn);
 }
 
 /*
@@ -3039,7 +3492,7 @@ void __meminit get_pfn_range_for_nid(unsigned int nid,
  * assumption is made that zones within a node are ordered in monotonic
  * increasing memory addresses so that the "highest" populated zone is used
  */
-void __init find_usable_zone_for_movable(void)
+static void __init find_usable_zone_for_movable(void)
 {
        int zone_index;
        for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
@@ -3065,7 +3518,7 @@ void __init find_usable_zone_for_movable(void)
  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
  * zones within a node are in order of monotonic increases memory addresses
  */
-void __meminit adjust_zone_range_for_zone_movable(int nid,
+static void __meminit adjust_zone_range_for_zone_movable(int nid,
                                        unsigned long zone_type,
                                        unsigned long node_start_pfn,
                                        unsigned long node_end_pfn,
@@ -3126,7 +3579,7 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
  * then all holes in the requested range will be accounted for.
  */
-unsigned long __meminit __absent_pages_in_range(int nid,
+static unsigned long __meminit __absent_pages_in_range(int nid,
                                unsigned long range_start_pfn,
                                unsigned long range_end_pfn)
 {
@@ -3271,10 +3724,8 @@ static void __init setup_usemap(struct pglist_data *pgdat,
 {
        unsigned long usemapsize = usemap_size(zonesize);
        zone->pageblock_flags = NULL;
-       if (usemapsize) {
+       if (usemapsize)
                zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
-               memset(zone->pageblock_flags, 0, usemapsize);
-       }
 }
 #else
 static void inline setup_usemap(struct pglist_data *pgdat,
@@ -3339,10 +3790,12 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
        pgdat->nr_zones = 0;
        init_waitqueue_head(&pgdat->kswapd_wait);
        pgdat->kswapd_max_order = 0;
+       pgdat_page_cgroup_init(pgdat);
        
        for (j = 0; j < MAX_NR_ZONES; j++) {
                struct zone *zone = pgdat->node_zones + j;
                unsigned long size, realsize, memmap_pages;
+               enum lru_list l;
 
                size = zone_spanned_pages_in_node(nid, j, zones_size);
                realsize = size - zone_absent_pages_in_node(nid, j,
@@ -3353,12 +3806,14 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                 * is used by this zone for memmap. This affects the watermark
                 * and per-cpu initialisations
                 */
-               memmap_pages = (size * sizeof(struct page)) >> PAGE_SHIFT;
+               memmap_pages =
+                       PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
                if (realsize >= memmap_pages) {
                        realsize -= memmap_pages;
-                       printk(KERN_DEBUG
-                               "  %s zone: %lu pages used for memmap\n",
-                               zone_names[j], memmap_pages);
+                       if (memmap_pages)
+                               printk(KERN_DEBUG
+                                      "  %s zone: %lu pages used for memmap\n",
+                                      zone_names[j], memmap_pages);
                } else
                        printk(KERN_WARNING
                                "  %s zone: %lu pages exceeds realsize %lu\n",
@@ -3392,10 +3847,14 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                zone->prev_priority = DEF_PRIORITY;
 
                zone_pcp_init(zone);
-               INIT_LIST_HEAD(&zone->active_list);
-               INIT_LIST_HEAD(&zone->inactive_list);
-               zone->nr_scan_active = 0;
-               zone->nr_scan_inactive = 0;
+               for_each_lru(l) {
+                       INIT_LIST_HEAD(&zone->lru[l].list);
+                       zone->reclaim_stat.nr_saved_scan[l] = 0;
+               }
+               zone->reclaim_stat.recent_rotated[0] = 0;
+               zone->reclaim_stat.recent_rotated[1] = 0;
+               zone->reclaim_stat.recent_scanned[0] = 0;
+               zone->reclaim_stat.recent_scanned[1] = 0;
                zap_zone_vm_stats(zone);
                zone->flags = 0;
                if (!size)
@@ -3406,6 +3865,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                ret = init_currently_empty_zone(zone, zone_start_pfn,
                                                size, MEMMAP_EARLY);
                BUG_ON(ret);
+               memmap_init(size, nid, j, zone_start_pfn);
                zone_start_pfn += size;
        }
 }
@@ -3451,15 +3911,21 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
 }
 
-void __paginginit free_area_init_node(int nid, struct pglist_data *pgdat,
-               unsigned long *zones_size, unsigned long node_start_pfn,
-               unsigned long *zholes_size)
+void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
+               unsigned long node_start_pfn, unsigned long *zholes_size)
 {
+       pg_data_t *pgdat = NODE_DATA(nid);
+
        pgdat->node_id = nid;
        pgdat->node_start_pfn = node_start_pfn;
        calculate_node_totalpages(pgdat, zones_size, zholes_size);
 
        alloc_node_mem_map(pgdat);
+#ifdef CONFIG_FLAT_NODE_MEM_MAP
+       printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
+               nid, (unsigned long)pgdat,
+               (unsigned long)pgdat->node_mem_map);
+#endif
 
        free_area_init_core(pgdat, zones_size, zholes_size);
 }
@@ -3502,10 +3968,13 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 {
        int i;
 
-       printk(KERN_DEBUG "Entering add_active_range(%d, %lu, %lu) "
-                         "%d entries of %d used\n",
-                         nid, start_pfn, end_pfn,
-                         nr_nodemap_entries, MAX_ACTIVE_REGIONS);
+       mminit_dprintk(MMINIT_TRACE, "memory_register",
+                       "Entering add_active_range(%d, %#lx, %#lx) "
+                       "%d entries of %d used\n",
+                       nid, start_pfn, end_pfn,
+                       nr_nodemap_entries, MAX_ACTIVE_REGIONS);
+
+       mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
 
        /* Merge with existing active regions if possible */
        for (i = 0; i < nr_nodemap_entries; i++) {
@@ -3546,27 +4015,68 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 }
 
 /**
- * shrink_active_range - Shrink an existing registered range of PFNs
+ * remove_active_range - Shrink an existing registered range of PFNs
  * @nid: The node id the range is on that should be shrunk
- * @old_end_pfn: The old end PFN of the range
- * @new_end_pfn: The new PFN of the range
+ * @start_pfn: The start PFN of the range to remove
+ * @end_pfn: The end PFN of the range to remove
  *
  * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
- * The map is kept at the end physical page range that has already been
- * registered with add_active_range(). This function allows an arch to shrink
- * an existing registered range.
+ * The map is kept near the end of the physical page range that has already
+ * been registered. This function allows an arch to shrink an existing
+ * registered range.
  */
-void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
-                                               unsigned long new_end_pfn)
+void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
+                               unsigned long end_pfn)
 {
-       int i;
+       int i, j;
+       int removed = 0;
+
+       printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
+                         nid, start_pfn, end_pfn);
 
        /* Find the old active region end and shrink */
-       for_each_active_range_index_in_nid(i, nid)
-               if (early_node_map[i].end_pfn == old_end_pfn) {
-                       early_node_map[i].end_pfn = new_end_pfn;
-                       break;
+       for_each_active_range_index_in_nid(i, nid) {
+               if (early_node_map[i].start_pfn >= start_pfn &&
+                   early_node_map[i].end_pfn <= end_pfn) {
+                       /* clear it */
+                       early_node_map[i].start_pfn = 0;
+                       early_node_map[i].end_pfn = 0;
+                       removed = 1;
+                       continue;
+               }
+               if (early_node_map[i].start_pfn < start_pfn &&
+                   early_node_map[i].end_pfn > start_pfn) {
+                       unsigned long temp_end_pfn = early_node_map[i].end_pfn;
+                       early_node_map[i].end_pfn = start_pfn;
+                       if (temp_end_pfn > end_pfn)
+                               add_active_range(nid, end_pfn, temp_end_pfn);
+                       continue;
                }
+               if (early_node_map[i].start_pfn >= start_pfn &&
+                   early_node_map[i].end_pfn > end_pfn &&
+                   early_node_map[i].start_pfn < end_pfn) {
+                       early_node_map[i].start_pfn = end_pfn;
+                       continue;
+               }
+       }
+
+       if (!removed)
+               return;
+
+       /* remove the blank ones */
+       for (i = nr_nodemap_entries - 1; i > 0; i--) {
+               if (early_node_map[i].nid != nid)
+                       continue;
+               if (early_node_map[i].end_pfn)
+                       continue;
+               /* we found it, get rid of it */
+               for (j = i; j < nr_nodemap_entries - 1; j++)
+                       memcpy(&early_node_map[j], &early_node_map[j+1],
+                               sizeof(early_node_map[j]));
+               j = nr_nodemap_entries - 1;
+               memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
+               nr_nodemap_entries--;
+       }
 }
 
 /**
@@ -3580,10 +4090,6 @@ void __init remove_all_active_ranges(void)
 {
        memset(early_node_map, 0, sizeof(early_node_map));
        nr_nodemap_entries = 0;
-#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
-       memset(node_boundary_start_pfn, 0, sizeof(node_boundary_start_pfn));
-       memset(node_boundary_end_pfn, 0, sizeof(node_boundary_end_pfn));
-#endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
 }
 
 /* Compare two active node_active_regions */
@@ -3610,7 +4116,7 @@ static void __init sort_node_map(void)
 }
 
 /* Find the lowest pfn for a node */
-unsigned long __init find_min_pfn_for_node(unsigned long nid)
+static unsigned long __init find_min_pfn_for_node(int nid)
 {
        int i;
        unsigned long min_pfn = ULONG_MAX;
@@ -3621,7 +4127,7 @@ unsigned long __init find_min_pfn_for_node(unsigned long nid)
 
        if (min_pfn == ULONG_MAX) {
                printk(KERN_WARNING
-                       "Could not find start_pfn for node %lu\n", nid);
+                       "Could not find start_pfn for node %d\n", nid);
                return 0;
        }
 
@@ -3639,23 +4145,6 @@ unsigned long __init find_min_pfn_with_active_regions(void)
        return find_min_pfn_for_node(MAX_NUMNODES);
 }
 
-/**
- * find_max_pfn_with_active_regions - Find the maximum PFN registered
- *
- * It returns the maximum PFN based on information provided via
- * add_active_range().
- */
-unsigned long __init find_max_pfn_with_active_regions(void)
-{
-       int i;
-       unsigned long max_pfn = 0;
-
-       for (i = 0; i < nr_nodemap_entries; i++)
-               max_pfn = max(max_pfn, early_node_map[i].end_pfn);
-
-       return max_pfn;
-}
-
 /*
  * early_calculate_totalpages()
  * Sum pages in active regions for movable zone.
@@ -3682,11 +4171,13 @@ static unsigned long __init early_calculate_totalpages(void)
  * memory. When they don't, some nodes will have more kernelcore than
  * others
  */
-void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
+static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
 {
        int i, nid;
        unsigned long usable_startpfn;
        unsigned long kernelcore_node, kernelcore_remaining;
+       /* save the state before borrowing the nodemask */
+       nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
        unsigned long totalpages = early_calculate_totalpages();
        int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
 
@@ -3714,7 +4205,7 @@ void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
 
        /* If kernelcore was not specified, there is no ZONE_MOVABLE */
        if (!required_kernelcore)
-               return;
+               goto out;
 
        /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
        find_usable_zone_for_movable();
@@ -3813,6 +4304,10 @@ restart:
        for (nid = 0; nid < MAX_NUMNODES; nid++)
                zone_movable_pfn[nid] =
                        roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
+
+out:
+       /* restore the node_state */
+       node_states[N_HIGH_MEMORY] = saved_node_state;
 }
 
 /* Any regular memory on that node ? */
@@ -3845,7 +4340,7 @@ static void check_for_regular_memory(pg_data_t *pgdat)
 void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 {
        unsigned long nid;
-       enum zone_type i;
+       int i;
 
        /* Sort early_node_map as initialisation assumes it is sorted */
        sort_node_map();
@@ -3877,7 +4372,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
        for (i = 0; i < MAX_NR_ZONES; i++) {
                if (i == ZONE_MOVABLE)
                        continue;
-               printk("  %-8s %8lu -> %8lu\n",
+               printk("  %-8s %0#10lx -> %0#10lx\n",
                                zone_names[i],
                                arch_zone_lowest_possible_pfn[i],
                                arch_zone_highest_possible_pfn[i]);
@@ -3893,15 +4388,16 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
        /* Print out the early_node_map[] */
        printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
        for (i = 0; i < nr_nodemap_entries; i++)
-               printk("  %3d: %8lu -> %8lu\n", early_node_map[i].nid,
+               printk("  %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
                                                early_node_map[i].start_pfn,
                                                early_node_map[i].end_pfn);
 
        /* Initialise every node */
+       mminit_verify_pageflags_layout();
        setup_nr_node_ids();
        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
-               free_area_init_node(nid, pgdat, NULL,
+               free_area_init_node(nid, NULL,
                                find_min_pfn_for_node(nid), NULL);
 
                /* Any memory on that node */
@@ -3966,15 +4462,13 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-static bootmem_data_t contig_bootmem_data;
-struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
-
+struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] };
 EXPORT_SYMBOL(contig_page_data);
 #endif
 
 void __init free_area_init(unsigned long *zones_size)
 {
-       free_area_init_node(0, NODE_DATA(0), zones_size,
+       free_area_init_node(0, zones_size,
                        __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
 }
 
@@ -4032,8 +4526,8 @@ static void calculate_totalreserve_pages(void)
                                        max = zone->lowmem_reserve[j];
                        }
 
-                       /* we treat pages_high as reserved pages. */
-                       max += zone->pages_high;
+                       /* we treat the high watermark as reserved pages. */
+                       max += high_wmark_pages(zone);
 
                        if (max > zone->present_pages)
                                max = zone->present_pages;
@@ -4083,12 +4577,13 @@ static void setup_per_zone_lowmem_reserve(void)
 }
 
 /**
- * setup_per_zone_pages_min - called when min_free_kbytes changes.
+ * setup_per_zone_wmarks - called when min_free_kbytes changes
+ * or when memory is hot-{added|removed}
  *
- * Ensures that the pages_{min,low,high} values for each zone are set correctly
- * with respect to min_free_kbytes.
+ * Ensures that the watermark[min,low,high] values for each zone are set
+ * correctly with respect to min_free_kbytes.
  */
-void setup_per_zone_pages_min(void)
+void setup_per_zone_wmarks(void)
 {
        unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
        unsigned long lowmem_pages = 0;
@@ -4104,7 +4599,7 @@ void setup_per_zone_pages_min(void)
        for_each_zone(zone) {
                u64 tmp;
 
-               spin_lock_irqsave(&zone->lru_lock, flags);
+               spin_lock_irqsave(&zone->lock, flags);
                tmp = (u64)pages_min * zone->present_pages;
                do_div(tmp, lowmem_pages);
                if (is_highmem(zone)) {
@@ -4113,7 +4608,7 @@ void setup_per_zone_pages_min(void)
                         * need highmem pages, so cap pages_min to a small
                         * value here.
                         *
-                        * The (pages_high-pages_low) and (pages_low-pages_min)
+                        * The (WMARK_HIGH-WMARK_LOW) and (WMARK_LOW-WMARK_MIN)
                         * deltas controls asynch page reclaim, and so should
                         * not be capped for highmem.
                         */
@@ -4124,19 +4619,19 @@ void setup_per_zone_pages_min(void)
                                min_pages = SWAP_CLUSTER_MAX;
                        if (min_pages > 128)
                                min_pages = 128;
-                       zone->pages_min = min_pages;
+                       zone->watermark[WMARK_MIN] = min_pages;
                } else {
                        /*
                         * If it's a lowmem zone, reserve a number of pages
                         * proportionate to the zone's size.
                         */
-                       zone->pages_min = tmp;
+                       zone->watermark[WMARK_MIN] = tmp;
                }
 
-               zone->pages_low   = zone->pages_min + (tmp >> 2);
-               zone->pages_high  = zone->pages_min + (tmp >> 1);
+               zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
+               zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
                setup_zone_migrate_reserve(zone);
-               spin_unlock_irqrestore(&zone->lru_lock, flags);
+               spin_unlock_irqrestore(&zone->lock, flags);
        }
 
        /* update totalreserve_pages */
@@ -4144,6 +4639,49 @@ void setup_per_zone_pages_min(void)
 }
 
 /*
+ * The inactive anon list should be small enough that the VM never has to
+ * do too much work, but large enough that each inactive page has a chance
+ * to be referenced again before it is swapped out.
+ *
+ * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
+ * INACTIVE_ANON pages on this zone's LRU, maintained by the
+ * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
+ * the anonymous pages are kept on the inactive list.
+ *
+ * total     target    max
+ * memory    ratio     inactive anon
+ * -------------------------------------
+ *   10MB       1         5MB
+ *  100MB       1        50MB
+ *    1GB       3       250MB
+ *   10GB      10       0.9GB
+ *  100GB      31         3GB
+ *    1TB     101        10GB
+ *   10TB     320        32GB
+ */
+void calculate_zone_inactive_ratio(struct zone *zone)
+{
+       unsigned int gb, ratio;
+
+       /* Zone size in gigabytes */
+       gb = zone->present_pages >> (30 - PAGE_SHIFT);
+       if (gb)
+               ratio = int_sqrt(10 * gb);
+       else
+               ratio = 1;
+
+       zone->inactive_ratio = ratio;
+}
+
+static void __init setup_per_zone_inactive_ratio(void)
+{
+       struct zone *zone;
+
+       for_each_zone(zone)
+               calculate_zone_inactive_ratio(zone);
+}
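Working the formula for a 1GB zone with 4KB pages: gb = 262144 >> (30 - 12) = 1, so ratio = int_sqrt(10) = 3, matching the 1GB row of the table above; zones below 1GB fall back to a ratio of 1, and a 10GB zone gets int_sqrt(100) = 10.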
+
+/*
  * Initialise min_free_kbytes.
  *
  * For small machines we want it small (128k min).  For large machines
@@ -4167,7 +4705,7 @@ void setup_per_zone_pages_min(void)
  * 8192MB:     11584k
  * 16384MB:    16384k
  */
-static int __init init_per_zone_pages_min(void)
+static int __init init_per_zone_wmark_min(void)
 {
        unsigned long lowmem_kbytes;
 
@@ -4178,11 +4716,12 @@ static int __init init_per_zone_pages_min(void)
                min_free_kbytes = 128;
        if (min_free_kbytes > 65536)
                min_free_kbytes = 65536;
-       setup_per_zone_pages_min();
+       setup_per_zone_wmarks();
        setup_per_zone_lowmem_reserve();
+       setup_per_zone_inactive_ratio();
        return 0;
 }
-module_init(init_per_zone_pages_min)
+module_init(init_per_zone_wmark_min)
 
 /*
  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 
@@ -4190,22 +4729,22 @@ module_init(init_per_zone_pages_min)
  *     changes.
  */
 int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 
-       struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+       void __user *buffer, size_t *length, loff_t *ppos)
 {
-       proc_dointvec(table, write, file, buffer, length, ppos);
+       proc_dointvec(table, write, buffer, length, ppos);
        if (write)
-               setup_per_zone_pages_min();
+               setup_per_zone_wmarks();
        return 0;
 }
 
 #ifdef CONFIG_NUMA
 int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
-       struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+       void __user *buffer, size_t *length, loff_t *ppos)
 {
        struct zone *zone;
        int rc;
 
-       rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+       rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
        if (rc)
                return rc;
 
@@ -4216,12 +4755,12 @@ int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
 }
 
 int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
-       struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+       void __user *buffer, size_t *length, loff_t *ppos)
 {
        struct zone *zone;
        int rc;
 
-       rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+       rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
        if (rc)
                return rc;
 
@@ -4238,13 +4777,13 @@ int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
  *     whenever sysctl_lowmem_reserve_ratio changes.
  *
  * The reserve ratio obviously has absolutely no relation with the
- * pages_min watermarks. The lowmem reserve ratio can only make sense
+ * minimum watermarks. The lowmem reserve ratio can only make sense
  * if in function of the boot time zone sizes.
  */
 int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
-       struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+       void __user *buffer, size_t *length, loff_t *ppos)
 {
-       proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+       proc_dointvec_minmax(table, write, buffer, length, ppos);
        setup_per_zone_lowmem_reserve();
        return 0;
 }
@@ -4256,16 +4795,16 @@ int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
  */
 
 int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
-       struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+       void __user *buffer, size_t *length, loff_t *ppos)
 {
        struct zone *zone;
        unsigned int cpu;
        int ret;
 
-       ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+       ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
        if (!write || (ret == -EINVAL))
                return ret;
-       for_each_zone(zone) {
+       for_each_populated_zone(zone) {
                for_each_online_cpu(cpu) {
                        unsigned long  high;
                        high = zone->present_pages / percpu_pagelist_fraction;
@@ -4322,7 +4861,14 @@ void *__init alloc_large_system_hash(const char *tablename,
                        numentries <<= (PAGE_SHIFT - scale);
 
                /* Make sure we've got at least a 0-order allocation.. */
-               if (unlikely((numentries * bucketsize) < PAGE_SIZE))
+               if (unlikely(flags & HASH_SMALL)) {
+                       /* Makes no sense without HASH_EARLY */
+                       WARN_ON(!(flags & HASH_EARLY));
+                       if (!(numentries >> *_hash_shift)) {
+                               numentries = 1UL << *_hash_shift;
+                               BUG_ON(!numentries);
+                       }
+               } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
                        numentries = PAGE_SIZE / bucketsize;
        }
        numentries = roundup_pow_of_two(numentries);
@@ -4341,28 +4887,18 @@ void *__init alloc_large_system_hash(const char *tablename,
        do {
                size = bucketsize << log2qty;
                if (flags & HASH_EARLY)
-                       table = alloc_bootmem(size);
+                       table = alloc_bootmem_nopanic(size);
                else if (hashdist)
                        table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
                else {
-                       unsigned long order;
-                       for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++)
-                               ;
-                       table = (void*) __get_free_pages(GFP_ATOMIC, order);
                        /*
                         * If bucketsize is not a power-of-two, we may free
-                        * some pages at the end of hash table.
+                        * some pages at the end of the hash table, which
+                        * alloc_pages_exact() handles automatically.
                         */
-                       if (table) {
-                               unsigned long alloc_end = (unsigned long)table +
-                                               (PAGE_SIZE << order);
-                               unsigned long used = (unsigned long)table +
-                                               PAGE_ALIGN(size);
-                               split_page(virt_to_page(table), order);
-                               while (used < alloc_end) {
-                                       free_page(used);
-                                       used += PAGE_SIZE;
-                               }
+                       if (get_order(size) < MAX_ORDER) {
+                               table = alloc_pages_exact(size, GFP_ATOMIC);
+                               kmemleak_alloc(table, size, 1, GFP_ATOMIC);
                        }
                }
        } while (!table && size > PAGE_SIZE && --log2qty);
@@ -4384,19 +4920,6 @@ void *__init alloc_large_system_hash(const char *tablename,
        return table;
 }
 
-#ifdef CONFIG_OUT_OF_LINE_PFN_TO_PAGE
-struct page *pfn_to_page(unsigned long pfn)
-{
-       return __pfn_to_page(pfn);
-}
-unsigned long page_to_pfn(struct page *page)
-{
-       return __page_to_pfn(page);
-}
-EXPORT_SYMBOL(pfn_to_page);
-EXPORT_SYMBOL(page_to_pfn);
-#endif /* CONFIG_OUT_OF_LINE_PFN_TO_PAGE */
-
 /* Return a pointer to the bitmap storing bits affecting a block of pages */
 static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
                                                        unsigned long pfn)
@@ -4466,6 +4989,8 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
        pfn = page_to_pfn(page);
        bitmap = get_pageblock_bitmap(zone, pfn);
        bitidx = pfn_to_bitidx(zone, pfn);
+       VM_BUG_ON(pfn < zone->zone_start_pfn);
+       VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
 
        for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
                if (flags & value)
@@ -4485,13 +5010,16 @@ int set_migratetype_isolate(struct page *page)
        struct zone *zone;
        unsigned long flags;
        int ret = -EBUSY;
+       int zone_idx;
 
        zone = page_zone(page);
+       zone_idx = zone_idx(zone);
        spin_lock_irqsave(&zone->lock, flags);
        /*
         * In future, more migrate types will be able to be isolation target.
         */
-       if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
+       if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE &&
+           zone_idx != ZONE_MOVABLE)
                goto out;
        set_pageblock_migratetype(page, MIGRATE_ISOLATE);
        move_freepages_block(zone, page, MIGRATE_ISOLATE);
@@ -4563,3 +5091,24 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
        spin_unlock_irqrestore(&zone->lock, flags);
 }
 #endif
+
+#ifdef CONFIG_MEMORY_FAILURE
+bool is_free_buddy_page(struct page *page)
+{
+       struct zone *zone = page_zone(page);
+       unsigned long pfn = page_to_pfn(page);
+       unsigned long flags;
+       int order;
+
+       spin_lock_irqsave(&zone->lock, flags);
+       for (order = 0; order < MAX_ORDER; order++) {
+               struct page *page_head = page - (pfn & ((1 << order) - 1));
+
+               if (PageBuddy(page_head) && page_order(page_head) >= order)
+                       break;
+       }
+       spin_unlock_irqrestore(&zone->lock, flags);
+
+       return order < MAX_ORDER;
+}
+#endif
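The head computation above works because buddy blocks are order-aligned: for example, with pfn 0x12345 and order 3, pfn & ((1 << 3) - 1) is 5, so page_head sits at pfn 0x12340; if that head is PageBuddy with page_order() >= 3, the whole aligned 8-page block, and therefore the page being checked, is on the free lists.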