Merge branch 'akpm'
mm/page_alloc.c
index b098596..a5f3c27 100644
@@ -23,6 +23,7 @@
 #include <linux/bootmem.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <linux/pagevec.h>
@@ -161,7 +162,9 @@ static unsigned long __meminitdata dma_reserve;
 
 #if MAX_NUMNODES > 1
 int nr_node_ids __read_mostly = MAX_NUMNODES;
+int nr_online_nodes __read_mostly = 1;
 EXPORT_SYMBOL(nr_node_ids);
+EXPORT_SYMBOL(nr_online_nodes);
 #endif
 
 int page_group_by_mobility_disabled __read_mostly;
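
Note on the new nr_online_nodes counter introduced above: it caches num_online_nodes() so the hot paths further down (the zonelist scan, build_zonelists) can test the node count without recomputing a nodemask weight. A minimal sketch of how such a cache is kept in sync -- the actual update lives in the companion nodemask.h change, and the helper bodies here are assumptions, not part of this hunk:

    /* Sketch only: node_set_state()/node_clear_state(), N_ONLINE and
     * num_online_nodes() as declared in include/linux/nodemask.h. */
    static inline void node_set_online(int nid)
    {
            node_set_state(nid, N_ONLINE);
            nr_online_nodes = num_online_nodes();   /* refresh cached count */
    }

    static inline void node_set_offline(int nid)
    {
            node_clear_state(nid, N_ONLINE);
            nr_online_nodes = num_online_nodes();
    }
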
@@ -176,6 +179,8 @@ static void set_pageblock_migratetype(struct page *page, int migratetype)
                                        PB_migrate, PB_migrate_end);
 }
 
+bool oom_killer_disabled __read_mostly;
+
 #ifdef CONFIG_DEBUG_VM
 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 {
@@ -298,23 +303,6 @@ void prep_compound_page(struct page *page, unsigned long order)
        }
 }
 
-#ifdef CONFIG_HUGETLBFS
-void prep_compound_gigantic_page(struct page *page, unsigned long order)
-{
-       int i;
-       int nr_pages = 1 << order;
-       struct page *p = page + 1;
-
-       set_compound_page_dtor(page, free_compound_page);
-       set_compound_order(page, order);
-       __SetPageHead(page);
-       for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
-               __SetPageTail(p);
-               p->first_page = page;
-       }
-}
-#endif
-
 static int destroy_compound_page(struct page *page, unsigned long order)
 {
        int i;
@@ -421,7 +409,7 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
                return 0;
 
        if (PageBuddy(buddy) && page_order(buddy) == order) {
-               BUG_ON(page_count(buddy) != 0);
+               VM_BUG_ON(page_count(buddy) != 0);
                return 1;
        }
        return 0;
@@ -452,22 +440,22 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
  */
 
 static inline void __free_one_page(struct page *page,
-               struct zone *zone, unsigned int order)
+               struct zone *zone, unsigned int order,
+               int migratetype)
 {
        unsigned long page_idx;
-       int order_size = 1 << order;
-       int migratetype = get_pageblock_migratetype(page);
 
        if (unlikely(PageCompound(page)))
                if (unlikely(destroy_compound_page(page, order)))
                        return;
 
+       VM_BUG_ON(migratetype == -1);
+
        page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
 
-       VM_BUG_ON(page_idx & (order_size - 1));
+       VM_BUG_ON(page_idx & ((1 << order) - 1));
        VM_BUG_ON(bad_range(zone, page));
 
-       __mod_zone_page_state(zone, NR_FREE_PAGES, order_size);
        while (order < MAX_ORDER-1) {
                unsigned long combined_idx;
                struct page *buddy;
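
The coalescing loop that begins here leans on the buddy-index arithmetic: the buddy of a 2^order block differs from it only in bit 'order', and clearing that bit yields the merged 2^(order+1) block. A user-space sketch of that arithmetic (helper names are illustrative, not the kernel's own):

    /* buddy of the block at page_idx, both blocks of size 1 << order */
    static unsigned long find_buddy_idx(unsigned long page_idx, unsigned int order)
    {
            return page_idx ^ (1UL << order);
    }

    /* index of the combined (order + 1) block holding page and buddy */
    static unsigned long find_combined_idx(unsigned long page_idx, unsigned int order)
    {
            return page_idx & ~(1UL << order);
    }
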
@@ -491,12 +479,27 @@ static inline void __free_one_page(struct page *page,
        zone->free_area[order].nr_free++;
 }
 
+#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
+/*
+ * free_page_mlock() -- clean up attempts to free an mlocked() page.
+ * Page should not be on lru, so no need to fix that up.
+ * free_pages_check() will verify...
+ */
+static inline void free_page_mlock(struct page *page)
+{
+       __ClearPageMlocked(page);
+       __dec_zone_page_state(page, NR_MLOCK);
+       __count_vm_event(UNEVICTABLE_MLOCKFREED);
+}
+#else
+static void free_page_mlock(struct page *page) { }
+#endif
+
 static inline int free_pages_check(struct page *page)
 {
-       free_page_mlock(page);
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
-               (page_count(page) != 0)  |
+               (atomic_read(&page->_count) != 0) |
                (page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
                bad_page(page);
                return 1;
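
free_pages_check() above folds its independent error conditions into one bitwise OR so the common clean-page case costs a single conditional branch. A stand-alone sketch of the pattern, with simplified field names (not kernel code):

    #include <stdbool.h>

    /* True if any "must be zero/NULL at free time" condition is violated;
     * OR-ing the terms lets the caller test them with one unlikely() branch. */
    static bool page_state_is_bad(int mapcount, const void *mapping, int refcount,
                                  unsigned long flags, unsigned long check_mask)
    {
            return (mapcount | (mapping != NULL) | (refcount != 0) |
                    (flags & check_mask)) != 0;
    }
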
@@ -523,6 +526,8 @@ static void free_pages_bulk(struct zone *zone, int count,
        spin_lock(&zone->lock);
        zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
        zone->pages_scanned = 0;
+
+       __mod_zone_page_state(zone, NR_FREE_PAGES, count << order);
        while (count--) {
                struct page *page;
 
@@ -530,17 +535,20 @@ static void free_pages_bulk(struct zone *zone, int count,
                page = list_entry(list->prev, struct page, lru);
                /* have to delete it as __free_one_page list manipulates */
                list_del(&page->lru);
-               __free_one_page(page, zone, order);
+               __free_one_page(page, zone, order, page_private(page));
        }
        spin_unlock(&zone->lock);
 }
 
-static void free_one_page(struct zone *zone, struct page *page, int order)
+static void free_one_page(struct zone *zone, struct page *page, int order,
+                               int migratetype)
 {
        spin_lock(&zone->lock);
        zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
        zone->pages_scanned = 0;
-       __free_one_page(page, zone, order);
+
+       __mod_zone_page_state(zone, NR_FREE_PAGES, 1 << order);
+       __free_one_page(page, zone, order, migratetype);
        spin_unlock(&zone->lock);
 }
 
@@ -549,6 +557,9 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        unsigned long flags;
        int i;
        int bad = 0;
+       int clearMlocked = PageMlocked(page);
+
+       kmemcheck_free_shadow(page, order);
 
        for (i = 0 ; i < (1 << order) ; ++i)
                bad += free_pages_check(page + i);
@@ -564,8 +575,11 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        kernel_map_pages(page, 1 << order, 0);
 
        local_irq_save(flags);
+       if (unlikely(clearMlocked))
+               free_page_mlock(page);
        __count_vm_events(PGFREE, 1 << order);
-       free_one_page(page_zone(page), page, order);
+       free_one_page(page_zone(page), page, order,
+                                       get_pageblock_migratetype(page));
        local_irq_restore(flags);
 }
 
@@ -636,7 +650,7 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
 {
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
-               (page_count(page) != 0)  |
+               (atomic_read(&page->_count) != 0)  |
                (page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
                bad_page(page);
                return 1;
@@ -661,7 +675,8 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
  * Go through the free lists for the given migratetype and remove
  * the smallest available page from the freelists
  */
-static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
+static inline
+struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
                                                int migratetype)
 {
        unsigned int current_order;
@@ -679,7 +694,6 @@ static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
                list_del(&page->lru);
                rmv_page_order(page);
                area->nr_free--;
-               __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order));
                expand(zone, page, order, current_order, area, migratetype);
                return page;
        }
@@ -770,8 +784,8 @@ static int move_freepages_block(struct zone *zone, struct page *page,
 }
 
 /* Remove an element from the buddy allocator from the fallback list */
-static struct page *__rmqueue_fallback(struct zone *zone, int order,
-                                               int start_migratetype)
+static inline struct page *
+__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 {
        struct free_area * area;
        int current_order;
@@ -819,8 +833,6 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
                        /* Remove the page from the freelists */
                        list_del(&page->lru);
                        rmv_page_order(page);
-                       __mod_zone_page_state(zone, NR_FREE_PAGES,
-                                                       -(1UL << order));
 
                        if (current_order == pageblock_order)
                                set_pageblock_migratetype(page,
@@ -831,8 +843,7 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
                }
        }
 
-       /* Use MIGRATE_RESERVE rather than fail an allocation */
-       return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
+       return NULL;
 }
 
 /*
@@ -844,11 +855,23 @@ static struct page *__rmqueue(struct zone *zone, unsigned int order,
 {
        struct page *page;
 
+retry_reserve:
        page = __rmqueue_smallest(zone, order, migratetype);
 
-       if (unlikely(!page))
+       if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
                page = __rmqueue_fallback(zone, order, migratetype);
 
+               /*
+                * Use MIGRATE_RESERVE rather than fail an allocation. A goto
+                * is used because __rmqueue_smallest() is an inline function
+                * and we want just one call site.
+                */
+               if (!page) {
+                       migratetype = MIGRATE_RESERVE;
+                       goto retry_reserve;
+               }
+       }
+
        return page;
 }
 
@@ -882,6 +905,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                set_page_private(page, migratetype);
                list = &page->lru;
        }
+       __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
        spin_unlock(&zone->lock);
        return i;
 }
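
To keep the NR_FREE_PAGES accounting straight after the per-page updates were pulled out of __rmqueue_smallest()/__rmqueue_fallback(), here is the full set of sites that now adjust the counter, all visible in the hunks of this file (summary only, no new code):

    /*
     * free side:   free_pages_bulk()      +(count << order)  under zone->lock
     *              free_one_page()        +(1 << order)      under zone->lock
     * alloc side:  rmqueue_bulk()         -(i << order)      under zone->lock
     *              buffered_rmqueue()     -(1 << order)      under zone->lock
     *                                     (order > 0 path, see the hunk below)
     */
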
@@ -997,6 +1021,9 @@ static void free_hot_cold_page(struct page *page, int cold)
        struct zone *zone = page_zone(page);
        struct per_cpu_pages *pcp;
        unsigned long flags;
+       int clearMlocked = PageMlocked(page);
+
+       kmemcheck_free_shadow(page, 0);
 
        if (PageAnon(page))
                page->mapping = NULL;
@@ -1011,13 +1038,16 @@ static void free_hot_cold_page(struct page *page, int cold)
        kernel_map_pages(page, 1, 0);
 
        pcp = &zone_pcp(zone, get_cpu())->pcp;
+       set_page_private(page, get_pageblock_migratetype(page));
        local_irq_save(flags);
+       if (unlikely(clearMlocked))
+               free_page_mlock(page);
        __count_vm_event(PGFREE);
+
        if (cold)
                list_add_tail(&page->lru, &pcp->list);
        else
                list_add(&page->lru, &pcp->list);
-       set_page_private(page, get_pageblock_migratetype(page));
        pcp->count++;
        if (pcp->count >= pcp->high) {
                free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
@@ -1051,6 +1081,16 @@ void split_page(struct page *page, unsigned int order)
 
        VM_BUG_ON(PageCompound(page));
        VM_BUG_ON(!page_count(page));
+
+#ifdef CONFIG_KMEMCHECK
+       /*
+        * Split shadow pages too, because free(page[0]) would
+        * otherwise free the whole shadow.
+        */
+       if (kmemcheck_page_is_tracked(page))
+               split_page(virt_to_page(page[0].shadow), order);
+#endif
+
        for (i = 1; i < (1 << order); i++)
                set_page_refcounted(page + i);
 }
@@ -1060,14 +1100,15 @@ void split_page(struct page *page, unsigned int order)
  * we cheat by calling it from here, in the order > 0 path.  Saves a branch
  * or two.
  */
-static struct page *buffered_rmqueue(struct zone *preferred_zone,
-                       struct zone *zone, int order, gfp_t gfp_flags)
+static inline
+struct page *buffered_rmqueue(struct zone *preferred_zone,
+                       struct zone *zone, int order, gfp_t gfp_flags,
+                       int migratetype)
 {
        unsigned long flags;
        struct page *page;
        int cold = !!(gfp_flags & __GFP_COLD);
        int cpu;
-       int migratetype = allocflags_to_migratetype(gfp_flags);
 
 again:
        cpu  = get_cpu();
@@ -1104,8 +1145,22 @@ again:
                list_del(&page->lru);
                pcp->count--;
        } else {
+               if (unlikely(gfp_flags & __GFP_NOFAIL)) {
+                       /*
+                        * __GFP_NOFAIL is not to be used in new code.
+                        *
+                        * All __GFP_NOFAIL callers should be fixed so that they
+                        * properly detect and handle allocation failures.
+                        *
+                        * We most definitely don't want callers attempting to
+                        * allocate greater than single-page units with
+                        * __GFP_NOFAIL.
+                        */
+                       WARN_ON_ONCE(order > 0);
+               }
                spin_lock_irqsave(&zone->lock, flags);
                page = __rmqueue(zone, order, migratetype);
+               __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
                spin_unlock(&zone->lock);
                if (!page)
                        goto failed;
@@ -1127,10 +1182,15 @@ failed:
        return NULL;
 }
 
-#define ALLOC_NO_WATERMARKS    0x01 /* don't check watermarks at all */
-#define ALLOC_WMARK_MIN                0x02 /* use pages_min watermark */
-#define ALLOC_WMARK_LOW                0x04 /* use pages_low watermark */
-#define ALLOC_WMARK_HIGH       0x08 /* use pages_high watermark */
+/* The ALLOC_WMARK bits are used as an index to zone->watermark */
+#define ALLOC_WMARK_MIN                WMARK_MIN
+#define ALLOC_WMARK_LOW                WMARK_LOW
+#define ALLOC_WMARK_HIGH       WMARK_HIGH
+#define ALLOC_NO_WATERMARKS    0x04 /* don't check watermarks at all */
+
+/* Mask to get the watermark bits */
+#define ALLOC_WMARK_MASK       (ALLOC_NO_WATERMARKS-1)
+
 #define ALLOC_HARDER           0x10 /* try to alloc harder */
 #define ALLOC_HIGH             0x20 /* __GFP_HIGH set */
 #define ALLOC_CPUSET           0x40 /* check for correct cpuset */
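
The reworked ALLOC_WMARK_* constants now double as indices into the zone->watermark[] array that replaces pages_min/pages_low/pages_high, so the watermark lookup becomes one array access instead of an if/else ladder. A compact sketch, assuming the WMARK_* enum added to mmzone.h by this series (WMARK_MIN = 0, WMARK_LOW = 1, WMARK_HIGH = 2):

    enum zone_watermarks { WMARK_MIN, WMARK_LOW, WMARK_HIGH, NR_WMARK };

    #define ALLOC_NO_WATERMARKS     0x04
    #define ALLOC_WMARK_MASK        (ALLOC_NO_WATERMARKS - 1)

    /* One lookup instead of three compares; the BUILD_BUG_ON() added to
     * get_page_from_freelist() checks that ALLOC_NO_WATERMARKS lies above
     * the watermark indices so the flag bit never aliases a valid index. */
    static unsigned long wmark_for_flags(const unsigned long watermark[NR_WMARK],
                                         int alloc_flags)
    {
            return watermark[alloc_flags & ALLOC_WMARK_MASK];
    }
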
@@ -1388,26 +1448,18 @@ static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
  */
 static struct page *
 get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
-               struct zonelist *zonelist, int high_zoneidx, int alloc_flags)
+               struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
+               struct zone *preferred_zone, int migratetype)
 {
        struct zoneref *z;
        struct page *page = NULL;
        int classzone_idx;
-       struct zone *zone, *preferred_zone;
+       struct zone *zone;
        nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
        int zlc_active = 0;             /* set if using zonelist_cache */
        int did_zlc_setup = 0;          /* just call zlc_setup() one time */
 
-       (void)first_zones_zonelist(zonelist, high_zoneidx, nodemask,
-                                                       &preferred_zone);
-       if (!preferred_zone)
-               return NULL;
-
        classzone_idx = zone_idx(preferred_zone);
-
-       if (WARN_ON_ONCE(order >= MAX_ORDER))
-               return NULL;
-
 zonelist_scan:
        /*
         * Scan zonelist, looking for a zone with enough free.
@@ -1422,31 +1474,49 @@ zonelist_scan:
                        !cpuset_zone_allowed_softwall(zone, gfp_mask))
                                goto try_next_zone;
 
+               BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
                if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
                        unsigned long mark;
-                       if (alloc_flags & ALLOC_WMARK_MIN)
-                               mark = zone->pages_min;
-                       else if (alloc_flags & ALLOC_WMARK_LOW)
-                               mark = zone->pages_low;
-                       else
-                               mark = zone->pages_high;
-                       if (!zone_watermark_ok(zone, order, mark,
-                                   classzone_idx, alloc_flags)) {
-                               if (!zone_reclaim_mode ||
-                                   !zone_reclaim(zone, gfp_mask, order))
+                       int ret;
+
+                       mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
+                       if (zone_watermark_ok(zone, order, mark,
+                                   classzone_idx, alloc_flags))
+                               goto try_this_zone;
+
+                       if (zone_reclaim_mode == 0)
+                               goto this_zone_full;
+
+                       ret = zone_reclaim(zone, gfp_mask, order);
+                       switch (ret) {
+                       case ZONE_RECLAIM_NOSCAN:
+                               /* did not scan */
+                               goto try_next_zone;
+                       case ZONE_RECLAIM_FULL:
+                               /* scanned but unreclaimable */
+                               goto this_zone_full;
+                       default:
+                               /* did we reclaim enough */
+                               if (!zone_watermark_ok(zone, order, mark,
+                                               classzone_idx, alloc_flags))
                                        goto this_zone_full;
                        }
                }
 
-               page = buffered_rmqueue(preferred_zone, zone, order, gfp_mask);
+try_this_zone:
+               page = buffered_rmqueue(preferred_zone, zone, order,
+                                               gfp_mask, migratetype);
                if (page)
                        break;
 this_zone_full:
                if (NUMA_BUILD)
                        zlc_mark_zone_full(zonelist, z);
 try_next_zone:
-               if (NUMA_BUILD && !did_zlc_setup) {
-                       /* we do zlc_setup after the first zone is tried */
+               if (NUMA_BUILD && !did_zlc_setup && nr_online_nodes > 1) {
+                       /*
+                        * We do zlc_setup() after the first zone is tried, but
+                        * only if there are enough nodes to make it worthwhile.
+                        */
                        allowednodes = zlc_setup(zonelist, alloc_flags);
                        zlc_active = 1;
                        did_zlc_setup = 1;
@@ -1500,7 +1570,8 @@ should_alloc_retry(gfp_t gfp_mask, unsigned int order,
 static inline struct page *
 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist, enum zone_type high_zoneidx,
-       nodemask_t *nodemask)
+       nodemask_t *nodemask, struct zone *preferred_zone,
+       int migratetype)
 {
        struct page *page;
 
@@ -1517,12 +1588,13 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
         */
        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
                order, zonelist, high_zoneidx,
-               ALLOC_WMARK_HIGH|ALLOC_CPUSET);
+               ALLOC_WMARK_HIGH|ALLOC_CPUSET,
+               preferred_zone, migratetype);
        if (page)
                goto out;
 
        /* The OOM killer will not help higher order allocs */
-       if (order > PAGE_ALLOC_COSTLY_ORDER)
+       if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_NOFAIL))
                goto out;
 
        /* Exhausted what can be done so it's blamo time */
@@ -1537,7 +1609,8 @@ out:
 static inline struct page *
 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist, enum zone_type high_zoneidx,
-       nodemask_t *nodemask, int alloc_flags, unsigned long *did_some_progress)
+       nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+       int migratetype, unsigned long *did_some_progress)
 {
        struct page *page = NULL;
        struct reclaim_state reclaim_state;
@@ -1569,19 +1642,12 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 
        if (likely(*did_some_progress))
                page = get_page_from_freelist(gfp_mask, nodemask, order,
-                                       zonelist, high_zoneidx, alloc_flags);
+                                       zonelist, high_zoneidx,
+                                       alloc_flags, preferred_zone,
+                                       migratetype);
        return page;
 }
 
-static inline int
-is_allocation_high_priority(struct task_struct *p, gfp_t gfp_mask)
-{
-       if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE)))
-                       && !in_interrupt())
-               return 1;
-       return 0;
-}
-
 /*
  * This is called in the allocator slow-path if the allocation request is of
  * sufficient urgency to ignore watermarks and take other desperate measures
@@ -1589,13 +1655,15 @@ is_allocation_high_priority(struct task_struct *p, gfp_t gfp_mask)
 static inline struct page *
 __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist, enum zone_type high_zoneidx,
-       nodemask_t *nodemask)
+       nodemask_t *nodemask, struct zone *preferred_zone,
+       int migratetype)
 {
        struct page *page;
 
        do {
                page = get_page_from_freelist(gfp_mask, nodemask, order,
-                       zonelist, high_zoneidx, ALLOC_NO_WATERMARKS);
+                       zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
+                       preferred_zone, migratetype);
 
                if (!page && gfp_mask & __GFP_NOFAIL)
                        congestion_wait(WRITE, HZ/50);
@@ -1615,10 +1683,49 @@ void wake_all_kswapd(unsigned int order, struct zonelist *zonelist,
                wakeup_kswapd(zone, order);
 }
 
+static inline int
+gfp_to_alloc_flags(gfp_t gfp_mask)
+{
+       struct task_struct *p = current;
+       int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
+       const gfp_t wait = gfp_mask & __GFP_WAIT;
+
+       /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
+       BUILD_BUG_ON(__GFP_HIGH != ALLOC_HIGH);
+
+       /*
+        * The caller may dip into page reserves a bit more if the caller
+        * cannot run direct reclaim, or if the caller has realtime scheduling
+        * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
+        * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
+        */
+       alloc_flags |= (gfp_mask & __GFP_HIGH);
+
+       if (!wait) {
+               alloc_flags |= ALLOC_HARDER;
+               /*
+                * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
+                * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
+                */
+               alloc_flags &= ~ALLOC_CPUSET;
+       } else if (unlikely(rt_task(p)))
+               alloc_flags |= ALLOC_HARDER;
+
+       if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
+               if (!in_interrupt() &&
+                   ((p->flags & PF_MEMALLOC) ||
+                    unlikely(test_thread_flag(TIF_MEMDIE))))
+                       alloc_flags |= ALLOC_NO_WATERMARKS;
+       }
+
+       return alloc_flags;
+}
+
 static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist, enum zone_type high_zoneidx,
-       nodemask_t *nodemask)
+       nodemask_t *nodemask, struct zone *preferred_zone,
+       int migratetype)
 {
        const gfp_t wait = gfp_mask & __GFP_WAIT;
        struct page *page = NULL;
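
For reference, the combinations gfp_to_alloc_flags() produces for the common cases, derived from the function body above (GFP_ATOMIC is __GFP_HIGH with __GFP_WAIT clear in this tree):

    /*
     * GFP_KERNEL (__GFP_WAIT set)        -> ALLOC_WMARK_MIN | ALLOC_CPUSET
     * GFP_ATOMIC (__GFP_HIGH, !wait)     -> ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER
     * rt_task() with __GFP_WAIT set      -> GFP_KERNEL flags plus ALLOC_HARDER
     * PF_MEMALLOC or TIF_MEMDIE, with
     * !__GFP_NOMEMALLOC, !in_interrupt() -> additionally ALLOC_NO_WATERMARKS
     */
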
@@ -1628,6 +1735,15 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
        struct task_struct *p = current;
 
        /*
+        * In the slowpath, we sanity check order to avoid ever trying to
+        * reclaim >= MAX_ORDER areas which will never succeed. Callers may
+        * be using allocators in order of preference for an area that is
+        * too large.
+        */
+       if (WARN_ON_ONCE(order >= MAX_ORDER))
+               return NULL;
+
+       /*
         * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
         * __GFP_NOWARN set) should not cause reclaim since the subsystem
         * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
@@ -1644,58 +1760,41 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
         * OK, we're below the kswapd watermark and have kicked background
         * reclaim. Now things get more complex, so set up alloc_flags according
         * to how we want to proceed.
-        *
-        * The caller may dip into page reserves a bit more if the caller
-        * cannot run direct reclaim, or if the caller has realtime scheduling
-        * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
-        * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
         */
-       alloc_flags = ALLOC_WMARK_MIN;
-       if ((unlikely(rt_task(p)) && !in_interrupt()) || !wait)
-               alloc_flags |= ALLOC_HARDER;
-       if (gfp_mask & __GFP_HIGH)
-               alloc_flags |= ALLOC_HIGH;
-       if (wait)
-               alloc_flags |= ALLOC_CPUSET;
+       alloc_flags = gfp_to_alloc_flags(gfp_mask);
 
 restart:
-       /*
-        * Go through the zonelist again. Let __GFP_HIGH and allocations
-        * coming from realtime tasks go deeper into reserves.
-        *
-        * This is the last chance, in general, before the goto nopage.
-        * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
-        * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
-        */
+       /* This is the last chance, in general, before the goto nopage. */
        page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
-                                               high_zoneidx, alloc_flags);
+                       high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
+                       preferred_zone, migratetype);
        if (page)
                goto got_pg;
 
 rebalance:
        /* Allocate without watermarks if the context allows */
-       if (is_allocation_high_priority(p, gfp_mask)) {
-               /* Do not dip into emergency reserves if specified */
-               if (!(gfp_mask & __GFP_NOMEMALLOC)) {
-                       page = __alloc_pages_high_priority(gfp_mask, order,
-                               zonelist, high_zoneidx, nodemask);
-                       if (page)
-                               goto got_pg;
-               }
-
-               /* Ensure no recursion into the allocator */
-               goto nopage;
+       if (alloc_flags & ALLOC_NO_WATERMARKS) {
+               page = __alloc_pages_high_priority(gfp_mask, order,
+                               zonelist, high_zoneidx, nodemask,
+                               preferred_zone, migratetype);
+               if (page)
+                       goto got_pg;
        }
 
        /* Atomic allocations - we can't balance anything */
        if (!wait)
                goto nopage;
 
+       /* Avoid recursion of direct reclaim */
+       if (p->flags & PF_MEMALLOC)
+               goto nopage;
+
        /* Try direct reclaim and then allocating */
        page = __alloc_pages_direct_reclaim(gfp_mask, order,
                                        zonelist, high_zoneidx,
                                        nodemask,
-                                       alloc_flags, &did_some_progress);
+                                       alloc_flags, preferred_zone,
+                                       migratetype, &did_some_progress);
        if (page)
                goto got_pg;
 
@@ -1705,18 +1804,23 @@ rebalance:
         */
        if (!did_some_progress) {
                if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
+                       if (oom_killer_disabled)
+                               goto nopage;
                        page = __alloc_pages_may_oom(gfp_mask, order,
                                        zonelist, high_zoneidx,
-                                       nodemask);
+                                       nodemask, preferred_zone,
+                                       migratetype);
                        if (page)
                                goto got_pg;
 
                        /*
-                        * The OOM killer does not trigger for high-order allocations
-                        * but if no progress is being made, there are no other
-                        * options and retrying is unlikely to help
+                        * The OOM killer does not trigger for high-order
+                        * allocations without __GFP_NOFAIL, so if no progress
+                        * is being made, there are no other options and
+                        * retrying is unlikely to help.
                         */
-                       if (order > PAGE_ALLOC_COSTLY_ORDER)
+                       if (order > PAGE_ALLOC_COSTLY_ORDER &&
+                                               !(gfp_mask & __GFP_NOFAIL))
                                goto nopage;
 
                        goto restart;
@@ -1739,7 +1843,10 @@ nopage:
                dump_stack();
                show_mem();
        }
+       return page;
 got_pg:
+       if (kmemcheck_enabled)
+               kmemcheck_pagealloc_alloc(page, order, gfp_mask);
        return page;
 
 }
@@ -1752,7 +1859,9 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                        struct zonelist *zonelist, nodemask_t *nodemask)
 {
        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
+       struct zone *preferred_zone;
        struct page *page;
+       int migratetype = allocflags_to_migratetype(gfp_mask);
 
        lockdep_trace_alloc(gfp_mask);
 
@@ -1769,11 +1878,19 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
        if (unlikely(!zonelist->_zonerefs->zone))
                return NULL;
 
+       /* The preferred zone is used for statistics later */
+       first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
+       if (!preferred_zone)
+               return NULL;
+
+       /* First allocation attempt */
        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
-                       zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET);
+                       zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
+                       preferred_zone, migratetype);
        if (unlikely(!page))
                page = __alloc_pages_slowpath(gfp_mask, order,
-                               zonelist, high_zoneidx, nodemask);
+                               zonelist, high_zoneidx, nodemask,
+                               preferred_zone, migratetype);
 
        return page;
 }
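
Every caller of the public allocation helpers funnels through __alloc_pages_nodemask() above, so preferred_zone and migratetype are now resolved exactly once per allocation rather than on every zonelist walk. A hedged usage sketch from a caller's point of view (illustrative snippet, not part of this patch):

    #include <linux/gfp.h>
    #include <linux/mm.h>

    static int example_order2_alloc(void)
    {
            struct page *page;

            page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 2); /* 4 contiguous pages */
            if (!page)
                    return -ENOMEM;
            /* ... use page_address(page) ... */
            __free_pages(page, 2);
            return 0;
    }
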
@@ -1906,7 +2023,7 @@ static unsigned int nr_free_zone_pages(int offset)
 
        for_each_zone_zonelist(zone, z, zonelist, offset) {
                unsigned long size = zone->present_pages;
-               unsigned long high = zone->pages_high;
+               unsigned long high = high_wmark_pages(zone);
                if (size > high)
                        sum += size - high;
        }
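
high_wmark_pages() here, and min_wmark_pages()/low_wmark_pages() used further down, are the accessor side of the watermark[] conversion; assuming the definitions added to include/linux/mmzone.h by this series, they are roughly:

    #define min_wmark_pages(z)      (z->watermark[WMARK_MIN])
    #define low_wmark_pages(z)      (z->watermark[WMARK_LOW])
    #define high_wmark_pages(z)     (z->watermark[WMARK_HIGH])
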
@@ -1998,19 +2115,14 @@ void show_free_areas(void)
 
        printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
                " inactive_file:%lu"
-//TODO:  check/adjust line lengths
-#ifdef CONFIG_UNEVICTABLE_LRU
                " unevictable:%lu"
-#endif
                " dirty:%lu writeback:%lu unstable:%lu\n"
                " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
                global_page_state(NR_ACTIVE_ANON),
                global_page_state(NR_ACTIVE_FILE),
                global_page_state(NR_INACTIVE_ANON),
                global_page_state(NR_INACTIVE_FILE),
-#ifdef CONFIG_UNEVICTABLE_LRU
                global_page_state(NR_UNEVICTABLE),
-#endif
                global_page_state(NR_FILE_DIRTY),
                global_page_state(NR_WRITEBACK),
                global_page_state(NR_UNSTABLE_NFS),
@@ -2034,25 +2146,21 @@ void show_free_areas(void)
                        " inactive_anon:%lukB"
                        " active_file:%lukB"
                        " inactive_file:%lukB"
-#ifdef CONFIG_UNEVICTABLE_LRU
                        " unevictable:%lukB"
-#endif
                        " present:%lukB"
                        " pages_scanned:%lu"
                        " all_unreclaimable? %s"
                        "\n",
                        zone->name,
                        K(zone_page_state(zone, NR_FREE_PAGES)),
-                       K(zone->pages_min),
-                       K(zone->pages_low),
-                       K(zone->pages_high),
+                       K(min_wmark_pages(zone)),
+                       K(low_wmark_pages(zone)),
+                       K(high_wmark_pages(zone)),
                        K(zone_page_state(zone, NR_ACTIVE_ANON)),
                        K(zone_page_state(zone, NR_INACTIVE_ANON)),
                        K(zone_page_state(zone, NR_ACTIVE_FILE)),
                        K(zone_page_state(zone, NR_INACTIVE_FILE)),
-#ifdef CONFIG_UNEVICTABLE_LRU
                        K(zone_page_state(zone, NR_UNEVICTABLE)),
-#endif
                        K(zone->present_pages),
                        zone->pages_scanned,
                        (zone_is_all_unreclaimable(zone) ? "yes" : "no")
@@ -2210,7 +2318,7 @@ int numa_zonelist_order_handler(ctl_table *table, int write,
 }
 
 
-#define MAX_NODE_LOAD (num_online_nodes())
+#define MAX_NODE_LOAD (nr_online_nodes)
 static int node_load[MAX_NUMNODES];
 
 /**
@@ -2419,7 +2527,7 @@ static void build_zonelists(pg_data_t *pgdat)
 
        /* NUMA-aware ordering of nodes */
        local_node = pgdat->node_id;
-       load = num_online_nodes();
+       load = nr_online_nodes;
        prev_node = local_node;
        nodes_clear(used_mask);
 
@@ -2570,7 +2678,7 @@ void build_all_zonelists(void)
 
        printk("Built %i zonelists in %s order, mobility grouping %s.  "
                "Total pages: %ld\n",
-                       num_online_nodes(),
+                       nr_online_nodes,
                        zonelist_order_name[current_zonelist_order],
                        page_group_by_mobility_disabled ? "off" : "on",
                        vm_total_pages);
@@ -2649,8 +2757,8 @@ static inline unsigned long wait_table_bits(unsigned long size)
 
 /*
  * Mark a number of pageblocks as MIGRATE_RESERVE. The number
- * of blocks reserved is based on zone->pages_min. The memory within the
- * reserve will tend to store contiguous free pages. Setting min_free_kbytes
+ * of blocks reserved is based on min_wmark_pages(zone). The memory within
+ * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
  * higher will lead to a bigger reserve which will get freed as contiguous
  * blocks as reclaim kicks in
  */
@@ -2663,7 +2771,7 @@ static void setup_zone_migrate_reserve(struct zone *zone)
        /* Get the start pfn, end pfn and the number of blocks to reserve */
        start_pfn = zone->zone_start_pfn;
        end_pfn = start_pfn + zone->spanned_pages;
-       reserve = roundup(zone->pages_min, pageblock_nr_pages) >>
+       reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
                                                        pageblock_order;
 
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
@@ -3595,7 +3703,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                zone_pcp_init(zone);
                for_each_lru(l) {
                        INIT_LIST_HEAD(&zone->lru[l].list);
-                       zone->lru[l].nr_scan = 0;
+                       zone->lru[l].nr_saved_scan = 0;
                }
                zone->reclaim_stat.recent_rotated[0] = 0;
                zone->reclaim_stat.recent_rotated[1] = 0;
@@ -4132,6 +4240,11 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
                                                early_node_map[i].start_pfn,
                                                early_node_map[i].end_pfn);
 
+       /*
+        * find_zone_movable_pfns_for_nodes()/early_calculate_totalpages()
+        * initialise node_states[N_HIGH_MEMORY], so clear that node mask first.
+        */
+       nodes_clear(node_states[N_HIGH_MEMORY]);
        /* Initialise every node */
        mminit_verify_pageflags_layout();
        setup_nr_node_ids();
@@ -4266,8 +4379,8 @@ static void calculate_totalreserve_pages(void)
                                        max = zone->lowmem_reserve[j];
                        }
 
-                       /* we treat pages_high as reserved pages. */
-                       max += zone->pages_high;
+                       /* we treat the high watermark as reserved pages. */
+                       max += high_wmark_pages(zone);
 
                        if (max > zone->present_pages)
                                max = zone->present_pages;
@@ -4317,12 +4430,13 @@ static void setup_per_zone_lowmem_reserve(void)
 }
 
 /**
- * setup_per_zone_pages_min - called when min_free_kbytes changes.
+ * setup_per_zone_wmarks - called when min_free_kbytes changes
+ * or when memory is hot-{added|removed}
  *
- * Ensures that the pages_{min,low,high} values for each zone are set correctly
- * with respect to min_free_kbytes.
+ * Ensures that the watermark[min,low,high] values for each zone are set
+ * correctly with respect to min_free_kbytes.
  */
-void setup_per_zone_pages_min(void)
+void setup_per_zone_wmarks(void)
 {
        unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
        unsigned long lowmem_pages = 0;
@@ -4347,7 +4461,7 @@ void setup_per_zone_pages_min(void)
                         * need highmem pages, so cap pages_min to a small
                         * value here.
                         *
-                        * The (pages_high-pages_low) and (pages_low-pages_min)
+                        * The (WMARK_HIGH-WMARK_LOW) and (WMARK_LOW-WMARK_MIN)
                         * deltas controls asynch page reclaim, and so should
                         * not be capped for highmem.
                         */
@@ -4358,17 +4472,17 @@ void setup_per_zone_pages_min(void)
                                min_pages = SWAP_CLUSTER_MAX;
                        if (min_pages > 128)
                                min_pages = 128;
-                       zone->pages_min = min_pages;
+                       zone->watermark[WMARK_MIN] = min_pages;
                } else {
                        /*
                         * If it's a lowmem zone, reserve a number of pages
                         * proportionate to the zone's size.
                         */
-                       zone->pages_min = tmp;
+                       zone->watermark[WMARK_MIN] = tmp;
                }
 
-               zone->pages_low   = zone->pages_min + (tmp >> 2);
-               zone->pages_high  = zone->pages_min + (tmp >> 1);
+               zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
+               zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
                setup_zone_migrate_reserve(zone);
                spin_unlock_irqrestore(&zone->lock, flags);
        }
@@ -4378,8 +4492,6 @@ void setup_per_zone_pages_min(void)
 }
 
 /**
- * setup_per_zone_inactive_ratio - called when min_free_kbytes changes.
- *
  * The inactive anon list should be small enough that the VM never has to
  * do too much work, but large enough that each inactive page has a chance
  * to be referenced again before it is swapped out.
@@ -4400,21 +4512,26 @@ void setup_per_zone_pages_min(void)
  *    1TB     101        10GB
  *   10TB     320        32GB
  */
-static void setup_per_zone_inactive_ratio(void)
+void calculate_zone_inactive_ratio(struct zone *zone)
 {
-       struct zone *zone;
+       unsigned int gb, ratio;
 
-       for_each_zone(zone) {
-               unsigned int gb, ratio;
-
-               /* Zone size in gigabytes */
-               gb = zone->present_pages >> (30 - PAGE_SHIFT);
+       /* Zone size in gigabytes */
+       gb = zone->present_pages >> (30 - PAGE_SHIFT);
+       if (gb)
                ratio = int_sqrt(10 * gb);
-               if (!ratio)
-                       ratio = 1;
+       else
+               ratio = 1;
 
-               zone->inactive_ratio = ratio;
-       }
+       zone->inactive_ratio = ratio;
+}
+
+static void __init setup_per_zone_inactive_ratio(void)
+{
+       struct zone *zone;
+
+       for_each_zone(zone)
+               calculate_zone_inactive_ratio(zone);
 }
 
 /*
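
A worked instance of the ratio formula, matching the "1TB 101 10GB" row of the table in the comment above:

    /*
     * 1TB zone with 4K pages: present_pages = 2^28
     * gb    = present_pages >> (30 - PAGE_SHIFT) = 2^28 >> 18 = 1024
     * ratio = int_sqrt(10 * 1024) = int_sqrt(10240) = 101
     * so roughly 1/101 of the zone (about 10GB) is the target size of the
     * inactive anon list, as listed in the table.
     */
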
@@ -4441,7 +4558,7 @@ static void setup_per_zone_inactive_ratio(void)
  * 8192MB:     11584k
  * 16384MB:    16384k
  */
-static int __init init_per_zone_pages_min(void)
+static int __init init_per_zone_wmark_min(void)
 {
        unsigned long lowmem_kbytes;
 
@@ -4452,12 +4569,12 @@ static int __init init_per_zone_pages_min(void)
                min_free_kbytes = 128;
        if (min_free_kbytes > 65536)
                min_free_kbytes = 65536;
-       setup_per_zone_pages_min();
+       setup_per_zone_wmarks();
        setup_per_zone_lowmem_reserve();
        setup_per_zone_inactive_ratio();
        return 0;
 }
-module_init(init_per_zone_pages_min)
+module_init(init_per_zone_wmark_min)
 
 /*
  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 
@@ -4469,7 +4586,7 @@ int min_free_kbytes_sysctl_handler(ctl_table *table, int write,
 {
        proc_dointvec(table, write, file, buffer, length, ppos);
        if (write)
-               setup_per_zone_pages_min();
+               setup_per_zone_wmarks();
        return 0;
 }
 
@@ -4513,7 +4630,7 @@ int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
  *     whenever sysctl_lowmem_reserve_ratio changes.
  *
  * The reserve ratio obviously has absolutely no relation with the
- * pages_min watermarks. The lowmem reserve ratio can only make sense
+ * minimum watermarks. The lowmem reserve ratio can only make sense
  * if in function of the boot time zone sizes.
  */
 int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
@@ -4620,26 +4737,13 @@ void *__init alloc_large_system_hash(const char *tablename,
                else if (hashdist)
                        table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
                else {
-                       unsigned long order = get_order(size);
-
-                       if (order < MAX_ORDER)
-                               table = (void *)__get_free_pages(GFP_ATOMIC,
-                                                               order);
                        /*
                         * If bucketsize is not a power-of-two, we may free
-                        * some pages at the end of hash table.
+                        * some pages at the end of the hash table, which
+                        * alloc_pages_exact() does automatically.
                         */
-                       if (table) {
-                               unsigned long alloc_end = (unsigned long)table +
-                                               (PAGE_SIZE << order);
-                               unsigned long used = (unsigned long)table +
-                                               PAGE_ALIGN(size);
-                               split_page(virt_to_page(table), order);
-                               while (used < alloc_end) {
-                                       free_page(used);
-                                       used += PAGE_SIZE;
-                               }
-                       }
+                       if (get_order(size) < MAX_ORDER)
+                               table = alloc_pages_exact(size, GFP_ATOMIC);
                }
        } while (!table && size > PAGE_SIZE && --log2qty);
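
The open-coded get_order()/split_page()/free_page() sequence deleted above is exactly what alloc_pages_exact() wraps: allocate the covering power-of-two block, split it, and free the tail pages beyond 'size'. A hedged usage sketch (free_pages_exact() is the documented counterpart for releasing such an allocation):

    #include <linux/gfp.h>

    static void *hash_table_alloc_example(size_t size)
    {
            /* page-aligned memory of at least 'size' bytes; the unused tail
             * of the covering power-of-two block is freed internally */
            void *table = alloc_pages_exact(size, GFP_ATOMIC);

            return table;   /* release later with free_pages_exact(table, size) */
    }
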