diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 85759cd..d2a8889 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -23,6 +23,7 @@
 #include <linux/bootmem.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
+#include <linux/kmemcheck.h>
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <linux/pagevec.h>
@@ -47,6 +48,8 @@
 #include <linux/page_cgroup.h>
 #include <linux/debugobjects.h>
 #include <linux/kmemleak.h>
+#include <linux/memory.h>
+#include <trace/events/kmem.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -70,8 +73,8 @@ EXPORT_SYMBOL(node_states);
 
 unsigned long totalram_pages __read_mostly;
 unsigned long totalreserve_pages __read_mostly;
-unsigned long highest_memmap_pfn __read_mostly;
 int percpu_pagelist_fraction;
+gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 int pageblock_order __read_mostly;
@@ -121,8 +124,8 @@ static char * const zone_names[MAX_NR_ZONES] = {
 
 int min_free_kbytes = 1024;
 
-unsigned long __meminitdata nr_kernel_pages;
-unsigned long __meminitdata nr_all_pages;
+static unsigned long __meminitdata nr_kernel_pages;
+static unsigned long __meminitdata nr_all_pages;
 static unsigned long __meminitdata dma_reserve;
 
 #ifdef CONFIG_ARCH_POPULATES_NODE_MAP
@@ -178,6 +181,8 @@ static void set_pageblock_migratetype(struct page *page, int migratetype)
                                        PB_migrate, PB_migrate_end);
 }
 
+bool oom_killer_disabled __read_mostly;
+
 #ifdef CONFIG_DEBUG_VM
 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 {
@@ -230,6 +235,12 @@ static void bad_page(struct page *page)
        static unsigned long nr_shown;
        static unsigned long nr_unshown;
 
+       /* Don't complain about poisoned pages */
+       if (PageHWPoison(page)) {
+               __ClearPageBuddy(page);
+               return;
+       }
+
        /*
         * Allow a burst of 60 reports, then keep quiet for that minute;
         * or allow a steady drip of one report per second.
@@ -300,23 +311,6 @@ void prep_compound_page(struct page *page, unsigned long order)
        }
 }
 
-#ifdef CONFIG_HUGETLBFS
-void prep_compound_gigantic_page(struct page *page, unsigned long order)
-{
-       int i;
-       int nr_pages = 1 << order;
-       struct page *p = page + 1;
-
-       set_compound_page_dtor(page, free_compound_page);
-       set_compound_order(page, order);
-       __SetPageHead(page);
-       for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
-               __SetPageTail(p);
-               p->first_page = page;
-       }
-}
-#endif
-
 static int destroy_compound_page(struct page *page, unsigned long order)
 {
        int i;
@@ -493,7 +487,6 @@ static inline void __free_one_page(struct page *page,
        zone->free_area[order].nr_free++;
 }
 
-#ifdef CONFIG_HAVE_MLOCKED_PAGE_BIT
 /*
  * free_page_mlock() -- clean up attempts to free an mlocked() page.
  * Page should not be on lru, so no need to fix that up.
@@ -501,13 +494,9 @@ static inline void __free_one_page(struct page *page,
  */
 static inline void free_page_mlock(struct page *page)
 {
-       __ClearPageMlocked(page);
        __dec_zone_page_state(page, NR_MLOCK);
        __count_vm_event(UNEVICTABLE_MLOCKFREED);
 }
-#else
-static void free_page_mlock(struct page *page) { }
-#endif
 
 static inline int free_pages_check(struct page *page)
 {
@@ -524,7 +513,7 @@ static inline int free_pages_check(struct page *page)
 }
 
 /*
- * Frees a list of pages. 
+ * Frees a number of pages from the PCP lists
  * Assumes all pages on list are in same zone, and of same order.
  * count is the number of pages to free.
  *
@@ -534,22 +523,42 @@ static inline int free_pages_check(struct page *page)
  * And clear the zone's pages_scanned counter, to hold off the "all pages are
  * pinned" detection logic.
  */
-static void free_pages_bulk(struct zone *zone, int count,
-                                       struct list_head *list, int order)
+static void free_pcppages_bulk(struct zone *zone, int count,
+                                       struct per_cpu_pages *pcp)
 {
+       int migratetype = 0;
+       int batch_free = 0;
+
        spin_lock(&zone->lock);
        zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
        zone->pages_scanned = 0;
 
-       __mod_zone_page_state(zone, NR_FREE_PAGES, count << order);
-       while (count--) {
+       __mod_zone_page_state(zone, NR_FREE_PAGES, count);
+       while (count) {
                struct page *page;
+               struct list_head *list;
 
-               VM_BUG_ON(list_empty(list));
-               page = list_entry(list->prev, struct page, lru);
-               /* have to delete it as __free_one_page list manipulates */
-               list_del(&page->lru);
-               __free_one_page(page, zone, order, page_private(page));
+               /*
+                * Remove pages from lists in a round-robin fashion. A
+                * batch_free count is maintained that is incremented when an
+                * empty list is encountered.  This is so more pages are freed
+                * off fuller lists instead of spinning excessively around empty
+                * lists
+                */
+               do {
+                       batch_free++;
+                       if (++migratetype == MIGRATE_PCPTYPES)
+                               migratetype = 0;
+                       list = &pcp->lists[migratetype];
+               } while (list_empty(list));
+
+               do {
+                       page = list_entry(list->prev, struct page, lru);
+                       /* must delete as __free_one_page list manipulates */
+                       list_del(&page->lru);
+                       __free_one_page(page, zone, 0, migratetype);
+                       trace_mm_page_pcpu_drain(page, 0, migratetype);
+               } while (--count && --batch_free && !list_empty(list));
        }
        spin_unlock(&zone->lock);
 }
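
The comment above is the heart of the new free_pcppages_bulk(). As an illustrative userspace sketch (not kernel code: integer counters stand in for the per-migratetype PCP lists, and as in the kernel 'count' is assumed not to exceed the total number of queued pages), the round-robin drain looks like this:

#include <stdio.h>

#define NR_PCP_LISTS 3  /* stands in for MIGRATE_PCPTYPES */

/*
 * Drain 'count' pages round-robin across the lists, growing the batch each
 * time an empty list is skipped so that fuller lists give up more pages per
 * pass -- the same shape as free_pcppages_bulk() above.
 */
static void drain_round_robin(int lists[NR_PCP_LISTS], int count)
{
        int migratetype = 0;
        int batch_free = 0;

        while (count) {
                /* find the next non-empty list, counting the skips */
                do {
                        batch_free++;
                        if (++migratetype == NR_PCP_LISTS)
                                migratetype = 0;
                } while (lists[migratetype] == 0);

                /* free up to batch_free pages from that list */
                do {
                        lists[migratetype]--;
                        printf("freed one page from list %d\n", migratetype);
                } while (--count && --batch_free && lists[migratetype]);
        }
}

int main(void)
{
        int lists[NR_PCP_LISTS] = { 5, 0, 2 };

        drain_round_robin(lists, 4);
        return 0;
}
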
@@ -571,7 +580,9 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        unsigned long flags;
        int i;
        int bad = 0;
-       int clearMlocked = PageMlocked(page);
+       int wasMlocked = __TestClearPageMlocked(page);
+
+       kmemcheck_free_shadow(page, order);
 
        for (i = 0 ; i < (1 << order) ; ++i)
                bad += free_pages_check(page + i);
@@ -587,7 +598,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
        kernel_map_pages(page, 1 << order, 0);
 
        local_irq_save(flags);
-       if (unlikely(clearMlocked))
+       if (unlikely(wasMlocked))
                free_page_mlock(page);
        __count_vm_events(PGFREE, 1 << order);
        free_one_page(page_zone(page), page, order,
@@ -658,7 +669,7 @@ static inline void expand(struct zone *zone, struct page *page,
 /*
  * This page is about to be returned from the page allocator
  */
-static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
+static inline int check_new_page(struct page *page)
 {
        if (unlikely(page_mapcount(page) |
                (page->mapping != NULL)  |
@@ -667,6 +678,18 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
                bad_page(page);
                return 1;
        }
+       return 0;
+}
+
+static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
+{
+       int i;
+
+       for (i = 0; i < (1 << order); i++) {
+               struct page *p = page + i;
+               if (unlikely(check_new_page(p)))
+                       return 1;
+       }
 
        set_page_private(page, 0);
        set_page_refcounted(page);
@@ -795,6 +818,17 @@ static int move_freepages_block(struct zone *zone, struct page *page,
        return move_freepages(zone, start_page, end_page, migratetype);
 }
 
+static void change_pageblock_range(struct page *pageblock_page,
+                                       int start_order, int migratetype)
+{
+       int nr_pageblocks = 1 << (start_order - pageblock_order);
+
+       while (nr_pageblocks--) {
+               set_pageblock_migratetype(pageblock_page, migratetype);
+               pageblock_page += pageblock_nr_pages;
+       }
+}
+
 /* Remove an element from the buddy allocator from the fallback list */
 static inline struct page *
 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
@@ -829,13 +863,15 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
                         * aggressive about taking ownership of free pages
                         */
                        if (unlikely(current_order >= (pageblock_order >> 1)) ||
-                                       start_migratetype == MIGRATE_RECLAIMABLE) {
+                                       start_migratetype == MIGRATE_RECLAIMABLE ||
+                                       page_group_by_mobility_disabled) {
                                unsigned long pages;
                                pages = move_freepages_block(zone, page,
                                                                start_migratetype);
 
                                /* Claim the whole block if over half of it is free */
-                               if (pages >= (1 << (pageblock_order-1)))
+                               if (pages >= (1 << (pageblock_order-1)) ||
+                                               page_group_by_mobility_disabled)
                                        set_pageblock_migratetype(page,
                                                                start_migratetype);
 
@@ -846,11 +882,16 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
                        list_del(&page->lru);
                        rmv_page_order(page);
 
-                       if (current_order == pageblock_order)
-                               set_pageblock_migratetype(page,
+                       /* Take ownership for orders >= pageblock_order */
+                       if (current_order >= pageblock_order)
+                               change_pageblock_range(page, current_order,
                                                        start_migratetype);
 
                        expand(zone, page, order, current_order, area, migratetype);
+
+                       trace_mm_page_alloc_extfrag(page, order, current_order,
+                               start_migratetype, migratetype);
+
                        return page;
                }
        }
@@ -884,6 +925,7 @@ retry_reserve:
                }
        }
 
+       trace_mm_page_alloc_zone_locked(page, order, migratetype);
        return page;
 }
 
@@ -894,7 +936,7 @@ retry_reserve:
  */
 static int rmqueue_bulk(struct zone *zone, unsigned int order, 
                        unsigned long count, struct list_head *list,
-                       int migratetype)
+                       int migratetype, int cold)
 {
        int i;
        
@@ -913,7 +955,10 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
                 * merge IO requests if the physical pages are ordered
                 * properly.
                 */
-               list_add(&page->lru, list);
+               if (likely(cold == 0))
+                       list_add(&page->lru, list);
+               else
+                       list_add_tail(&page->lru, list);
                set_page_private(page, migratetype);
                list = &page->lru;
        }
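
The new 'cold' parameter decides which end of the per-cpu list a bulk-allocated page lands on; buffered_rmqueue() later pops hot requests from the head and cold requests from the tail, so cache-hot pages are not spent on callers that only want cold pages (readahead, for instance). A minimal userspace sketch of that ordering, using a hand-rolled circular list in the spirit of list_head (illustrative only):

#include <stdio.h>

/* minimal circular doubly-linked list, same idea as the kernel's list_head */
struct node {
        struct node *prev, *next;
        int id;
};

static void list_init(struct node *head)
{
        head->prev = head->next = head;
}

static void list_add(struct node *head, struct node *n)         /* to front */
{
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
}

static void list_add_tail(struct node *head, struct node *n)    /* to back */
{
        n->prev = head->prev;
        n->next = head;
        head->prev->next = n;
        head->prev = n;
}

int main(void)
{
        struct node head, pages[4];
        int i;

        list_init(&head);
        for (i = 0; i < 4; i++) {
                pages[i].id = i;
                if (i < 2)
                        list_add(&head, &pages[i]);      /* "hot": head */
                else
                        list_add_tail(&head, &pages[i]); /* "cold": tail */
        }

        /* hot requests take head.next, cold requests take head.prev */
        printf("a hot allocation would get page %d\n", head.next->id);
        printf("a cold allocation would get page %d\n", head.prev->id);
        return 0;
}
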
@@ -941,7 +986,7 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
                to_drain = pcp->batch;
        else
                to_drain = pcp->count;
-       free_pages_bulk(zone, to_drain, &pcp->list, 0);
+       free_pcppages_bulk(zone, to_drain, pcp);
        pcp->count -= to_drain;
        local_irq_restore(flags);
 }
@@ -967,7 +1012,7 @@ static void drain_pages(unsigned int cpu)
 
                pcp = &pset->pcp;
                local_irq_save(flags);
-               free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+               free_pcppages_bulk(zone, pcp->count, pcp);
                pcp->count = 0;
                local_irq_restore(flags);
        }
@@ -1033,7 +1078,10 @@ static void free_hot_cold_page(struct page *page, int cold)
        struct zone *zone = page_zone(page);
        struct per_cpu_pages *pcp;
        unsigned long flags;
-       int clearMlocked = PageMlocked(page);
+       int migratetype;
+       int wasMlocked = __TestClearPageMlocked(page);
+
+       kmemcheck_free_shadow(page, 0);
 
        if (PageAnon(page))
                page->mapping = NULL;
@@ -1048,35 +1096,49 @@ static void free_hot_cold_page(struct page *page, int cold)
        kernel_map_pages(page, 1, 0);
 
        pcp = &zone_pcp(zone, get_cpu())->pcp;
-       set_page_private(page, get_pageblock_migratetype(page));
+       migratetype = get_pageblock_migratetype(page);
+       set_page_private(page, migratetype);
        local_irq_save(flags);
-       if (unlikely(clearMlocked))
+       if (unlikely(wasMlocked))
                free_page_mlock(page);
        __count_vm_event(PGFREE);
 
+       /*
+        * We only track unmovable, reclaimable and movable on pcp lists.
+        * Free ISOLATE pages back to the allocator because they are being
+        * offlined but treat RESERVE as movable pages so we can get those
+        * areas back if necessary. Otherwise, we may have to free
+        * excessively into the page allocator
+        */
+       if (migratetype >= MIGRATE_PCPTYPES) {
+               if (unlikely(migratetype == MIGRATE_ISOLATE)) {
+                       free_one_page(zone, page, 0, migratetype);
+                       goto out;
+               }
+               migratetype = MIGRATE_MOVABLE;
+       }
+
        if (cold)
-               list_add_tail(&page->lru, &pcp->list);
+               list_add_tail(&page->lru, &pcp->lists[migratetype]);
        else
-               list_add(&page->lru, &pcp->list);
+               list_add(&page->lru, &pcp->lists[migratetype]);
        pcp->count++;
        if (pcp->count >= pcp->high) {
-               free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
+               free_pcppages_bulk(zone, pcp->batch, pcp);
                pcp->count -= pcp->batch;
        }
+
+out:
        local_irq_restore(flags);
        put_cpu();
 }
 
 void free_hot_page(struct page *page)
 {
+       trace_mm_page_free_direct(page, 0);
        free_hot_cold_page(page, 0);
 }
        
-void free_cold_page(struct page *page)
-{
-       free_hot_cold_page(page, 1);
-}
-
 /*
  * split_page takes a non-compound higher-order page, and splits it into
  * n (1<<order) sub-pages: page[0..n]
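
The free_hot_cold_page() hunk above boils down to a per-migratetype decision: pages of the three PCP types go on their own list, MIGRATE_ISOLATE pages bypass the PCP lists entirely, and anything else (MIGRATE_RESERVE) is filed as movable. A compact sketch of just that dispatch (the enum ordering mirrors what the hunk assumes; treat it as illustrative rather than a copy of the headers):

#include <stdio.h>

enum {
        MIGRATE_UNMOVABLE,
        MIGRATE_RECLAIMABLE,
        MIGRATE_MOVABLE,
        MIGRATE_PCPTYPES,                   /* types kept on pcp lists */
        MIGRATE_RESERVE = MIGRATE_PCPTYPES,
        MIGRATE_ISOLATE,
};

/*
 * Decide where a freed order-0 page goes: the pcp list index, or -1 when it
 * must go straight back to the buddy allocator (the free_one_page() path).
 */
static int pcp_list_for(int migratetype)
{
        if (migratetype >= MIGRATE_PCPTYPES) {
                if (migratetype == MIGRATE_ISOLATE)
                        return -1;
                migratetype = MIGRATE_MOVABLE;  /* e.g. MIGRATE_RESERVE */
        }
        return migratetype;
}

int main(void)
{
        int t;

        for (t = MIGRATE_UNMOVABLE; t <= MIGRATE_ISOLATE; t++)
                printf("migratetype %d -> pcp list %d\n", t, pcp_list_for(t));
        return 0;
}
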
@@ -1091,6 +1153,16 @@ void split_page(struct page *page, unsigned int order)
 
        VM_BUG_ON(PageCompound(page));
        VM_BUG_ON(!page_count(page));
+
+#ifdef CONFIG_KMEMCHECK
+       /*
+        * Split shadow pages too, because free(page[0]) would
+        * otherwise free the whole shadow.
+        */
+       if (kmemcheck_page_is_tracked(page))
+               split_page(virt_to_page(page[0].shadow), order);
+#endif
+
        for (i = 1; i < (1 << order); i++)
                set_page_refcounted(page + i);
 }
@@ -1114,43 +1186,46 @@ again:
        cpu  = get_cpu();
        if (likely(order == 0)) {
                struct per_cpu_pages *pcp;
+               struct list_head *list;
 
                pcp = &zone_pcp(zone, cpu)->pcp;
+               list = &pcp->lists[migratetype];
                local_irq_save(flags);
-               if (!pcp->count) {
-                       pcp->count = rmqueue_bulk(zone, 0,
-                                       pcp->batch, &pcp->list, migratetype);
-                       if (unlikely(!pcp->count))
+               if (list_empty(list)) {
+                       pcp->count += rmqueue_bulk(zone, 0,
+                                       pcp->batch, list,
+                                       migratetype, cold);
+                       if (unlikely(list_empty(list)))
                                goto failed;
                }
 
-               /* Find a page of the appropriate migrate type */
-               if (cold) {
-                       list_for_each_entry_reverse(page, &pcp->list, lru)
-                               if (page_private(page) == migratetype)
-                                       break;
-               } else {
-                       list_for_each_entry(page, &pcp->list, lru)
-                               if (page_private(page) == migratetype)
-                                       break;
-               }
-
-               /* Allocate more to the pcp list if necessary */
-               if (unlikely(&page->lru == &pcp->list)) {
-                       pcp->count += rmqueue_bulk(zone, 0,
-                                       pcp->batch, &pcp->list, migratetype);
-                       page = list_entry(pcp->list.next, struct page, lru);
-               }
+               if (cold)
+                       page = list_entry(list->prev, struct page, lru);
+               else
+                       page = list_entry(list->next, struct page, lru);
 
                list_del(&page->lru);
                pcp->count--;
        } else {
+               if (unlikely(gfp_flags & __GFP_NOFAIL)) {
+                       /*
+                        * __GFP_NOFAIL is not to be used in new code.
+                        *
+                        * All __GFP_NOFAIL callers should be fixed so that they
+                        * properly detect and handle allocation failures.
+                        *
+                        * We most definitely don't want callers attempting to
+                        * allocate greater than order-1 page units with
+                        * __GFP_NOFAIL.
+                        */
+                       WARN_ON_ONCE(order > 1);
+               }
                spin_lock_irqsave(&zone->lock, flags);
                page = __rmqueue(zone, order, migratetype);
-               __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
                spin_unlock(&zone->lock);
                if (!page)
                        goto failed;
+               __mod_zone_page_state(zone, NR_FREE_PAGES, -(1 << order));
        }
 
        __count_zone_vm_events(PGALLOC, zone, 1 << order);
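
With one PCP list per migratetype, the order-0 fast path above no longer walks a mixed list looking for a matching page: it indexes the right list directly, bulk-refills it when empty, and fails only if the refill produced nothing. A userspace sketch of that check/refill/take flow, with plain counters standing in for the real lists (the batch size of 4 and the single "buddy" pool are assumptions made for the example):

#include <stdio.h>

#define BATCH 4

/* a refill moves up to BATCH pages from the buddy pool onto the pcp list */
static int refill(int *list, int buddy_available)
{
        int got = buddy_available < BATCH ? buddy_available : BATCH;

        *list += got;
        return got;
}

/* returns 1 on success, 0 when even a refill produced nothing */
static int take_one(int *list, int *buddy_free)
{
        if (*list == 0) {
                *buddy_free -= refill(list, *buddy_free);
                if (*list == 0)
                        return 0;       /* the "goto failed" case */
        }
        (*list)--;
        return 1;
}

int main(void)
{
        int movable_list = 0, buddy_free = 2;

        printf("first alloc: %s\n",
               take_one(&movable_list, &buddy_free) ? "ok" : "failed");
        printf("pcp list now holds %d page(s), buddy %d page(s)\n",
               movable_list, buddy_free);
        return 0;
}
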
@@ -1464,15 +1539,33 @@ zonelist_scan:
                BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
                if (!(alloc_flags & ALLOC_NO_WATERMARKS)) {
                        unsigned long mark;
+                       int ret;
+
                        mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
-                       if (!zone_watermark_ok(zone, order, mark,
-                                   classzone_idx, alloc_flags)) {
-                               if (!zone_reclaim_mode ||
-                                   !zone_reclaim(zone, gfp_mask, order))
+                       if (zone_watermark_ok(zone, order, mark,
+                                   classzone_idx, alloc_flags))
+                               goto try_this_zone;
+
+                       if (zone_reclaim_mode == 0)
+                               goto this_zone_full;
+
+                       ret = zone_reclaim(zone, gfp_mask, order);
+                       switch (ret) {
+                       case ZONE_RECLAIM_NOSCAN:
+                               /* did not scan */
+                               goto try_next_zone;
+                       case ZONE_RECLAIM_FULL:
+                               /* scanned but unreclaimable */
+                               goto this_zone_full;
+                       default:
+                               /* did we reclaim enough */
+                               if (!zone_watermark_ok(zone, order, mark,
+                                               classzone_idx, alloc_flags))
                                        goto this_zone_full;
                        }
                }
 
+try_this_zone:
                page = buffered_rmqueue(preferred_zone, zone, order,
                                                gfp_mask, migratetype);
                if (page)
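
The switch above maps zone_reclaim()'s return value onto one of three continuations instead of the old single watermark retry. A sketch of that dispatch in isolation (the ZONE_RECLAIM_NOSCAN/FULL names come from the hunk; ZONE_RECLAIM_SOME and the numeric values are placeholders for the example):

#include <stdio.h>

enum zone_reclaim_result {
        ZONE_RECLAIM_NOSCAN,    /* did not scan at all */
        ZONE_RECLAIM_FULL,      /* scanned but reclaimed nothing */
        ZONE_RECLAIM_SOME,      /* placeholder: reclaimed something */
};

enum next_step { TRY_THIS_ZONE, TRY_NEXT_ZONE, THIS_ZONE_FULL };

static enum next_step after_zone_reclaim(enum zone_reclaim_result ret,
                                         int watermark_ok_now)
{
        switch (ret) {
        case ZONE_RECLAIM_NOSCAN:
                return TRY_NEXT_ZONE;   /* don't mark the zone full */
        case ZONE_RECLAIM_FULL:
                return THIS_ZONE_FULL;
        default:
                /* did we reclaim enough to pass the watermark check? */
                return watermark_ok_now ? TRY_THIS_ZONE : THIS_ZONE_FULL;
        }
}

int main(void)
{
        printf("noscan -> %d, full -> %d, reclaimed+ok -> %d\n",
               after_zone_reclaim(ZONE_RECLAIM_NOSCAN, 0),
               after_zone_reclaim(ZONE_RECLAIM_FULL, 0),
               after_zone_reclaim(ZONE_RECLAIM_SOME, 1));
        return 0;
}
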
@@ -1562,12 +1655,22 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
        if (page)
                goto out;
 
-       /* The OOM killer will not help higher order allocs */
-       if (order > PAGE_ALLOC_COSTLY_ORDER)
-               goto out;
-
+       if (!(gfp_mask & __GFP_NOFAIL)) {
+               /* The OOM killer will not help higher order allocs */
+               if (order > PAGE_ALLOC_COSTLY_ORDER)
+                       goto out;
+               /*
+                * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
+                * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
+                * The caller should handle page allocation failure by itself if
+                * it specifies __GFP_THISNODE.
+                * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
+                */
+               if (gfp_mask & __GFP_THISNODE)
+                       goto out;
+       }
        /* Exhausted what can be done so it's blamo time */
-       out_of_memory(zonelist, gfp_mask, order);
+       out_of_memory(zonelist, gfp_mask, order, nodemask);
 
 out:
        clear_zonelist_oom(zonelist, gfp_mask);
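
Taken together, the hunk above only invokes the OOM killer for requests it can plausibly satisfy: __GFP_NOFAIL overrides the checks, while costly high-order requests and __GFP_THISNODE callers are sent back to handle the failure themselves. A condensed sketch of that gate (the flag bits and the costly-order threshold are stand-ins chosen for the example, not the real gfp.h values):

#include <stdio.h>

#define GFP_NOFAIL_F    0x1     /* plays the role of __GFP_NOFAIL   */
#define GFP_THISNODE_F  0x2     /* plays the role of __GFP_THISNODE */
#define COSTLY_ORDER    3       /* assumed PAGE_ALLOC_COSTLY_ORDER  */

static int should_call_oom_killer(unsigned int gfp, int order)
{
        if (!(gfp & GFP_NOFAIL_F)) {
                /* the OOM killer will not help higher order allocs */
                if (order > COSTLY_ORDER)
                        return 0;
                /* node-bound callers must handle the failure themselves */
                if (gfp & GFP_THISNODE_F)
                        return 0;
        }
        return 1;       /* exhausted what can be done */
}

int main(void)
{
        printf("order-0, no flags:  %d\n", should_call_oom_killer(0, 0));
        printf("order-4, no flags:  %d\n", should_call_oom_killer(0, 4));
        printf("order-4, nofail:    %d\n",
               should_call_oom_killer(GFP_NOFAIL_F, 4));
        printf("order-0, thisnode:  %d\n",
               should_call_oom_killer(GFP_THISNODE_F, 0));
        return 0;
}
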
@@ -1589,10 +1692,6 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 
        /* We now go into synchronous reclaim */
        cpuset_memory_pressure_bump();
-
-       /*
-        * The task's cpuset might have expanded its set of allowable nodes
-        */
        p->flags |= PF_MEMALLOC;
        lockdep_set_current_reclaim_state(gfp_mask);
        reclaim_state.reclaimed_slab = 0;
@@ -1635,7 +1734,7 @@ __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
                        preferred_zone, migratetype);
 
                if (!page && gfp_mask & __GFP_NOFAIL)
-                       congestion_wait(WRITE, HZ/50);
+                       congestion_wait(BLK_RW_ASYNC, HZ/50);
        } while (!page && (gfp_mask & __GFP_NOFAIL));
 
        return page;
@@ -1677,7 +1776,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
                 * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
                 */
                alloc_flags &= ~ALLOC_CPUSET;
-       } else if (unlikely(rt_task(p)))
+       } else if (unlikely(rt_task(p)) && !in_interrupt())
                alloc_flags |= ALLOC_HARDER;
 
        if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
@@ -1709,8 +1808,10 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
         * be using allocators in order of preference for an area that is
         * too large.
         */
-       if (WARN_ON_ONCE(order >= MAX_ORDER))
+       if (order >= MAX_ORDER) {
+               WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
                return NULL;
+       }
 
        /*
         * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
@@ -1723,6 +1824,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
        if (NUMA_BUILD && (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
                goto nopage;
 
+restart:
        wake_all_kswapd(order, zonelist, high_zoneidx);
 
        /*
@@ -1732,7 +1834,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
         */
        alloc_flags = gfp_to_alloc_flags(gfp_mask);
 
-restart:
        /* This is the last chance, in general, before the goto nopage. */
        page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
                        high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
@@ -1758,6 +1859,10 @@ rebalance:
        if (p->flags & PF_MEMALLOC)
                goto nopage;
 
+       /* Avoid allocations with no watermarks from looping endlessly */
+       if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
+               goto nopage;
+
        /* Try direct reclaim and then allocating */
        page = __alloc_pages_direct_reclaim(gfp_mask, order,
                                        zonelist, high_zoneidx,
@@ -1773,6 +1878,8 @@ rebalance:
         */
        if (!did_some_progress) {
                if ((gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY)) {
+                       if (oom_killer_disabled)
+                               goto nopage;
                        page = __alloc_pages_may_oom(gfp_mask, order,
                                        zonelist, high_zoneidx,
                                        nodemask, preferred_zone,
@@ -1781,11 +1888,13 @@ rebalance:
                                goto got_pg;
 
                        /*
-                        * The OOM killer does not trigger for high-order allocations
-                        * but if no progress is being made, there are no other
-                        * options and retrying is unlikely to help
+                        * The OOM killer does not trigger for high-order
+                        * ~__GFP_NOFAIL allocations so if no progress is being
+                        * made, there are no other options and retrying is
+                        * unlikely to help.
                         */
-                       if (order > PAGE_ALLOC_COSTLY_ORDER)
+                       if (order > PAGE_ALLOC_COSTLY_ORDER &&
+                                               !(gfp_mask & __GFP_NOFAIL))
                                goto nopage;
 
                        goto restart;
@@ -1796,7 +1905,7 @@ rebalance:
        pages_reclaimed += did_some_progress;
        if (should_alloc_retry(gfp_mask, order, pages_reclaimed)) {
                /* Wait for some write requests to complete then retry */
-               congestion_wait(WRITE, HZ/50);
+               congestion_wait(BLK_RW_ASYNC, HZ/50);
                goto rebalance;
        }
 
@@ -1808,7 +1917,10 @@ nopage:
                dump_stack();
                show_mem();
        }
+       return page;
 got_pg:
+       if (kmemcheck_enabled)
+               kmemcheck_pagealloc_alloc(page, order, gfp_mask);
        return page;
 
 }
@@ -1825,6 +1937,8 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
        struct page *page;
        int migratetype = allocflags_to_migratetype(gfp_mask);
 
+       gfp_mask &= gfp_allowed_mask;
+
        lockdep_trace_alloc(gfp_mask);
 
        might_sleep_if(gfp_mask & __GFP_WAIT);
@@ -1854,6 +1968,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                                zonelist, high_zoneidx, nodemask,
                                preferred_zone, migratetype);
 
+       trace_mm_page_alloc(page, order, gfp_mask, migratetype);
        return page;
 }
 EXPORT_SYMBOL(__alloc_pages_nodemask);
@@ -1863,44 +1978,41 @@ EXPORT_SYMBOL(__alloc_pages_nodemask);
  */
 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
 {
-       struct page * page;
+       struct page *page;
+
+       /*
+        * __get_free_pages() returns a 32-bit address, which cannot represent
+        * a highmem page
+        */
+       VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
+
        page = alloc_pages(gfp_mask, order);
        if (!page)
                return 0;
        return (unsigned long) page_address(page);
 }
-
 EXPORT_SYMBOL(__get_free_pages);
 
 unsigned long get_zeroed_page(gfp_t gfp_mask)
 {
-       struct page * page;
-
-       /*
-        * get_zeroed_page() returns a 32-bit address, which cannot represent
-        * a highmem page
-        */
-       VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
-
-       page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
-       if (page)
-               return (unsigned long) page_address(page);
-       return 0;
+       return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
 }
-
 EXPORT_SYMBOL(get_zeroed_page);
 
 void __pagevec_free(struct pagevec *pvec)
 {
        int i = pagevec_count(pvec);
 
-       while (--i >= 0)
+       while (--i >= 0) {
+               trace_mm_pagevec_free(pvec->pages[i], pvec->cold);
                free_hot_cold_page(pvec->pages[i], pvec->cold);
+       }
 }
 
 void __free_pages(struct page *page, unsigned int order)
 {
        if (put_page_testzero(page)) {
+               trace_mm_page_free_direct(page, order);
                if (order == 0)
                        free_hot_page(page);
                else
@@ -1943,7 +2055,7 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
                unsigned long alloc_end = addr + (PAGE_SIZE << order);
                unsigned long used = addr + PAGE_ALIGN(size);
 
-               split_page(virt_to_page(addr), order);
+               split_page(virt_to_page((void *)addr), order);
                while (used < alloc_end) {
                        free_page(used);
                        used += PAGE_SIZE;
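
For context on the cast being fixed above: alloc_pages_exact() grabs the smallest power-of-two block covering the request, split_page()s it into order-0 pages, and then frees everything past PAGE_ALIGN(size). A worked example of that size arithmetic (4 KiB pages assumed):

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* smallest order whose block covers 'size' bytes (same role as get_order()) */
static unsigned int order_for(unsigned long size)
{
        unsigned int order = 0;

        while ((PAGE_SIZE << order) < size)
                order++;
        return order;
}

int main(void)
{
        unsigned long size = 10000;     /* example request */
        unsigned int order = order_for(size);
        unsigned long block = PAGE_SIZE << order;
        unsigned long used = PAGE_ALIGN(size);

        printf("request of %lu bytes -> order-%u block of %lu bytes\n",
               size, order, block);
        printf("caller keeps %lu bytes, %lu trailing page(s) are freed\n",
               used, (block - used) / PAGE_SIZE);
        return 0;
}
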
@@ -2075,28 +2187,27 @@ void show_free_areas(void)
                }
        }
 
-       printk("Active_anon:%lu active_file:%lu inactive_anon:%lu\n"
-               " inactive_file:%lu"
-//TODO:  check/adjust line lengths
-#ifdef CONFIG_UNEVICTABLE_LRU
+       printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
+               " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
                " unevictable:%lu"
-#endif
                " dirty:%lu writeback:%lu unstable:%lu\n"
-               " free:%lu slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
+               " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
+               " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
                global_page_state(NR_ACTIVE_ANON),
-               global_page_state(NR_ACTIVE_FILE),
                global_page_state(NR_INACTIVE_ANON),
+               global_page_state(NR_ISOLATED_ANON),
+               global_page_state(NR_ACTIVE_FILE),
                global_page_state(NR_INACTIVE_FILE),
-#ifdef CONFIG_UNEVICTABLE_LRU
+               global_page_state(NR_ISOLATED_FILE),
                global_page_state(NR_UNEVICTABLE),
-#endif
                global_page_state(NR_FILE_DIRTY),
                global_page_state(NR_WRITEBACK),
                global_page_state(NR_UNSTABLE_NFS),
                global_page_state(NR_FREE_PAGES),
-               global_page_state(NR_SLAB_RECLAIMABLE) +
-                       global_page_state(NR_SLAB_UNRECLAIMABLE),
+               global_page_state(NR_SLAB_RECLAIMABLE),
+               global_page_state(NR_SLAB_UNRECLAIMABLE),
                global_page_state(NR_FILE_MAPPED),
+               global_page_state(NR_SHMEM),
                global_page_state(NR_PAGETABLE),
                global_page_state(NR_BOUNCE));
 
@@ -2113,10 +2224,22 @@ void show_free_areas(void)
                        " inactive_anon:%lukB"
                        " active_file:%lukB"
                        " inactive_file:%lukB"
-#ifdef CONFIG_UNEVICTABLE_LRU
                        " unevictable:%lukB"
-#endif
+                       " isolated(anon):%lukB"
+                       " isolated(file):%lukB"
                        " present:%lukB"
+                       " mlocked:%lukB"
+                       " dirty:%lukB"
+                       " writeback:%lukB"
+                       " mapped:%lukB"
+                       " shmem:%lukB"
+                       " slab_reclaimable:%lukB"
+                       " slab_unreclaimable:%lukB"
+                       " kernel_stack:%lukB"
+                       " pagetables:%lukB"
+                       " unstable:%lukB"
+                       " bounce:%lukB"
+                       " writeback_tmp:%lukB"
                        " pages_scanned:%lu"
                        " all_unreclaimable? %s"
                        "\n",
@@ -2129,10 +2252,23 @@ void show_free_areas(void)
                        K(zone_page_state(zone, NR_INACTIVE_ANON)),
                        K(zone_page_state(zone, NR_ACTIVE_FILE)),
                        K(zone_page_state(zone, NR_INACTIVE_FILE)),
-#ifdef CONFIG_UNEVICTABLE_LRU
                        K(zone_page_state(zone, NR_UNEVICTABLE)),
-#endif
+                       K(zone_page_state(zone, NR_ISOLATED_ANON)),
+                       K(zone_page_state(zone, NR_ISOLATED_FILE)),
                        K(zone->present_pages),
+                       K(zone_page_state(zone, NR_MLOCK)),
+                       K(zone_page_state(zone, NR_FILE_DIRTY)),
+                       K(zone_page_state(zone, NR_WRITEBACK)),
+                       K(zone_page_state(zone, NR_FILE_MAPPED)),
+                       K(zone_page_state(zone, NR_SHMEM)),
+                       K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
+                       K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
+                       zone_page_state(zone, NR_KERNEL_STACK) *
+                               THREAD_SIZE / 1024,
+                       K(zone_page_state(zone, NR_PAGETABLE)),
+                       K(zone_page_state(zone, NR_UNSTABLE_NFS)),
+                       K(zone_page_state(zone, NR_BOUNCE)),
+                       K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
                        zone->pages_scanned,
                        (zone_is_all_unreclaimable(zone) ? "yes" : "no")
                        );
@@ -2261,18 +2397,19 @@ early_param("numa_zonelist_order", setup_numa_zonelist_order);
  * sysctl handler for numa_zonelist_order
  */
 int numa_zonelist_order_handler(ctl_table *table, int write,
-               struct file *file, void __user *buffer, size_t *length,
+               void __user *buffer, size_t *length,
                loff_t *ppos)
 {
        char saved_string[NUMA_ZONELIST_ORDER_LEN];
        int ret;
+       static DEFINE_MUTEX(zl_order_mutex);
 
+       mutex_lock(&zl_order_mutex);
        if (write)
-               strncpy(saved_string, (char*)table->data,
-                       NUMA_ZONELIST_ORDER_LEN);
-       ret = proc_dostring(table, write, file, buffer, length, ppos);
+               strcpy(saved_string, (char*)table->data);
+       ret = proc_dostring(table, write, buffer, length, ppos);
        if (ret)
-               return ret;
+               goto out;
        if (write) {
                int oldval = user_zonelist_order;
                if (__parse_numa_zonelist_order((char*)table->data)) {
@@ -2285,7 +2422,9 @@ int numa_zonelist_order_handler(ctl_table *table, int write,
                } else if (oldval != user_zonelist_order)
                        build_all_zonelists();
        }
-       return 0;
+out:
+       mutex_unlock(&zl_order_mutex);
+       return ret;
 }
 
 
@@ -2502,7 +2641,6 @@ static void build_zonelists(pg_data_t *pgdat)
        prev_node = local_node;
        nodes_clear(used_mask);
 
-       memset(node_load, 0, sizeof(node_load));
        memset(node_order, 0, sizeof(node_order));
        j = 0;
 
@@ -2611,6 +2749,9 @@ static int __build_all_zonelists(void *dummy)
 {
        int nid;
 
+#ifdef CONFIG_NUMA
+       memset(node_load, 0, sizeof(node_load));
+#endif
        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
 
@@ -2737,7 +2878,8 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 {
        unsigned long start_pfn, pfn, end_pfn;
        struct page *page;
-       unsigned long reserve, block_migratetype;
+       unsigned long block_migratetype;
+       int reserve;
 
        /* Get the start pfn, end pfn and the number of blocks to reserve */
        start_pfn = zone->zone_start_pfn;
@@ -2745,6 +2887,15 @@ static void setup_zone_migrate_reserve(struct zone *zone)
        reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
                                                        pageblock_order;
 
+       /*
+        * Reserve blocks are generally in place to help high-order atomic
+        * allocations that are short-lived. A min_free_kbytes value that
+        * would result in more than 2 reserve blocks for atomic allocations
+        * is assumed to be in place to help anti-fragmentation for the
+        * future allocation of hugepages at runtime.
+        */
+       reserve = min(2, reserve);
+
        for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
                if (!pfn_valid(pfn))
                        continue;
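
A worked example of the clamp introduced above (4 KiB pages and an order-9 pageblock of 512 pages are assumptions made only for the arithmetic):

#include <stdio.h>

#define PAGEBLOCK_ORDER         9
#define PAGEBLOCK_NR_PAGES      (1UL << PAGEBLOCK_ORDER)

static unsigned long roundup_ul(unsigned long x, unsigned long to)
{
        return ((x + to - 1) / to) * to;
}

int main(void)
{
        unsigned long min_wmark_pages = 5000;   /* example min watermark */
        int reserve;

        reserve = roundup_ul(min_wmark_pages, PAGEBLOCK_NR_PAGES)
                        >> PAGEBLOCK_ORDER;
        printf("the watermark alone asks for %d reserve pageblocks\n", reserve);

        /*
         * The hunk above caps this at 2: a larger min_free_kbytes is assumed
         * to be there for anti-fragmentation, not for atomic allocations.
         */
        if (reserve > 2)
                reserve = 2;
        printf("after the min(2, reserve) clamp: %d\n", reserve);
        return 0;
}
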
@@ -2915,6 +3066,7 @@ static int zone_batchsize(struct zone *zone)
 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
 {
        struct per_cpu_pages *pcp;
+       int migratetype;
 
        memset(p, 0, sizeof(*p));
 
@@ -2922,7 +3074,8 @@ static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
        pcp->count = 0;
        pcp->high = 6 * batch;
        pcp->batch = max(1UL, 1 * batch);
-       INIT_LIST_HEAD(&pcp->list);
+       for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
+               INIT_LIST_HEAD(&pcp->lists[migratetype]);
 }
 
 /*
@@ -2984,7 +3137,7 @@ static int __cpuinit process_zones(int cpu)
 
                if (percpu_pagelist_fraction)
                        setup_pagelist_highmark(zone_pcp(zone, cpu),
-                               (zone->present_pages / percpu_pagelist_fraction));
+                           (zone->present_pages / percpu_pagelist_fraction));
        }
 
        return 0;
@@ -2995,7 +3148,7 @@ bad:
                if (dzone == zone)
                        break;
                kfree(zone_pcp(dzone, cpu));
-               zone_pcp(dzone, cpu) = NULL;
+               zone_pcp(dzone, cpu) = &boot_pageset[cpu];
        }
        return -ENOMEM;
 }
@@ -3010,7 +3163,7 @@ static inline void free_zone_pagesets(int cpu)
                /* Free per_cpu_pageset if it is slab allocated */
                if (pset != &boot_pageset[cpu])
                        kfree(pset);
-               zone_pcp(zone, cpu) = NULL;
+               zone_pcp(zone, cpu) = &boot_pageset[cpu];
        }
 }
 
@@ -3100,6 +3253,32 @@ int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
        return 0;
 }
 
+static int __zone_pcp_update(void *data)
+{
+       struct zone *zone = data;
+       int cpu;
+       unsigned long batch = zone_batchsize(zone), flags;
+
+       for (cpu = 0; cpu < NR_CPUS; cpu++) {
+               struct per_cpu_pageset *pset;
+               struct per_cpu_pages *pcp;
+
+               pset = zone_pcp(zone, cpu);
+               pcp = &pset->pcp;
+
+               local_irq_save(flags);
+               free_pcppages_bulk(zone, pcp->count, pcp);
+               setup_pageset(pset, batch);
+               local_irq_restore(flags);
+       }
+       return 0;
+}
+
+void zone_pcp_update(struct zone *zone)
+{
+       stop_machine(__zone_pcp_update, zone, NULL);
+}
+
 static __meminit void zone_pcp_init(struct zone *zone)
 {
        int cpu;
@@ -3404,7 +3583,7 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
  * then all holes in the requested range will be accounted for.
  */
-static unsigned long __meminit __absent_pages_in_range(int nid,
+unsigned long __meminit __absent_pages_in_range(int nid,
                                unsigned long range_start_pfn,
                                unsigned long range_end_pfn)
 {
@@ -3674,7 +3853,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                zone_pcp_init(zone);
                for_each_lru(l) {
                        INIT_LIST_HEAD(&zone->lru[l].list);
-                       zone->lru[l].nr_scan = 0;
+                       zone->reclaim_stat.nr_saved_scan[l] = 0;
                }
                zone->reclaim_stat.recent_rotated[0] = 0;
                zone->reclaim_stat.recent_rotated[1] = 0;
@@ -3819,7 +3998,7 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
                }
 
                /* Merge backward if suitable */
-               if (start_pfn < early_node_map[i].end_pfn &&
+               if (start_pfn < early_node_map[i].start_pfn &&
                                end_pfn >= early_node_map[i].start_pfn) {
                        early_node_map[i].start_pfn = start_pfn;
                        return;
@@ -3933,7 +4112,7 @@ static int __init cmp_node_active_region(const void *a, const void *b)
 }
 
 /* sort the node_map by start_pfn */
-static void __init sort_node_map(void)
+void __init sort_node_map(void)
 {
        sort(early_node_map, (size_t)nr_nodemap_entries,
                        sizeof(struct node_active_region),
@@ -4001,6 +4180,8 @@ static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
        int i, nid;
        unsigned long usable_startpfn;
        unsigned long kernelcore_node, kernelcore_remaining;
+       /* save the state before borrow the nodemask */
+       nodemask_t saved_node_state = node_states[N_HIGH_MEMORY];
        unsigned long totalpages = early_calculate_totalpages();
        int usable_nodes = nodes_weight(node_states[N_HIGH_MEMORY]);
 
@@ -4028,7 +4209,7 @@ static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
 
        /* If kernelcore was not specified, there is no ZONE_MOVABLE */
        if (!required_kernelcore)
-               return;
+               goto out;
 
        /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
        find_usable_zone_for_movable();
@@ -4127,6 +4308,10 @@ restart:
        for (nid = 0; nid < MAX_NUMNODES; nid++)
                zone_movable_pfn[nid] =
                        roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
+
+out:
+       /* restore the node_state */
+       node_states[N_HIGH_MEMORY] = saved_node_state;
 }
 
 /* Any regular memory on that node ? */
@@ -4396,12 +4581,13 @@ static void setup_per_zone_lowmem_reserve(void)
 }
 
 /**
- * setup_per_zone_pages_min - called when min_free_kbytes changes.
+ * setup_per_zone_wmarks - called when min_free_kbytes changes
+ * or when memory is hot-{added|removed}
  *
- * Ensures that the pages_{min,low,high} values for each zone are set correctly
- * with respect to min_free_kbytes.
+ * Ensures that the watermark[min,low,high] values for each zone are set
+ * correctly with respect to min_free_kbytes.
  */
-void setup_per_zone_pages_min(void)
+void setup_per_zone_wmarks(void)
 {
        unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
        unsigned long lowmem_pages = 0;
@@ -4456,9 +4642,7 @@ void setup_per_zone_pages_min(void)
        calculate_totalreserve_pages();
 }
 
-/**
- * setup_per_zone_inactive_ratio - called when min_free_kbytes changes.
- *
+/*
  * The inactive anon list should be small enough that the VM never has to
  * do too much work, but large enough that each inactive page has a chance
  * to be referenced again before it is swapped out.
@@ -4479,21 +4663,26 @@ void setup_per_zone_pages_min(void)
  *    1TB     101        10GB
  *   10TB     320        32GB
  */
-static void setup_per_zone_inactive_ratio(void)
+void calculate_zone_inactive_ratio(struct zone *zone)
 {
-       struct zone *zone;
+       unsigned int gb, ratio;
 
-       for_each_zone(zone) {
-               unsigned int gb, ratio;
-
-               /* Zone size in gigabytes */
-               gb = zone->present_pages >> (30 - PAGE_SHIFT);
+       /* Zone size in gigabytes */
+       gb = zone->present_pages >> (30 - PAGE_SHIFT);
+       if (gb)
                ratio = int_sqrt(10 * gb);
-               if (!ratio)
-                       ratio = 1;
+       else
+               ratio = 1;
 
-               zone->inactive_ratio = ratio;
-       }
+       zone->inactive_ratio = ratio;
+}
+
+static void __init setup_per_zone_inactive_ratio(void)
+{
+       struct zone *zone;
+
+       for_each_zone(zone)
+               calculate_zone_inactive_ratio(zone);
 }
 
 /*
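
A quick way to check the table in the inactive-ratio comment further up is to recompute ratio = int_sqrt(10 * gb) for a few zone sizes. Illustrative sketch (a naive integer square root stands in for int_sqrt(), and the zone size is taken directly in GB rather than derived from present_pages):

#include <stdio.h>

/* naive integer square root, standing in for the kernel's int_sqrt() */
static unsigned int isqrt(unsigned long x)
{
        unsigned long r = 0;

        while ((r + 1) * (r + 1) <= x)
                r++;
        return (unsigned int)r;
}

/* ratio = int_sqrt(10 * zone size in GB), with a floor of 1 */
static unsigned int inactive_ratio(unsigned long zone_gb)
{
        unsigned int ratio = zone_gb ? isqrt(10 * zone_gb) : 1;

        return ratio ? ratio : 1;
}

int main(void)
{
        unsigned long sizes_gb[] = { 0, 1, 10, 100, 1024, 10240 };
        unsigned int i;

        /* reproduces 1GB -> 3, 10GB -> 10, 1TB -> 101, 10TB -> 320 */
        for (i = 0; i < sizeof(sizes_gb) / sizeof(sizes_gb[0]); i++)
                printf("%6lu GB zone -> inactive_ratio %u\n",
                       sizes_gb[i], inactive_ratio(sizes_gb[i]));
        return 0;
}
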
@@ -4520,7 +4709,7 @@ static void setup_per_zone_inactive_ratio(void)
  * 8192MB:     11584k
  * 16384MB:    16384k
  */
-static int __init init_per_zone_pages_min(void)
+static int __init init_per_zone_wmark_min(void)
 {
        unsigned long lowmem_kbytes;
 
@@ -4531,12 +4720,12 @@ static int __init init_per_zone_pages_min(void)
                min_free_kbytes = 128;
        if (min_free_kbytes > 65536)
                min_free_kbytes = 65536;
-       setup_per_zone_pages_min();
+       setup_per_zone_wmarks();
        setup_per_zone_lowmem_reserve();
        setup_per_zone_inactive_ratio();
        return 0;
 }
-module_init(init_per_zone_pages_min)
+module_init(init_per_zone_wmark_min)
 
 /*
  * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so 
@@ -4544,22 +4733,22 @@ module_init(init_per_zone_pages_min)
  *     changes.
  */
 int min_free_kbytes_sysctl_handler(ctl_table *table, int write, 
-       struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+       void __user *buffer, size_t *length, loff_t *ppos)
 {
-       proc_dointvec(table, write, file, buffer, length, ppos);
+       proc_dointvec(table, write, buffer, length, ppos);
        if (write)
-               setup_per_zone_pages_min();
+               setup_per_zone_wmarks();
        return 0;
 }
 
 #ifdef CONFIG_NUMA
 int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
-       struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+       void __user *buffer, size_t *length, loff_t *ppos)
 {
        struct zone *zone;
        int rc;
 
-       rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+       rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
        if (rc)
                return rc;
 
@@ -4570,12 +4759,12 @@ int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
 }
 
 int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
-       struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+       void __user *buffer, size_t *length, loff_t *ppos)
 {
        struct zone *zone;
        int rc;
 
-       rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+       rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
        if (rc)
                return rc;
 
@@ -4596,9 +4785,9 @@ int sysctl_min_slab_ratio_sysctl_handler(ctl_table *table, int write,
  * if in function of the boot time zone sizes.
  */
 int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
-       struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+       void __user *buffer, size_t *length, loff_t *ppos)
 {
-       proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+       proc_dointvec_minmax(table, write, buffer, length, ppos);
        setup_per_zone_lowmem_reserve();
        return 0;
 }
@@ -4610,16 +4799,16 @@ int lowmem_reserve_ratio_sysctl_handler(ctl_table *table, int write,
  */
 
 int percpu_pagelist_fraction_sysctl_handler(ctl_table *table, int write,
-       struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
+       void __user *buffer, size_t *length, loff_t *ppos)
 {
        struct zone *zone;
        unsigned int cpu;
        int ret;
 
-       ret = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
+       ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
        if (!write || (ret == -EINVAL))
                return ret;
-       for_each_zone(zone) {
+       for_each_populated_zone(zone) {
                for_each_online_cpu(cpu) {
                        unsigned long  high;
                        high = zone->present_pages / percpu_pagelist_fraction;
@@ -4676,7 +4865,14 @@ void *__init alloc_large_system_hash(const char *tablename,
                        numentries <<= (PAGE_SHIFT - scale);
 
                /* Make sure we've got at least a 0-order allocation.. */
-               if (unlikely((numentries * bucketsize) < PAGE_SIZE))
+               if (unlikely(flags & HASH_SMALL)) {
+                       /* Makes no sense without HASH_EARLY */
+                       WARN_ON(!(flags & HASH_EARLY));
+                       if (!(numentries >> *_hash_shift)) {
+                               numentries = 1UL << *_hash_shift;
+                               BUG_ON(!numentries);
+                       }
+               } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
                        numentries = PAGE_SIZE / bucketsize;
        }
        numentries = roundup_pow_of_two(numentries);
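
The new HASH_SMALL branch above exists only so that a tiny machine's sizing heuristic cannot undershoot the caller's minimum shift. A trivial sketch of that clamp:

#include <stdio.h>

/* keep the entry count at or above the caller's 1 << min_shift floor */
static unsigned long clamp_small_hash(unsigned long numentries,
                                      unsigned int min_shift)
{
        if (!(numentries >> min_shift))
                numentries = 1UL << min_shift;
        return numentries;
}

int main(void)
{
        /* e.g. the heuristic suggests 9 entries but the caller wants 2^4 */
        printf("%lu\n", clamp_small_hash(9, 4));        /* -> 16  */
        printf("%lu\n", clamp_small_hash(200, 4));      /* -> 200 */
        return 0;
}
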
@@ -4699,25 +4895,14 @@ void *__init alloc_large_system_hash(const char *tablename,
                else if (hashdist)
                        table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
                else {
-                       unsigned long order = get_order(size);
-
-                       if (order < MAX_ORDER)
-                               table = (void *)__get_free_pages(GFP_ATOMIC,
-                                                               order);
                        /*
                         * If bucketsize is not a power-of-two, we may free
-                        * some pages at the end of hash table.
+                        * some pages at the end of hash table which
+                        * alloc_pages_exact() automatically does
                         */
-                       if (table) {
-                               unsigned long alloc_end = (unsigned long)table +
-                                               (PAGE_SIZE << order);
-                               unsigned long used = (unsigned long)table +
-                                               PAGE_ALIGN(size);
-                               split_page(virt_to_page(table), order);
-                               while (used < alloc_end) {
-                                       free_page(used);
-                                       used += PAGE_SIZE;
-                               }
+                       if (get_order(size) < MAX_ORDER) {
+                               table = alloc_pages_exact(size, GFP_ATOMIC);
+                               kmemleak_alloc(table, size, 1, GFP_ATOMIC);
                        }
                }
        } while (!table && size > PAGE_SIZE && --log2qty);
@@ -4736,16 +4921,6 @@ void *__init alloc_large_system_hash(const char *tablename,
        if (_hash_mask)
                *_hash_mask = (1 << log2qty) - 1;
 
-       /*
-        * If hashdist is set, the table allocation is done with __vmalloc()
-        * which invokes the kmemleak_alloc() callback. This function may also
-        * be called before the slab and kmemleak are initialised when
-        * kmemleak simply buffers the request to be executed later
-        * (GFP_ATOMIC flag ignored in this case).
-        */
-       if (!hashdist)
-               kmemleak_alloc(table, size, 1, GFP_ATOMIC);
-
        return table;
 }
 
@@ -4837,20 +5012,65 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
 int set_migratetype_isolate(struct page *page)
 {
        struct zone *zone;
-       unsigned long flags;
+       struct page *curr_page;
+       unsigned long flags, pfn, iter;
+       unsigned long immobile = 0;
+       struct memory_isolate_notify arg;
+       int notifier_ret;
        int ret = -EBUSY;
+       int zone_idx;
 
        zone = page_zone(page);
+       zone_idx = zone_idx(zone);
+
        spin_lock_irqsave(&zone->lock, flags);
+       if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE ||
+           zone_idx == ZONE_MOVABLE) {
+               ret = 0;
+               goto out;
+       }
+
+       pfn = page_to_pfn(page);
+       arg.start_pfn = pfn;
+       arg.nr_pages = pageblock_nr_pages;
+       arg.pages_found = 0;
+
        /*
-        * In future, more migrate types will be able to be isolation target.
+        * It may be possible to isolate a pageblock even if the
+        * migratetype is not MIGRATE_MOVABLE. The memory isolation
+        * notifier chain is used by balloon drivers to return the
+        * number of pages in a range that are held by the balloon
+        * driver to shrink memory. If all the pages are accounted for
+        * by balloons, are free, or on the LRU, isolation can continue.
+        * Later, for example, when memory hotplug notifier runs, these
+        * pages reported as "can be isolated" should be isolated(freed)
+        * by the balloon driver through the memory notifier chain.
         */
-       if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
+       notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
+       notifier_ret = notifier_to_errno(notifier_ret);
+       if (notifier_ret || !arg.pages_found)
                goto out;
-       set_pageblock_migratetype(page, MIGRATE_ISOLATE);
-       move_freepages_block(zone, page, MIGRATE_ISOLATE);
-       ret = 0;
+
+       for (iter = pfn; iter < (pfn + pageblock_nr_pages); iter++) {
+               if (!pfn_valid_within(pfn))
+                       continue;
+
+               curr_page = pfn_to_page(iter);
+               if (!page_count(curr_page) || PageLRU(curr_page))
+                       continue;
+
+               immobile++;
+       }
+
+       if (arg.pages_found == immobile)
+               ret = 0;
+
 out:
+       if (!ret) {
+               set_pageblock_migratetype(page, MIGRATE_ISOLATE);
+               move_freepages_block(zone, page, MIGRATE_ISOLATE);
+       }
+
        spin_unlock_irqrestore(&zone->lock, flags);
        if (!ret)
                drain_all_pages();
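
Condensed, the new set_migratetype_isolate() policy is: isolate already-movable pageblocks unconditionally, and otherwise only when the notifier chain (balloon drivers) accounts for every page that is neither free nor on the LRU. An illustrative decision helper, not the kernel function:

#include <stdio.h>

static int can_isolate(int is_movable, int notifier_err,
                       unsigned long pages_found, unsigned long immobile)
{
        if (is_movable)
                return 1;
        if (notifier_err || !pages_found)
                return 0;
        return pages_found == immobile;
}

int main(void)
{
        printf("%d\n", can_isolate(1, 0, 0, 0));    /* movable block: yes    */
        printf("%d\n", can_isolate(0, 0, 32, 32));  /* balloon owns all: yes */
        printf("%d\n", can_isolate(0, 0, 32, 40));  /* unaccounted pages: no */
        return 0;
}
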
@@ -4917,3 +5137,24 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
        spin_unlock_irqrestore(&zone->lock, flags);
 }
 #endif
+
+#ifdef CONFIG_MEMORY_FAILURE
+bool is_free_buddy_page(struct page *page)
+{
+       struct zone *zone = page_zone(page);
+       unsigned long pfn = page_to_pfn(page);
+       unsigned long flags;
+       int order;
+
+       spin_lock_irqsave(&zone->lock, flags);
+       for (order = 0; order < MAX_ORDER; order++) {
+               struct page *page_head = page - (pfn & ((1 << order) - 1));
+
+               if (PageBuddy(page_head) && page_order(page_head) >= order)
+                       break;
+       }
+       spin_unlock_irqrestore(&zone->lock, flags);
+
+       return order < MAX_ORDER;
+}
+#endif
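
is_free_buddy_page() leans on the fact that the head of any order-N buddy containing a pfn is that pfn with its low N bits cleared. A small sketch printing those candidate heads (a MAX_ORDER of 11 is assumed, as on common configurations):

#include <stdio.h>

#define MAX_ORDER 11    /* assumed default */

int main(void)
{
        unsigned long pfn = 0x12345;
        int order;

        /* same mask the loop above applies: page - (pfn & ((1 << order) - 1)) */
        for (order = 0; order < MAX_ORDER; order++)
                printf("order %2d: candidate head pfn 0x%lx\n",
                       order, pfn & ~((1UL << order) - 1));
        return 0;
}
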