mempolicy: don't call mpol_set_nodemask() when no_context
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8765688..3ff3311 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -13,7 +13,7 @@
 
 #include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/slab.h>
+#include <linux/gfp.h>
 #include <linux/kernel_stat.h>
 #include <linux/swap.h>
 #include <linux/pagemap.h>
@@ -37,6 +37,9 @@
 #include <linux/delay.h>
 #include <linux/kthread.h>
 #include <linux/freezer.h>
+#include <linux/memcontrol.h>
+#include <linux/delayacct.h>
+#include <linux/sysctl.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -49,25 +52,45 @@ struct scan_control {
        /* Incremented by the number of inactive pages that were scanned */
        unsigned long nr_scanned;
 
+       /* Number of pages freed so far during a call to shrink_zones() */
+       unsigned long nr_reclaimed;
+
+       /* How many pages shrink_list() should reclaim */
+       unsigned long nr_to_reclaim;
+
+       unsigned long hibernation_mode;
+
        /* This context's GFP mask */
        gfp_t gfp_mask;
 
        int may_writepage;
 
+       /* Can mapped pages be reclaimed? */
+       int may_unmap;
+
        /* Can pages be swapped as part of reclaim? */
        int may_swap;
 
-       /* This context's SWAP_CLUSTER_MAX. If freeing memory for
-        * suspend, we effectively ignore SWAP_CLUSTER_MAX.
-        * In this context, it doesn't matter that we scan the
-        * whole list at once. */
-       int swap_cluster_max;
-
        int swappiness;
 
        int all_unreclaimable;
 
        int order;
+
+       /* Which cgroup do we reclaim from */
+       struct mem_cgroup *mem_cgroup;
+
+       /*
+        * Nodemask of nodes allowed by the caller. If NULL, all nodes
+        * are scanned.
+        */
+       nodemask_t      *nodemask;
+
+       /* Pluggable isolate pages callback */
+       unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
+                       unsigned long *scanned, int order, int mode,
+                       struct zone *z, struct mem_cgroup *mem_cont,
+                       int active, int file);
 };
 
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
@@ -109,6 +132,31 @@ long vm_total_pages;       /* The total number of pages which the VM controls */
 static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
 
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#define scanning_global_lru(sc)        (!(sc)->mem_cgroup)
+#else
+#define scanning_global_lru(sc)        (1)
+#endif
+
+static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
+                                                 struct scan_control *sc)
+{
+       if (!scanning_global_lru(sc))
+               return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
+
+       return &zone->reclaim_stat;
+}
+
+static unsigned long zone_nr_lru_pages(struct zone *zone,
+                               struct scan_control *sc, enum lru_list lru)
+{
+       if (!scanning_global_lru(sc))
+               return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
+
+       return zone_page_state(zone, NR_LRU_BASE + lru);
+}
+
+
 /*
  * Add a shrinker callback to be called from the vm
  */
@@ -141,7 +189,7 @@ EXPORT_SYMBOL(unregister_shrinker);
  * percentages of the lru and ageable caches.  This should balance the seeks
  * generated by these structures.
  *
- * If the vm encounted mapped pages on the LRU it increase the pressure on
+ * If the vm encountered mapped pages on the LRU it increases the pressure on
  * slab to avoid swapping.
  *
  * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
@@ -174,8 +222,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                do_div(delta, lru_pages + 1);
                shrinker->nr += delta;
                if (shrinker->nr < 0) {
-                       printk(KERN_ERR "%s: nr=%ld\n",
-                                       __FUNCTION__, shrinker->nr);
+                       printk(KERN_ERR "shrink_slab: %pF negative objects to "
+                              "delete nr=%ld\n",
+                              shrinker->shrink, shrinker->nr);
                        shrinker->nr = max_pass;
                }
 
@@ -213,30 +262,14 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
        return ret;
 }
 
-/* Called without lock on whether page is mapped, so answer is unstable */
-static inline int page_mapping_inuse(struct page *page)
-{
-       struct address_space *mapping;
-
-       /* Page is in somebody's page tables. */
-       if (page_mapped(page))
-               return 1;
-
-       /* Be more reluctant to reclaim swapcache than pagecache */
-       if (PageSwapCache(page))
-               return 1;
-
-       mapping = page_mapping(page);
-       if (!mapping)
-               return 0;
-
-       /* File is mmap'd by somebody? */
-       return mapping_mapped(mapping);
-}
-
 static inline int is_page_cache_freeable(struct page *page)
 {
-       return page_count(page) - !!PagePrivate(page) == 2;
+       /*
+        * A freeable page cache page is referenced only by the caller
+        * that isolated the page, the page cache radix tree and
+        * optional buffer heads at page->private.
+        */
+       return page_count(page) - page_has_private(page) == 2;
 }
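
In other words, with the isolating caller holding one reference, the check asks whether the only other references are the page cache entry itself and, optionally, the buffer heads. A minimal userspace model of that accounting (illustrative only, not the kernel helpers):

#include <assert.h>

/*
 * model_is_freeable: an isolated page-cache page is pinned by the
 * isolating caller, the page cache radix tree, and optionally its
 * buffer heads at page->private.
 */
static int model_is_freeable(int page_count, int has_private)
{
	return page_count - has_private == 2;
}

int main(void)
{
	assert(model_is_freeable(3, 1));  /* caller + radix tree + buffers */
	assert(model_is_freeable(2, 0));  /* caller + radix tree */
	assert(!model_is_freeable(3, 0)); /* somebody else still holds a ref */
	return 0;
}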
 
 static int may_write_to_queue(struct backing_dev_info *bdi)
@@ -303,7 +336,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
         * stalls if we need to run get_block().  We could test
         * PagePrivate for that.
         *
-        * If this process is currently in generic_file_write() against
+        * If this process is currently in __generic_file_aio_write() against
         * this page's queue, we can perform writeback even if that
         * will block.
         *
@@ -311,7 +344,6 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
         * block, for some throttling. This happens by accident, because
         * swap_backing_dev_info is bust: it doesn't reflect the
         * congestion state of the swapdevs.  Easy to fix, if needed.
-        * See swapfile.c:page_queue_congested().
         */
        if (!is_page_cache_freeable(page))
                return PAGE_KEEP;
@@ -320,10 +352,10 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
                 * Some data journaling orphaned pages can have
                 * page->mapping == NULL while being dirty with clean buffers.
                 */
-               if (PagePrivate(page)) {
+               if (page_has_private(page)) {
                        if (try_to_free_buffers(page)) {
                                ClearPageDirty(page);
-                               printk("%s: orphaned page\n", __FUNCTION__);
+                               printk("%s: orphaned page\n", __func__);
                                return PAGE_CLEAN;
                        }
                }
@@ -374,17 +406,15 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
 }
 
 /*
- * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
- * someone else has a ref on the page, abort and return 0.  If it was
- * successfully detached, return 1.  Assumes the caller has a single ref on
- * this page.
+ * Same as remove_mapping, but if the page is removed from the mapping, it
+ * gets returned with a refcount of 0.
  */
-int remove_mapping(struct address_space *mapping, struct page *page)
+static int __remove_mapping(struct address_space *mapping, struct page *page)
 {
        BUG_ON(!PageLocked(page));
        BUG_ON(mapping != page_mapping(page));
 
-       write_lock_irq(&mapping->tree_lock);
+       spin_lock_irq(&mapping->tree_lock);
        /*
         * The non racy check for a busy page.
         *
@@ -410,31 +440,183 @@ int remove_mapping(struct address_space *mapping, struct page *page)
         * Note that if SetPageDirty is always performed via set_page_dirty,
         * and thus under tree_lock, then this ordering is not required.
         */
-       if (unlikely(page_count(page) != 2))
+       if (!page_freeze_refs(page, 2))
                goto cannot_free;
-       smp_rmb();
-       if (unlikely(PageDirty(page)))
+       /* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
+       if (unlikely(PageDirty(page))) {
+               page_unfreeze_refs(page, 2);
                goto cannot_free;
+       }
 
        if (PageSwapCache(page)) {
                swp_entry_t swap = { .val = page_private(page) };
                __delete_from_swap_cache(page);
-               write_unlock_irq(&mapping->tree_lock);
-               swap_free(swap);
-               __put_page(page);       /* The pagecache ref */
-               return 1;
+               spin_unlock_irq(&mapping->tree_lock);
+               swapcache_free(swap, page);
+       } else {
+               __remove_from_page_cache(page);
+               spin_unlock_irq(&mapping->tree_lock);
+               mem_cgroup_uncharge_cache_page(page);
        }
 
-       __remove_from_page_cache(page);
-       write_unlock_irq(&mapping->tree_lock);
-       __put_page(page);
        return 1;
 
 cannot_free:
-       write_unlock_irq(&mapping->tree_lock);
+       spin_unlock_irq(&mapping->tree_lock);
+       return 0;
+}
+
+/*
+ * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
+ * someone else has a ref on the page, abort and return 0.  If it was
+ * successfully detached, return 1.  Assumes the caller has a single ref on
+ * this page.
+ */
+int remove_mapping(struct address_space *mapping, struct page *page)
+{
+       if (__remove_mapping(mapping, page)) {
+               /*
+                * Unfreezing the refcount with 1 rather than 2 effectively
+                * drops the pagecache ref for us without requiring another
+                * atomic operation.
+                */
+               page_unfreeze_refs(page, 1);
+               return 1;
+       }
        return 0;
 }
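
The freeze/unfreeze pair can be thought of as a compare-and-swap on the refcount: freezing succeeds only if the count is exactly the expected value (so nobody else can hold a reference), and unfreezing restores a chosen value. A rough standalone sketch of that idea using C11 atomics (the model_* names are made up for illustration):

#include <assert.h>
#include <stdatomic.h>

/* Freeze succeeds only when the count is exactly 'expected'; the count
 * is then held at zero until it is unfrozen. */
static int model_freeze_refs(atomic_int *count, int expected)
{
	return atomic_compare_exchange_strong(count, &expected, 0);
}

static void model_unfreeze_refs(atomic_int *count, int value)
{
	atomic_store(count, value);
}

int main(void)
{
	atomic_int refs = 2;	/* isolating caller + page cache */
	atomic_int busy = 3;	/* somebody else also holds a reference */

	assert(model_freeze_refs(&refs, 2));	/* no other users: frozen */
	model_unfreeze_refs(&refs, 1);		/* hand back only our own ref */
	assert(atomic_load(&refs) == 1);

	assert(!model_freeze_refs(&busy, 2));	/* extra reference: cannot free */
	return 0;
}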
 
+/**
+ * putback_lru_page - put previously isolated page onto appropriate LRU list
+ * @page: page to be put back to appropriate lru list
+ *
+ * Add previously isolated @page to appropriate LRU list.
+ * Page may still be unevictable for other reasons.
+ *
+ * lru_lock must not be held, interrupts must be enabled.
+ */
+void putback_lru_page(struct page *page)
+{
+       int lru;
+       int active = !!TestClearPageActive(page);
+       int was_unevictable = PageUnevictable(page);
+
+       VM_BUG_ON(PageLRU(page));
+
+redo:
+       ClearPageUnevictable(page);
+
+       if (page_evictable(page, NULL)) {
+               /*
+                * For evictable pages, we can use the cache.
+                * In event of a race, worst case is we end up with an
+                * unevictable page on [in]active list.
+                * We know how to handle that.
+                */
+               lru = active + page_lru_base_type(page);
+               lru_cache_add_lru(page, lru);
+       } else {
+               /*
+                * Put unevictable pages directly on zone's unevictable
+                * list.
+                */
+               lru = LRU_UNEVICTABLE;
+               add_page_to_unevictable_list(page);
+               /*
+                * When racing with an mlock clearing (page is
+                * unlocked), make sure that if the other thread does
+                * not observe our setting of PG_lru and fails
+                * isolation, we see PG_mlocked cleared below and move
+                * the page back to the evictable list.
+                *
+                * The other side is TestClearPageMlocked().
+                */
+               smp_mb();
+       }
+
+       /*
+        * The page's status can change while we move it among LRU lists. If
+        * an evictable page ends up on the unevictable list, it will never be
+        * freed. To avoid that, check again after we added it to the list.
+        */
+       if (lru == LRU_UNEVICTABLE && page_evictable(page, NULL)) {
+               if (!isolate_lru_page(page)) {
+                       put_page(page);
+                       goto redo;
+               }
+               /* This means someone else dropped this page from LRU
+                * So, it will be freed or putback to LRU again. There is
+                * nothing to do here.
+                */
+       }
+
+       if (was_unevictable && lru != LRU_UNEVICTABLE)
+               count_vm_event(UNEVICTABLE_PGRESCUED);
+       else if (!was_unevictable && lru == LRU_UNEVICTABLE)
+               count_vm_event(UNEVICTABLE_PGCULLED);
+
+       put_page(page);         /* drop ref from isolate */
+}
+
+enum page_references {
+       PAGEREF_RECLAIM,
+       PAGEREF_RECLAIM_CLEAN,
+       PAGEREF_KEEP,
+       PAGEREF_ACTIVATE,
+};
+
+static enum page_references page_check_references(struct page *page,
+                                                 struct scan_control *sc)
+{
+       int referenced_ptes, referenced_page;
+       unsigned long vm_flags;
+
+       referenced_ptes = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
+       referenced_page = TestClearPageReferenced(page);
+
+       /* Lumpy reclaim - ignore references */
+       if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+               return PAGEREF_RECLAIM;
+
+       /*
+        * Mlock lost the isolation race with us.  Let try_to_unmap()
+        * move the page to the unevictable list.
+        */
+       if (vm_flags & VM_LOCKED)
+               return PAGEREF_RECLAIM;
+
+       if (referenced_ptes) {
+               if (PageAnon(page))
+                       return PAGEREF_ACTIVATE;
+               /*
+                * All mapped pages start out with page table
+                * references from the instantiating fault, so we need
+                * to look twice if a mapped file page is used more
+                * than once.
+                *
+                * Mark it and spare it for another trip around the
+                * inactive list.  Another page table reference will
+                * lead to its activation.
+                *
+                * Note: the mark is set for activated pages as well
+                * so that recently deactivated but used pages are
+                * quickly recovered.
+                */
+               SetPageReferenced(page);
+
+               if (referenced_page)
+                       return PAGEREF_ACTIVATE;
+
+               return PAGEREF_KEEP;
+       }
+
+       /* Reclaim if clean, defer dirty pages to writeback */
+       if (referenced_page)
+               return PAGEREF_RECLAIM_CLEAN;
+
+       return PAGEREF_RECLAIM;
+}
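
Stripping away the kernel-internal helpers, the decision above is a small table over (lumpy reclaim, VM_LOCKED, pte references, the PG_referenced flag, anon vs. file). A rough standalone model of that table follows; plain ints stand in for the kernel types, and the SetPageReferenced/TestClearPageReferenced side effects are left out:

#include <assert.h>

enum model_page_references {
	MODEL_RECLAIM,		/* reclaim, writing back if dirty */
	MODEL_RECLAIM_CLEAN,	/* reclaim only if clean */
	MODEL_KEEP,		/* leave on the inactive list */
	MODEL_ACTIVATE,		/* move back to the active list */
};

static enum model_page_references model_check_references(int lumpy,
		int vm_locked, int referenced_ptes, int referenced_flag,
		int is_anon)
{
	if (lumpy)
		return MODEL_RECLAIM;	/* lumpy reclaim ignores references */
	if (vm_locked)
		return MODEL_RECLAIM;	/* let try_to_unmap() cull the page */

	if (referenced_ptes) {
		if (is_anon)
			return MODEL_ACTIVATE;
		/* file pages: first reference only marks, second activates */
		return referenced_flag ? MODEL_ACTIVATE : MODEL_KEEP;
	}

	return referenced_flag ? MODEL_RECLAIM_CLEAN : MODEL_RECLAIM;
}

int main(void)
{
	assert(model_check_references(0, 0, 1, 0, 1) == MODEL_ACTIVATE);
	assert(model_check_references(0, 0, 1, 0, 0) == MODEL_KEEP);
	assert(model_check_references(0, 0, 0, 1, 0) == MODEL_RECLAIM_CLEAN);
	return 0;
}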
+
 /*
  * shrink_page_list() returns the number of reclaimed pages
  */
@@ -451,24 +633,27 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
        pagevec_init(&freed_pvec, 1);
        while (!list_empty(page_list)) {
+               enum page_references references;
                struct address_space *mapping;
                struct page *page;
                int may_enter_fs;
-               int referenced;
 
                cond_resched();
 
                page = lru_to_page(page_list);
                list_del(&page->lru);
 
-               if (TestSetPageLocked(page))
+               if (!trylock_page(page))
                        goto keep;
 
                VM_BUG_ON(PageActive(page));
 
                sc->nr_scanned++;
 
-               if (!sc->may_swap && page_mapped(page))
+               if (unlikely(!page_evictable(page, NULL)))
+                       goto cull_mlocked;
+
+               if (!sc->may_unmap && page_mapped(page))
                        goto keep_locked;
 
                /* Double the slab pressure for mapped and swapcache pages */
@@ -493,21 +678,28 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                goto keep_locked;
                }
 
-               referenced = page_referenced(page, 1);
-               /* In active use or really unfreeable?  Activate it. */
-               if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
-                                       referenced && page_mapping_inuse(page))
+               references = page_check_references(page, sc);
+               switch (references) {
+               case PAGEREF_ACTIVATE:
                        goto activate_locked;
+               case PAGEREF_KEEP:
+                       goto keep_locked;
+               case PAGEREF_RECLAIM:
+               case PAGEREF_RECLAIM_CLEAN:
+                       ; /* try to reclaim the page below */
+               }
 
-#ifdef CONFIG_SWAP
                /*
                 * Anonymous process memory has backing store?
                 * Try to allocate it some swap space here.
                 */
-               if (PageAnon(page) && !PageSwapCache(page))
-                       if (!add_to_swap(page, GFP_ATOMIC))
+               if (PageAnon(page) && !PageSwapCache(page)) {
+                       if (!(sc->gfp_mask & __GFP_IO))
+                               goto keep_locked;
+                       if (!add_to_swap(page))
                                goto activate_locked;
-#endif /* CONFIG_SWAP */
+                       may_enter_fs = 1;
+               }
 
                mapping = page_mapping(page);
 
@@ -516,18 +708,20 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                 * processes. Try to unmap it here.
                 */
                if (page_mapped(page) && mapping) {
-                       switch (try_to_unmap(page, 0)) {
+                       switch (try_to_unmap(page, TTU_UNMAP)) {
                        case SWAP_FAIL:
                                goto activate_locked;
                        case SWAP_AGAIN:
                                goto keep_locked;
+                       case SWAP_MLOCK:
+                               goto cull_mlocked;
                        case SWAP_SUCCESS:
                                ; /* try to free the page below */
                        }
                }
 
                if (PageDirty(page)) {
-                       if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
+                       if (references == PAGEREF_RECLAIM_CLEAN)
                                goto keep_locked;
                        if (!may_enter_fs)
                                goto keep_locked;
@@ -547,7 +741,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                 * A synchronous write - probably a ramdisk.  Go
                                 * ahead and try to reclaim the page.
                                 */
-                               if (TestSetPageLocked(page))
+                               if (!trylock_page(page))
                                        goto keep;
                                if (PageDirty(page) || PageWriteback(page))
                                        goto keep_locked;
@@ -567,7 +761,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                 * possible for a page to have PageDirty set, but it is actually
                 * clean (all its buffers are clean).  This happens if the
                 * buffers were written out directly, with submit_bh(). ext3
-                * will do this, as well as the blockdev mapping. 
+                * will do this, as well as the blockdev mapping.
                 * try_to_release_page() will discover that cleanness and will
                 * drop the buffers and mark the page clean - it can be freed.
                 *
@@ -578,35 +772,69 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                 * process address space (page_count == 1) it can be freed.
                 * Otherwise, leave the page on the LRU so it is swappable.
                 */
-               if (PagePrivate(page)) {
+               if (page_has_private(page)) {
                        if (!try_to_release_page(page, sc->gfp_mask))
                                goto activate_locked;
-                       if (!mapping && page_count(page) == 1)
-                               goto free_it;
+                       if (!mapping && page_count(page) == 1) {
+                               unlock_page(page);
+                               if (put_page_testzero(page))
+                                       goto free_it;
+                               else {
+                                       /*
+                                        * rare race with speculative reference.
+                                        * the speculative reference will free
+                                        * this page shortly, so we may
+                                        * increment nr_reclaimed here (and
+                                        * leave it off the LRU).
+                                        */
+                                       nr_reclaimed++;
+                                       continue;
+                               }
+                       }
                }
 
-               if (!mapping || !remove_mapping(mapping, page))
+               if (!mapping || !__remove_mapping(mapping, page))
                        goto keep_locked;
 
+               /*
+                * At this point, we have no other references and there is
+                * no way to pick any more up (removed from LRU, removed
+                * from pagecache). Can use non-atomic bitops now (and
+                * we obviously don't have to worry about waking up a process
+                * waiting on the page lock, because there are no references.)
+                */
+               __clear_page_locked(page);
 free_it:
-               unlock_page(page);
                nr_reclaimed++;
-               if (!pagevec_add(&freed_pvec, page))
-                       __pagevec_release_nonlru(&freed_pvec);
+               if (!pagevec_add(&freed_pvec, page)) {
+                       __pagevec_free(&freed_pvec);
+                       pagevec_reinit(&freed_pvec);
+               }
+               continue;
+
+cull_mlocked:
+               if (PageSwapCache(page))
+                       try_to_free_swap(page);
+               unlock_page(page);
+               putback_lru_page(page);
                continue;
 
 activate_locked:
+               /* Not a candidate for swapping, so reclaim swap space. */
+               if (PageSwapCache(page) && vm_swap_full())
+                       try_to_free_swap(page);
+               VM_BUG_ON(PageActive(page));
                SetPageActive(page);
                pgactivate++;
 keep_locked:
                unlock_page(page);
 keep:
                list_add(&page->lru, &ret_pages);
-               VM_BUG_ON(PageLRU(page));
+               VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
        }
        list_splice(&ret_pages, page_list);
        if (pagevec_count(&freed_pvec))
-               __pagevec_release_nonlru(&freed_pvec);
+               __pagevec_free(&freed_pvec);
        count_vm_events(PGACTIVATE, pgactivate);
        return nr_reclaimed;
 }
@@ -626,7 +854,7 @@ keep:
  *
  * returns 0 on success, -ve errno on failure.
  */
-static int __isolate_lru_page(struct page *page, int mode)
+int __isolate_lru_page(struct page *page, int mode, int file)
 {
        int ret = -EINVAL;
 
@@ -642,7 +870,19 @@ static int __isolate_lru_page(struct page *page, int mode)
        if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
                return ret;
 
+       if (mode != ISOLATE_BOTH && page_is_file_cache(page) != file)
+               return ret;
+
+       /*
+        * When this function is being called for lumpy reclaim, we
+        * initially look into all LRU pages, active, inactive and
+        * unevictable; only give shrink_page_list evictable pages.
+        */
+       if (PageUnevictable(page))
+               return ret;
+
        ret = -EBUSY;
+
        if (likely(get_page_unless_zero(page))) {
                /*
                 * Be careful not to clear PageLRU until after we're
@@ -672,12 +912,13 @@ static int __isolate_lru_page(struct page *page, int mode)
  * @scanned:   The number of pages that were scanned.
  * @order:     The caller's attempted allocation order
  * @mode:      One of the LRU isolation modes
+ * @file:      True [1] if isolating file [!anon] pages
  *
  * returns how many pages were moved onto *@dst.
  */
 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                struct list_head *src, struct list_head *dst,
-               unsigned long *scanned, int order, int mode)
+               unsigned long *scanned, int order, int mode, int file)
 {
        unsigned long nr_taken = 0;
        unsigned long scan;
@@ -694,15 +935,17 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 
                VM_BUG_ON(!PageLRU(page));
 
-               switch (__isolate_lru_page(page, mode)) {
+               switch (__isolate_lru_page(page, mode, file)) {
                case 0:
                        list_move(&page->lru, dst);
+                       mem_cgroup_del_lru(page);
                        nr_taken++;
                        break;
 
                case -EBUSY:
                        /* else it is being freed elsewhere */
                        list_move(&page->lru, src);
+                       mem_cgroup_rotate_lru_list(page, page_lru(page));
                        continue;
 
                default:
@@ -737,21 +980,25 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                                break;
 
                        cursor_page = pfn_to_page(pfn);
+
                        /* Check that we have not crossed a zone boundary. */
                        if (unlikely(page_zone_id(cursor_page) != zone_id))
                                continue;
-                       switch (__isolate_lru_page(cursor_page, mode)) {
-                       case 0:
+
+                       /*
+                        * If we don't have enough swap space, reclaiming
+                        * anon pages which don't already have a swap slot is
+                        * pointless.
+                        */
+                       if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
+                                       !PageSwapCache(cursor_page))
+                               continue;
+
+                       if (__isolate_lru_page(cursor_page, mode, file) == 0) {
                                list_move(&cursor_page->lru, dst);
+                               mem_cgroup_del_lru(cursor_page);
                                nr_taken++;
                                scan++;
-                               break;
-
-                       case -EBUSY:
-                               /* else it is being freed elsewhere */
-                               list_move(&cursor_page->lru, src);
-                       default:
-                               break;
                        }
                }
        }
@@ -760,35 +1007,150 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
        return nr_taken;
 }
 
+static unsigned long isolate_pages_global(unsigned long nr,
+                                       struct list_head *dst,
+                                       unsigned long *scanned, int order,
+                                       int mode, struct zone *z,
+                                       struct mem_cgroup *mem_cont,
+                                       int active, int file)
+{
+       int lru = LRU_BASE;
+       if (active)
+               lru += LRU_ACTIVE;
+       if (file)
+               lru += LRU_FILE;
+       return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
+                                                               mode, file);
+}
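
The lru index arithmetic works because the active and file "bits" are additive offsets in enum lru_list. Assuming the layout of this era (base 0, +1 for active, +2 for file), the four evictable lists come out as in the standalone sketch below; this is a model, not the kernel header:

#include <stdio.h>

/* Assumed layout, mirroring the split-LRU enum of this era. */
enum model_lru_list {
	MODEL_INACTIVE_ANON,	/* 0: LRU_BASE */
	MODEL_ACTIVE_ANON,	/* 1: LRU_BASE + LRU_ACTIVE */
	MODEL_INACTIVE_FILE,	/* 2: LRU_BASE + LRU_FILE */
	MODEL_ACTIVE_FILE,	/* 3: LRU_BASE + LRU_ACTIVE + LRU_FILE */
	MODEL_UNEVICTABLE,	/* 4 */
};

#define MODEL_LRU_BASE   0
#define MODEL_LRU_ACTIVE 1
#define MODEL_LRU_FILE   2

static int model_pick_lru(int active, int file)
{
	int lru = MODEL_LRU_BASE;

	if (active)
		lru += MODEL_LRU_ACTIVE;
	if (file)
		lru += MODEL_LRU_FILE;
	return lru;
}

int main(void)
{
	printf("anon/inactive -> %d\n", model_pick_lru(0, 0));	/* 0 */
	printf("anon/active   -> %d\n", model_pick_lru(1, 0));	/* 1 */
	printf("file/inactive -> %d\n", model_pick_lru(0, 1));	/* 2 */
	printf("file/active   -> %d\n", model_pick_lru(1, 1));	/* 3 */
	return 0;
}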
+
 /*
  * clear_active_flags() is a helper for shrink_active_list(), clearing
  * any active bits from the pages in the list.
  */
-static unsigned long clear_active_flags(struct list_head *page_list)
+static unsigned long clear_active_flags(struct list_head *page_list,
+                                       unsigned int *count)
 {
        int nr_active = 0;
+       int lru;
        struct page *page;
 
-       list_for_each_entry(page, page_list, lru)
+       list_for_each_entry(page, page_list, lru) {
+               lru = page_lru_base_type(page);
                if (PageActive(page)) {
+                       lru += LRU_ACTIVE;
                        ClearPageActive(page);
                        nr_active++;
                }
+               count[lru]++;
+       }
 
        return nr_active;
 }
 
+/**
+ * isolate_lru_page - tries to isolate a page from its LRU list
+ * @page: page to isolate from its LRU list
+ *
+ * Isolates a @page from an LRU list, clears PageLRU and adjusts the
+ * vmstat statistic corresponding to whatever LRU list the page was on.
+ *
+ * Returns 0 if the page was removed from an LRU list.
+ * Returns -EBUSY if the page was not on an LRU list.
+ *
+ * The returned page will have PageLRU() cleared.  If it was found on
+ * the active list, it will have PageActive set.  If it was found on
+ * the unevictable list, it will have the PageUnevictable bit set. That flag
+ * may need to be cleared by the caller before letting the page go.
+ *
+ * The vmstat statistic corresponding to the list on which the page was
+ * found will be decremented.
+ *
+ * Restrictions:
+ * (1) Must be called with an elevated refcount on the page. This is a
+ *     fundamental difference from isolate_lru_pages (which is called
+ *     without a stable reference).
+ * (2) the lru_lock must not be held.
+ * (3) interrupts must be enabled.
+ */
+int isolate_lru_page(struct page *page)
+{
+       int ret = -EBUSY;
+
+       if (PageLRU(page)) {
+               struct zone *zone = page_zone(page);
+
+               spin_lock_irq(&zone->lru_lock);
+               if (PageLRU(page) && get_page_unless_zero(page)) {
+                       int lru = page_lru(page);
+                       ret = 0;
+                       ClearPageLRU(page);
+
+                       del_page_from_lru_list(zone, page, lru);
+               }
+               spin_unlock_irq(&zone->lru_lock);
+       }
+       return ret;
+}
+
+/*
+ * Are there way too many processes in the direct reclaim path already?
+ */
+static int too_many_isolated(struct zone *zone, int file,
+               struct scan_control *sc)
+{
+       unsigned long inactive, isolated;
+
+       if (current_is_kswapd())
+               return 0;
+
+       if (!scanning_global_lru(sc))
+               return 0;
+
+       if (file) {
+               inactive = zone_page_state(zone, NR_INACTIVE_FILE);
+               isolated = zone_page_state(zone, NR_ISOLATED_FILE);
+       } else {
+               inactive = zone_page_state(zone, NR_INACTIVE_ANON);
+               isolated = zone_page_state(zone, NR_ISOLATED_ANON);
+       }
+
+       return isolated > inactive;
+}
+
 /*
  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
  * of reclaimed pages
  */
 static unsigned long shrink_inactive_list(unsigned long max_scan,
-                               struct zone *zone, struct scan_control *sc)
+                       struct zone *zone, struct scan_control *sc,
+                       int priority, int file)
 {
        LIST_HEAD(page_list);
        struct pagevec pvec;
        unsigned long nr_scanned = 0;
        unsigned long nr_reclaimed = 0;
+       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+       int lumpy_reclaim = 0;
+
+       while (unlikely(too_many_isolated(zone, file, sc))) {
+               congestion_wait(BLK_RW_ASYNC, HZ/10);
+
+               /* We are about to die and free our memory. Return now. */
+               if (fatal_signal_pending(current))
+                       return SWAP_CLUSTER_MAX;
+       }
+
+       /*
+        * If we need a large contiguous chunk of memory, or have
+        * trouble getting a small set of contiguous pages, we
+        * will reclaim both active and inactive pages.
+        *
+        * We use the same threshold as pageout congestion_wait below.
+        */
+       if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
+               lumpy_reclaim = 1;
+       else if (sc->order && priority < DEF_PRIORITY - 2)
+               lumpy_reclaim = 1;
 
        pagevec_init(&pvec, 1);
 
@@ -800,19 +1162,48 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                unsigned long nr_scan;
                unsigned long nr_freed;
                unsigned long nr_active;
+               unsigned int count[NR_LRU_LISTS] = { 0, };
+               int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
+               unsigned long nr_anon;
+               unsigned long nr_file;
+
+               nr_taken = sc->isolate_pages(SWAP_CLUSTER_MAX,
+                            &page_list, &nr_scan, sc->order, mode,
+                               zone, sc->mem_cgroup, 0, file);
+
+               if (scanning_global_lru(sc)) {
+                       zone->pages_scanned += nr_scan;
+                       if (current_is_kswapd())
+                               __count_zone_vm_events(PGSCAN_KSWAPD, zone,
+                                                      nr_scan);
+                       else
+                               __count_zone_vm_events(PGSCAN_DIRECT, zone,
+                                                      nr_scan);
+               }
+
+               if (nr_taken == 0)
+                       goto done;
 
-               nr_taken = isolate_lru_pages(sc->swap_cluster_max,
-                            &zone->inactive_list,
-                            &page_list, &nr_scan, sc->order,
-                            (sc->order > PAGE_ALLOC_COSTLY_ORDER)?
-                                            ISOLATE_BOTH : ISOLATE_INACTIVE);
-               nr_active = clear_active_flags(&page_list);
+               nr_active = clear_active_flags(&page_list, count);
                __count_vm_events(PGDEACTIVATE, nr_active);
 
-               __mod_zone_page_state(zone, NR_ACTIVE, -nr_active);
-               __mod_zone_page_state(zone, NR_INACTIVE,
-                                               -(nr_taken - nr_active));
-               zone->pages_scanned += nr_scan;
+               __mod_zone_page_state(zone, NR_ACTIVE_FILE,
+                                               -count[LRU_ACTIVE_FILE]);
+               __mod_zone_page_state(zone, NR_INACTIVE_FILE,
+                                               -count[LRU_INACTIVE_FILE]);
+               __mod_zone_page_state(zone, NR_ACTIVE_ANON,
+                                               -count[LRU_ACTIVE_ANON]);
+               __mod_zone_page_state(zone, NR_INACTIVE_ANON,
+                                               -count[LRU_INACTIVE_ANON]);
+
+               nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
+               nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
+               __mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
+               __mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
+
+               reclaim_stat->recent_scanned[0] += nr_anon;
+               reclaim_stat->recent_scanned[1] += nr_file;
+
                spin_unlock_irq(&zone->lru_lock);
 
                nr_scanned += nr_scan;
@@ -825,14 +1216,14 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                 * but that should be acceptable to the caller
                 */
                if (nr_freed < nr_taken && !current_is_kswapd() &&
-                                       sc->order > PAGE_ALLOC_COSTLY_ORDER) {
-                       congestion_wait(WRITE, HZ/10);
+                   lumpy_reclaim) {
+                       congestion_wait(BLK_RW_ASYNC, HZ/10);
 
                        /*
                         * The attempt at page out may have made some
                         * of the pages active, mark them inactive again.
                         */
-                       nr_active = clear_active_flags(&page_list);
+                       nr_active = clear_active_flags(&page_list, count);
                        count_vm_events(PGDEACTIVATE, nr_active);
 
                        nr_freed += shrink_page_list(&page_list, sc,
@@ -840,40 +1231,47 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                }
 
                nr_reclaimed += nr_freed;
+
                local_irq_disable();
-               if (current_is_kswapd()) {
-                       __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
+               if (current_is_kswapd())
                        __count_vm_events(KSWAPD_STEAL, nr_freed);
-               } else
-                       __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
                __count_zone_vm_events(PGSTEAL, zone, nr_freed);
 
-               if (nr_taken == 0)
-                       goto done;
-
                spin_lock(&zone->lru_lock);
                /*
                 * Put back any unfreeable pages.
                 */
                while (!list_empty(&page_list)) {
+                       int lru;
                        page = lru_to_page(&page_list);
                        VM_BUG_ON(PageLRU(page));
-                       SetPageLRU(page);
                        list_del(&page->lru);
-                       if (PageActive(page))
-                               add_page_to_active_list(zone, page);
-                       else
-                               add_page_to_inactive_list(zone, page);
+                       if (unlikely(!page_evictable(page, NULL))) {
+                               spin_unlock_irq(&zone->lru_lock);
+                               putback_lru_page(page);
+                               spin_lock_irq(&zone->lru_lock);
+                               continue;
+                       }
+                       SetPageLRU(page);
+                       lru = page_lru(page);
+                       add_page_to_lru_list(zone, page, lru);
+                       if (is_active_lru(lru)) {
+                               int file = is_file_lru(lru);
+                               reclaim_stat->recent_rotated[file]++;
+                       }
                        if (!pagevec_add(&pvec, page)) {
                                spin_unlock_irq(&zone->lru_lock);
                                __pagevec_release(&pvec);
                                spin_lock_irq(&zone->lru_lock);
                        }
                }
+               __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
+               __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
+
        } while (nr_scanned < max_scan);
-       spin_unlock(&zone->lru_lock);
+
 done:
-       local_irq_enable();
+       spin_unlock_irq(&zone->lru_lock);
        pagevec_release(&pvec);
        return nr_reclaimed;
 }
@@ -892,12 +1290,6 @@ static inline void note_zone_scanning_priority(struct zone *zone, int priority)
                zone->prev_priority = priority;
 }
 
-static inline int zone_is_near_oom(struct zone *zone)
-{
-       return zone->pages_scanned >= (zone_page_state(zone, NR_ACTIVE)
-                               + zone_page_state(zone, NR_INACTIVE))*3;
-}
-
 /*
  * This moves pages from the active list to the inactive list.
  *
@@ -915,201 +1307,388 @@ static inline int zone_is_near_oom(struct zone *zone)
  * The downside is that we have to touch page->_count against each page.
  * But we had to alter page->flags anyway.
  */
-static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
-                               struct scan_control *sc, int priority)
+
+static void move_active_pages_to_lru(struct zone *zone,
+                                    struct list_head *list,
+                                    enum lru_list lru)
 {
-       unsigned long pgmoved;
-       int pgdeactivate = 0;
-       unsigned long pgscanned;
-       LIST_HEAD(l_hold);      /* The pages which were snipped off */
-       LIST_HEAD(l_inactive);  /* Pages to go onto the inactive_list */
-       LIST_HEAD(l_active);    /* Pages to go onto the active_list */
-       struct page *page;
+       unsigned long pgmoved = 0;
        struct pagevec pvec;
-       int reclaim_mapped = 0;
-
-       if (sc->may_swap) {
-               long mapped_ratio;
-               long distress;
-               long swap_tendency;
+       struct page *page;
 
-               if (zone_is_near_oom(zone))
-                       goto force_reclaim_mapped;
+       pagevec_init(&pvec, 1);
 
-               /*
-                * `distress' is a measure of how much trouble we're having
-                * reclaiming pages.  0 -> no problems.  100 -> great trouble.
-                */
-               distress = 100 >> min(zone->prev_priority, priority);
+       while (!list_empty(list)) {
+               page = lru_to_page(list);
 
-               /*
-                * The point of this algorithm is to decide when to start
-                * reclaiming mapped memory instead of just pagecache.  Work out
-                * how much memory
-                * is mapped.
-                */
-               mapped_ratio = ((global_page_state(NR_FILE_MAPPED) +
-                               global_page_state(NR_ANON_PAGES)) * 100) /
-                                       vm_total_pages;
+               VM_BUG_ON(PageLRU(page));
+               SetPageLRU(page);
 
-               /*
-                * Now decide how much we really want to unmap some pages.  The
-                * mapped ratio is downgraded - just because there's a lot of
-                * mapped memory doesn't necessarily mean that page reclaim
-                * isn't succeeding.
-                *
-                * The distress ratio is important - we don't want to start
-                * going oom.
-                *
-                * A 100% value of vm_swappiness overrides this algorithm
-                * altogether.
-                */
-               swap_tendency = mapped_ratio / 2 + distress + sc->swappiness;
+               list_move(&page->lru, &zone->lru[lru].list);
+               mem_cgroup_add_lru_list(page, lru);
+               pgmoved++;
 
-               /*
-                * Now use this metric to decide whether to start moving mapped
-                * memory onto the inactive list.
-                */
-               if (swap_tendency >= 100)
-force_reclaim_mapped:
-                       reclaim_mapped = 1;
+               if (!pagevec_add(&pvec, page) || list_empty(list)) {
+                       spin_unlock_irq(&zone->lru_lock);
+                       if (buffer_heads_over_limit)
+                               pagevec_strip(&pvec);
+                       __pagevec_release(&pvec);
+                       spin_lock_irq(&zone->lru_lock);
+               }
        }
+       __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
+       if (!is_active_lru(lru))
+               __count_vm_events(PGDEACTIVATE, pgmoved);
+}
+
+static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
+                       struct scan_control *sc, int priority, int file)
+{
+       unsigned long nr_taken;
+       unsigned long pgscanned;
+       unsigned long vm_flags;
+       LIST_HEAD(l_hold);      /* The pages which were snipped off */
+       LIST_HEAD(l_active);
+       LIST_HEAD(l_inactive);
+       struct page *page;
+       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+       unsigned long nr_rotated = 0;
 
        lru_add_drain();
        spin_lock_irq(&zone->lru_lock);
-       pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
-                           &l_hold, &pgscanned, sc->order, ISOLATE_ACTIVE);
-       zone->pages_scanned += pgscanned;
-       __mod_zone_page_state(zone, NR_ACTIVE, -pgmoved);
+       nr_taken = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
+                                       ISOLATE_ACTIVE, zone,
+                                       sc->mem_cgroup, 1, file);
+       /*
+        * zone->pages_scanned is used to detect the zone's OOM situation;
+        * mem_cgroup remembers nr_scan by itself.
+        */
+       if (scanning_global_lru(sc)) {
+               zone->pages_scanned += pgscanned;
+       }
+       reclaim_stat->recent_scanned[file] += nr_taken;
+
+       __count_zone_vm_events(PGREFILL, zone, pgscanned);
+       if (file)
+               __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
+       else
+               __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
+       __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
        spin_unlock_irq(&zone->lru_lock);
 
        while (!list_empty(&l_hold)) {
                cond_resched();
                page = lru_to_page(&l_hold);
                list_del(&page->lru);
-               if (page_mapped(page)) {
-                       if (!reclaim_mapped ||
-                           (total_swap_pages == 0 && PageAnon(page)) ||
-                           page_referenced(page, 0)) {
+
+               if (unlikely(!page_evictable(page, NULL))) {
+                       putback_lru_page(page);
+                       continue;
+               }
+
+               if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
+                       nr_rotated++;
+                       /*
+                        * Identify referenced, file-backed active pages and
+                        * give them one more trip around the active list, so
+                        * that executable code gets a better chance to stay in
+                        * memory under moderate memory pressure.  Anon pages
+                        * are not likely to be evicted by use-once streaming
+                        * IO, plus JVM can create lots of anon VM_EXEC pages,
+                        * so we ignore them here.
+                        */
+                       if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
                                list_add(&page->lru, &l_active);
                                continue;
                        }
                }
+
+               ClearPageActive(page);  /* we are de-activating */
                list_add(&page->lru, &l_inactive);
        }
 
-       pagevec_init(&pvec, 1);
-       pgmoved = 0;
+       /*
+        * Move pages back to the lru list.
+        */
        spin_lock_irq(&zone->lru_lock);
-       while (!list_empty(&l_inactive)) {
-               page = lru_to_page(&l_inactive);
-               prefetchw_prev_lru_page(page, &l_inactive, flags);
-               VM_BUG_ON(PageLRU(page));
-               SetPageLRU(page);
-               VM_BUG_ON(!PageActive(page));
-               ClearPageActive(page);
+       /*
+        * Count referenced pages from currently used mappings as rotated,
+        * even though only some of them are actually re-activated.  This
+        * helps balance scan pressure between file and anonymous pages in
+        * get_scan_ratio.
+        */
+       reclaim_stat->recent_rotated[file] += nr_rotated;
 
-               list_move(&page->lru, &zone->inactive_list);
-               pgmoved++;
-               if (!pagevec_add(&pvec, page)) {
-                       __mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
-                       spin_unlock_irq(&zone->lru_lock);
-                       pgdeactivate += pgmoved;
-                       pgmoved = 0;
-                       if (buffer_heads_over_limit)
-                               pagevec_strip(&pvec);
-                       __pagevec_release(&pvec);
-                       spin_lock_irq(&zone->lru_lock);
+       move_active_pages_to_lru(zone, &l_active,
+                                               LRU_ACTIVE + file * LRU_FILE);
+       move_active_pages_to_lru(zone, &l_inactive,
+                                               LRU_BASE   + file * LRU_FILE);
+       __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
+       spin_unlock_irq(&zone->lru_lock);
+}
+
+static int inactive_anon_is_low_global(struct zone *zone)
+{
+       unsigned long active, inactive;
+
+       active = zone_page_state(zone, NR_ACTIVE_ANON);
+       inactive = zone_page_state(zone, NR_INACTIVE_ANON);
+
+       if (inactive * zone->inactive_ratio < active)
+               return 1;
+
+       return 0;
+}
+
+/**
+ * inactive_anon_is_low - check if anonymous pages need to be deactivated
+ * @zone: zone to check
+ * @sc:   scan control of this context
+ *
+ * Returns true if the zone does not have enough inactive anon pages,
+ * meaning some active anon pages need to be deactivated.
+ */
+static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
+{
+       int low;
+
+       if (scanning_global_lru(sc))
+               low = inactive_anon_is_low_global(zone);
+       else
+               low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
+       return low;
+}
+
+static int inactive_file_is_low_global(struct zone *zone)
+{
+       unsigned long active, inactive;
+
+       active = zone_page_state(zone, NR_ACTIVE_FILE);
+       inactive = zone_page_state(zone, NR_INACTIVE_FILE);
+
+       return (active > inactive);
+}
+
+/**
+ * inactive_file_is_low - check if file pages need to be deactivated
+ * @zone: zone to check
+ * @sc:   scan control of this context
+ *
+ * When the system is doing streaming IO, memory pressure here
+ * ensures that active file pages get deactivated, until more
+ * than half of the file pages are on the inactive list.
+ *
+ * Once we get to that situation, protect the system's working
+ * set from being evicted by disabling active file page aging.
+ *
+ * This uses a different ratio than the anonymous pages, because
+ * the page cache uses a use-once replacement algorithm.
+ */
+static int inactive_file_is_low(struct zone *zone, struct scan_control *sc)
+{
+       int low;
+
+       if (scanning_global_lru(sc))
+               low = inactive_file_is_low_global(zone);
+       else
+               low = mem_cgroup_inactive_file_is_low(sc->mem_cgroup);
+       return low;
+}
+
+static int inactive_list_is_low(struct zone *zone, struct scan_control *sc,
+                               int file)
+{
+       if (file)
+               return inactive_file_is_low(zone, sc);
+       else
+               return inactive_anon_is_low(zone, sc);
+}
+
+static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
+       struct zone *zone, struct scan_control *sc, int priority)
+{
+       int file = is_file_lru(lru);
+
+       if (is_active_lru(lru)) {
+               if (inactive_list_is_low(zone, sc, file))
+                   shrink_active_list(nr_to_scan, zone, sc, priority, file);
+               return 0;
+       }
+
+       return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
+}
+
+/*
+ * Determine how aggressively the anon and file LRU lists should be
+ * scanned.  The relative value of each set of LRU lists is determined
+ * by looking at the fraction of the pages scanned we did rotate back
+ * onto the active list instead of evict.
+ *
+ * percent[0] specifies how much pressure to put on ram/swap backed
+ * memory, while percent[1] determines pressure on the file LRUs.
+ */
+static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
+                                       unsigned long *percent)
+{
+       unsigned long anon, file, free;
+       unsigned long anon_prio, file_prio;
+       unsigned long ap, fp;
+       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+
+       anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
+               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
+       file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
+               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+
+       if (scanning_global_lru(sc)) {
+               free  = zone_page_state(zone, NR_FREE_PAGES);
+               /* If we have very few page cache pages,
+                  force-scan anon pages. */
+               if (unlikely(file + free <= high_wmark_pages(zone))) {
+                       percent[0] = 100;
+                       percent[1] = 0;
+                       return;
                }
        }
-       __mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
-       pgdeactivate += pgmoved;
-       if (buffer_heads_over_limit) {
-               spin_unlock_irq(&zone->lru_lock);
-               pagevec_strip(&pvec);
+
+       /*
+        * OK, so we have swap space and a fair amount of page cache
+        * pages.  We use the recently rotated / recently scanned
+        * ratios to determine how valuable each cache is.
+        *
+        * Because workloads change over time (and to avoid overflow)
+        * we keep these statistics as a floating average, which ends
+        * up weighing recent references more than old ones.
+        *
+        * anon in [0], file in [1]
+        */
+       if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
                spin_lock_irq(&zone->lru_lock);
+               reclaim_stat->recent_scanned[0] /= 2;
+               reclaim_stat->recent_rotated[0] /= 2;
+               spin_unlock_irq(&zone->lru_lock);
        }
 
-       pgmoved = 0;
-       while (!list_empty(&l_active)) {
-               page = lru_to_page(&l_active);
-               prefetchw_prev_lru_page(page, &l_active, flags);
-               VM_BUG_ON(PageLRU(page));
-               SetPageLRU(page);
-               VM_BUG_ON(!PageActive(page));
-               list_move(&page->lru, &zone->active_list);
-               pgmoved++;
-               if (!pagevec_add(&pvec, page)) {
-                       __mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
-                       pgmoved = 0;
-                       spin_unlock_irq(&zone->lru_lock);
-                       __pagevec_release(&pvec);
-                       spin_lock_irq(&zone->lru_lock);
-               }
+       if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
+               spin_lock_irq(&zone->lru_lock);
+               reclaim_stat->recent_scanned[1] /= 2;
+               reclaim_stat->recent_rotated[1] /= 2;
+               spin_unlock_irq(&zone->lru_lock);
        }
-       __mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
 
-       __count_zone_vm_events(PGREFILL, zone, pgscanned);
-       __count_vm_events(PGDEACTIVATE, pgdeactivate);
-       spin_unlock_irq(&zone->lru_lock);
+       /*
+        * With swappiness at 100, anonymous and file have the same priority.
+        * This scanning priority is essentially the inverse of IO cost.
+        */
+       anon_prio = sc->swappiness;
+       file_prio = 200 - sc->swappiness;
 
-       pagevec_release(&pvec);
+       /*
+        * The amount of pressure on anon vs file pages is inversely
+        * proportional to the fraction of recently scanned pages on
+        * each list that were recently referenced and in active use.
+        */
+       ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
+       ap /= reclaim_stat->recent_rotated[0] + 1;
+
+       fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
+       fp /= reclaim_stat->recent_rotated[1] + 1;
+
+       /* Normalize to percentages */
+       percent[0] = 100 * ap / (ap + fp + 1);
+       percent[1] = 100 - percent[0];
 }
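
A worked example with made-up numbers: at swappiness 60 (anon_prio 60, file_prio 140), if the recent statistics show file pages being rotated back (reused) five times as often as anon pages, most of the pressure shifts onto the anon lists, roughly 68% anon to 32% file with the figures below. shrink_zone() then scales each list's size by the scan priority and these percentages to arrive at per-list scan targets. A standalone sketch of the arithmetic:

#include <stdio.h>

int main(void)
{
	/* Illustrative inputs only. */
	unsigned long swappiness = 60;
	unsigned long anon_prio = swappiness;		/* 60 */
	unsigned long file_prio = 200 - swappiness;	/* 140 */
	unsigned long recent_scanned[2] = { 1000, 1000 };	/* anon, file */
	unsigned long recent_rotated[2] = {  100,  500 };

	unsigned long ap = (anon_prio + 1) * (recent_scanned[0] + 1)
						/ (recent_rotated[0] + 1);
	unsigned long fp = (file_prio + 1) * (recent_scanned[1] + 1)
						/ (recent_rotated[1] + 1);
	unsigned long percent0 = 100 * ap / (ap + fp + 1);

	printf("anon pressure: %lu%%, file pressure: %lu%%\n",
	       percent0, 100 - percent0);
	return 0;
}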
 
 /*
- * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
+ * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
+ * until we collected @swap_cluster_max pages to scan.
  */
-static unsigned long shrink_zone(int priority, struct zone *zone,
-                               struct scan_control *sc)
+static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
+                                      unsigned long *nr_saved_scan)
 {
-       unsigned long nr_active;
-       unsigned long nr_inactive;
-       unsigned long nr_to_scan;
-       unsigned long nr_reclaimed = 0;
+       unsigned long nr;
 
-       atomic_inc(&zone->reclaim_in_progress);
+       *nr_saved_scan += nr_to_scan;
+       nr = *nr_saved_scan;
 
-       /*
-        * Add one to `nr_to_scan' just to make sure that the kernel will
-        * slowly sift through the active list.
-        */
-       zone->nr_scan_active +=
-               (zone_page_state(zone, NR_ACTIVE) >> priority) + 1;
-       nr_active = zone->nr_scan_active;
-       if (nr_active >= sc->swap_cluster_max)
-               zone->nr_scan_active = 0;
+       if (nr >= SWAP_CLUSTER_MAX)
+               *nr_saved_scan = 0;
        else
-               nr_active = 0;
+               nr = 0;
 
-       zone->nr_scan_inactive +=
-               (zone_page_state(zone, NR_INACTIVE) >> priority) + 1;
-       nr_inactive = zone->nr_scan_inactive;
-       if (nr_inactive >= sc->swap_cluster_max)
-               zone->nr_scan_inactive = 0;
-       else
-               nr_inactive = 0;
-
-       while (nr_active || nr_inactive) {
-               if (nr_active) {
-                       nr_to_scan = min(nr_active,
-                                       (unsigned long)sc->swap_cluster_max);
-                       nr_active -= nr_to_scan;
-                       shrink_active_list(nr_to_scan, zone, sc, priority);
+       return nr;
+}
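
For illustration only (SWAP_CLUSTER_MAX is 32 in kernels of this era; the call
pattern below is hypothetical): with a saved counter starting at zero, three
successive calls with nr_to_scan = 12 behave as follows,

	nr_scan_try_batch(12, &saved);	/* saved = 12, returns 0 */
	nr_scan_try_batch(12, &saved);	/* saved = 24, returns 0 */
	nr_scan_try_batch(12, &saved);	/* saved = 36 >= 32: returns 36, saved = 0 */

so the caller only ever issues reasonably sized scan batches to shrink_list().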
+
+/*
+ * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
+ */
+static void shrink_zone(int priority, struct zone *zone,
+                               struct scan_control *sc)
+{
+       unsigned long nr[NR_LRU_LISTS];
+       unsigned long nr_to_scan;
+       unsigned long percent[2];       /* anon @ 0; file @ 1 */
+       enum lru_list l;
+       unsigned long nr_reclaimed = sc->nr_reclaimed;
+       unsigned long nr_to_reclaim = sc->nr_to_reclaim;
+       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+       int noswap = 0;
+
+       /* If we have no swap space, do not bother scanning anon pages. */
+       if (!sc->may_swap || (nr_swap_pages <= 0)) {
+               noswap = 1;
+               percent[0] = 0;
+               percent[1] = 100;
+       } else
+               get_scan_ratio(zone, sc, percent);
+
+       for_each_evictable_lru(l) {
+               int file = is_file_lru(l);
+               unsigned long scan;
+
+               scan = zone_nr_lru_pages(zone, sc, l);
+               if (priority || noswap) {
+                       scan >>= priority;
+                       scan = (scan * percent[file]) / 100;
                }
+               nr[l] = nr_scan_try_batch(scan,
+                                         &reclaim_stat->nr_saved_scan[l]);
+       }
 
-               if (nr_inactive) {
-                       nr_to_scan = min(nr_inactive,
-                                       (unsigned long)sc->swap_cluster_max);
-                       nr_inactive -= nr_to_scan;
-                       nr_reclaimed += shrink_inactive_list(nr_to_scan, zone,
-                                                               sc);
+       while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
+                                       nr[LRU_INACTIVE_FILE]) {
+               for_each_evictable_lru(l) {
+                       if (nr[l]) {
+                               nr_to_scan = min_t(unsigned long,
+                                                  nr[l], SWAP_CLUSTER_MAX);
+                               nr[l] -= nr_to_scan;
+
+                               nr_reclaimed += shrink_list(l, nr_to_scan,
+                                                           zone, sc, priority);
+                       }
                }
+               /*
+                * On large memory systems, scan >> priority can become
+                * really large. This is fine for the starting priority;
+                * we want to put equal scanning pressure on each zone.
+                * However, if the VM has a harder time freeing pages,
+                * with multiple processes reclaiming pages, the total
+                * freeing target can get unreasonably large.
+                */
+               if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
+                       break;
        }
 
-       throttle_vm_writeout(sc->gfp_mask);
+       sc->nr_reclaimed = nr_reclaimed;
 
-       atomic_dec(&zone->reclaim_in_progress);
-       return nr_reclaimed;
+       /*
+        * Even if we did not try to evict anon pages at all, we want to
+        * rebalance the anon lru active/inactive ratio.
+        */
+       if (inactive_anon_is_low(zone, sc) && nr_swap_pages > 0)
+               shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
+
+       throttle_vm_writeout(sc->gfp_mask);
 }
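
As a rough numeric sketch of the scan-target computation above (hypothetical
zone sizes; DEF_PRIORITY assumed to be 12): an inactive file list of
4,000,000 pages with percent[1] = 93 yields

	scan = (4000000 >> 12) * 93 / 100;	/* ~907 pages this pass */

which is then fed through nr_scan_try_batch() and consumed SWAP_CLUSTER_MAX
pages at a time by shrink_list(), until either the nr[] targets are exhausted
or sc->nr_to_reclaim has been met at a priority below DEF_PRIORITY.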
 
 /*
@@ -1117,45 +1696,55 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
  * try to reclaim pages from zones which will satisfy the caller's allocation
  * request.
  *
- * We reclaim from a zone even if that zone is over pages_high.  Because:
+ * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
+ * Because:
  * a) The caller may be trying to free *extra* pages to satisfy a higher-order
  *    allocation or
- * b) The zones may be over pages_high but they must go *over* pages_high to
- *    satisfy the `incremental min' zone defense algorithm.
- *
- * Returns the number of reclaimed pages.
+ * b) The target zone may be at high_wmark_pages(zone) but the lower zones
+ *    must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
+ *    zone defense algorithm.
  *
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static unsigned long shrink_zones(int priority, struct zone **zones,
+static void shrink_zones(int priority, struct zonelist *zonelist,
                                        struct scan_control *sc)
 {
-       unsigned long nr_reclaimed = 0;
-       int i;
+       enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
+       struct zoneref *z;
+       struct zone *zone;
 
        sc->all_unreclaimable = 1;
-       for (i = 0; zones[i] != NULL; i++) {
-               struct zone *zone = zones[i];
-
+       for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
+                                       sc->nodemask) {
                if (!populated_zone(zone))
                        continue;
+               /*
+                * Take care that memory controller reclaiming has only a
+                * small influence on the global LRU.
+                */
+               if (scanning_global_lru(sc)) {
+                       if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+                               continue;
+                       note_zone_scanning_priority(zone, priority);
 
-               if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
-                       continue;
-
-               note_zone_scanning_priority(zone, priority);
-
-               if (zone->all_unreclaimable && priority != DEF_PRIORITY)
-                       continue;       /* Let kswapd poll it */
-
-               sc->all_unreclaimable = 0;
+                       if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+                               continue;       /* Let kswapd poll it */
+                       sc->all_unreclaimable = 0;
+               } else {
+                       /*
+                        * Ignore the cpuset limitation here; we just want to
+                        * reduce the number of pages we use, regardless of any
+                        * memory shortage.
+                        */
+                       sc->all_unreclaimable = 0;
+                       mem_cgroup_note_reclaim_priority(sc->mem_cgroup,
+                                                       priority);
+               }
 
-               nr_reclaimed += shrink_zone(priority, zone, sc);
+               shrink_zone(priority, zone, sc);
        }
-       return nr_reclaimed;
 }
+
 /*
  * This is the main entry point to direct page reclaim.
  *
@@ -1164,54 +1753,63 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
  *
  * If the caller is !__GFP_FS then the probability of a failure is reasonably
  * high - the zone may be full of dirty or under-writeback pages, which this
- * caller can't do much about.  We kick pdflush and take explicit naps in the
- * hope that some of these pages can be written.  But if the allocating task
- * holds filesystem locks which prevent writeout this might not work, and the
- * allocation attempt will fail.
+ * caller can't do much about.  We kick the writeback threads and take explicit
+ * naps in the hope that some of these pages can be written.  But if the
+ * allocating task holds filesystem locks which prevent writeout this might not
+ * work, and the allocation attempt will fail.
+ *
+ * returns:    0, if no pages reclaimed
+ *             else, the number of pages reclaimed
  */
-unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
+static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
+                                       struct scan_control *sc)
 {
        int priority;
-       int ret = 0;
+       unsigned long ret = 0;
        unsigned long total_scanned = 0;
-       unsigned long nr_reclaimed = 0;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        unsigned long lru_pages = 0;
-       int i;
-       struct scan_control sc = {
-               .gfp_mask = gfp_mask,
-               .may_writepage = !laptop_mode,
-               .swap_cluster_max = SWAP_CLUSTER_MAX,
-               .may_swap = 1,
-               .swappiness = vm_swappiness,
-               .order = order,
-       };
+       struct zoneref *z;
+       struct zone *zone;
+       enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
+       unsigned long writeback_threshold;
 
-       count_vm_event(ALLOCSTALL);
+       delayacct_freepages_start();
 
-       for (i = 0; zones[i] != NULL; i++) {
-               struct zone *zone = zones[i];
+       if (scanning_global_lru(sc))
+               count_vm_event(ALLOCSTALL);
+       /*
+        * mem_cgroup will not do shrink_slab.
+        */
+       if (scanning_global_lru(sc)) {
+               for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 
-               if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
-                       continue;
+                       if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+                               continue;
 
-               lru_pages += zone_page_state(zone, NR_ACTIVE)
-                               + zone_page_state(zone, NR_INACTIVE);
+                       lru_pages += zone_reclaimable_pages(zone);
+               }
        }
 
        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
-               sc.nr_scanned = 0;
+               sc->nr_scanned = 0;
                if (!priority)
                        disable_swap_token();
-               nr_reclaimed += shrink_zones(priority, zones, &sc);
-               shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
-               if (reclaim_state) {
-                       nr_reclaimed += reclaim_state->reclaimed_slab;
-                       reclaim_state->reclaimed_slab = 0;
+               shrink_zones(priority, zonelist, sc);
+               /*
+                * Don't shrink slabs when reclaiming memory from
+                * over-limit cgroups.
+                */
+               if (scanning_global_lru(sc)) {
+                       shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
+                       if (reclaim_state) {
+                               sc->nr_reclaimed += reclaim_state->reclaimed_slab;
+                               reclaim_state->reclaimed_slab = 0;
+                       }
                }
-               total_scanned += sc.nr_scanned;
-               if (nr_reclaimed >= sc.swap_cluster_max) {
-                       ret = 1;
+               total_scanned += sc->nr_scanned;
+               if (sc->nr_reclaimed >= sc->nr_to_reclaim) {
+                       ret = sc->nr_reclaimed;
                        goto out;
                }
 
@@ -1222,19 +1820,20 @@ unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
                 * that's undesirable in laptop mode, where we *want* lumpy
                 * writeout.  So in laptop mode, write out the whole world.
                 */
-               if (total_scanned > sc.swap_cluster_max +
-                                       sc.swap_cluster_max / 2) {
-                       wakeup_pdflush(laptop_mode ? 0 : total_scanned);
-                       sc.may_writepage = 1;
+               writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
+               if (total_scanned > writeback_threshold) {
+                       wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
+                       sc->may_writepage = 1;
                }
 
                /* Take a nap, wait for some writeback to complete */
-               if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
-                       congestion_wait(WRITE, HZ/10);
+               if (!sc->hibernation_mode && sc->nr_scanned &&
+                   priority < DEF_PRIORITY - 2)
+                       congestion_wait(BLK_RW_ASYNC, HZ/10);
        }
-       /* top priority shrink_caches still had more to do? don't OOM, then */
-       if (!sc.all_unreclaimable)
-               ret = 1;
+       /* top priority shrink_zones still had more to do? don't OOM, then */
+       if (!sc->all_unreclaimable && scanning_global_lru(sc))
+               ret = sc->nr_reclaimed;
 out:
        /*
         * Now that we've scanned all the zones at this priority level, note
@@ -1245,20 +1844,131 @@ out:
         */
        if (priority < 0)
                priority = 0;
-       for (i = 0; zones[i] != 0; i++) {
-               struct zone *zone = zones[i];
 
-               if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+       if (scanning_global_lru(sc)) {
+               for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+
+                       if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+                               continue;
+
+                       zone->prev_priority = priority;
+               }
+       } else
+               mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority);
+
+       delayacct_freepages_end();
+
+       return ret;
+}
+
+unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
+                               gfp_t gfp_mask, nodemask_t *nodemask)
+{
+       struct scan_control sc = {
+               .gfp_mask = gfp_mask,
+               .may_writepage = !laptop_mode,
+               .nr_to_reclaim = SWAP_CLUSTER_MAX,
+               .may_unmap = 1,
+               .may_swap = 1,
+               .swappiness = vm_swappiness,
+               .order = order,
+               .mem_cgroup = NULL,
+               .isolate_pages = isolate_pages_global,
+               .nodemask = nodemask,
+       };
+
+       return do_try_to_free_pages(zonelist, &sc);
+}
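
A sketch of how a direct-reclaim caller might use this entry point (the
surrounding page allocator code is not part of this patch, so treat the
snippet as illustrative only):

	struct zonelist *zonelist = node_zonelist(numa_node_id(), gfp_mask);
	unsigned long progress;

	progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
	if (!progress)
		;	/* nothing reclaimed: retry, or fall back to the OOM path */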
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+
+unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
+                                               gfp_t gfp_mask, bool noswap,
+                                               unsigned int swappiness,
+                                               struct zone *zone, int nid)
+{
+       struct scan_control sc = {
+               .may_writepage = !laptop_mode,
+               .may_unmap = 1,
+               .may_swap = !noswap,
+               .swappiness = swappiness,
+               .order = 0,
+               .mem_cgroup = mem,
+               .isolate_pages = mem_cgroup_isolate_pages,
+       };
+       nodemask_t nm  = nodemask_of_node(nid);
+
+       sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
+                       (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
+       sc.nodemask = &nm;
+       sc.nr_reclaimed = 0;
+       sc.nr_scanned = 0;
+       /*
+        * NOTE: Although we can get the priority field, using it
+        * here is not a good idea, since it limits the pages we can scan.
+        * If we don't reclaim here, shrink_zone() called from balance_pgdat()
+        * will pick up pages from other mem cgroups as well, so we simply
+        * pass a hard-coded priority of zero.
+        */
+       shrink_zone(0, zone, &sc);
+       return sc.nr_reclaimed;
+}
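
The gfp_mask composition above keeps only the caller's reclaim-behaviour flags
(GFP_RECLAIM_MASK covers bits such as __GFP_WAIT, __GFP_IO and __GFP_FS) and
takes its zone selection from GFP_HIGHUSER_MOVABLE. For example, a GFP_NOFS
caller still avoids filesystem re-entry during this reclaim, while the scan
itself ranges over the node's full highmem/movable zones. (The exact contents
of GFP_RECLAIM_MASK live in mm/internal.h and are assumed here; they are not
shown in this patch.)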
+
+unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
+                                          gfp_t gfp_mask,
+                                          bool noswap,
+                                          unsigned int swappiness)
+{
+       struct zonelist *zonelist;
+       struct scan_control sc = {
+               .may_writepage = !laptop_mode,
+               .may_unmap = 1,
+               .may_swap = !noswap,
+               .nr_to_reclaim = SWAP_CLUSTER_MAX,
+               .swappiness = swappiness,
+               .order = 0,
+               .mem_cgroup = mem_cont,
+               .isolate_pages = mem_cgroup_isolate_pages,
+               .nodemask = NULL, /* we don't care about placement */
+       };
+
+       sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
+                       (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
+       zonelist = NODE_DATA(numa_node_id())->node_zonelists;
+       return do_try_to_free_pages(zonelist, &sc);
+}
+#endif
+
+/* is kswapd sleeping prematurely? */
+static int sleeping_prematurely(pg_data_t *pgdat, int order, long remaining)
+{
+       int i;
+
+       /* If a direct reclaimer woke kswapd within HZ/10, it's premature */
+       if (remaining)
+               return 1;
+
+       /* If after HZ/10, a zone is below the high mark, it's premature */
+       for (i = 0; i < pgdat->nr_zones; i++) {
+               struct zone *zone = pgdat->node_zones + i;
+
+               if (!populated_zone(zone))
                        continue;
 
-               zone->prev_priority = priority;
+               if (zone->all_unreclaimable)
+                       continue;
+
+               if (!zone_watermark_ok(zone, order, high_wmark_pages(zone),
+                                                               0, 0))
+                       return 1;
        }
-       return ret;
+
+       return 0;
 }
 
 /*
  * For kswapd, balance_pgdat() will work across all this node's zones until
- * they are all at pages_high.
+ * they are all at high_wmark_pages(zone).
  *
  * Returns the number of pages which were actually freed.
  *
@@ -1271,11 +1981,11 @@ out:
  * the zone for when the problem goes away.
  *
  * kswapd scans the zones in the highmem->normal->dma direction.  It skips
- * zones which have free_pages > pages_high, but once a zone is found to have
- * free_pages <= pages_high, we scan that zone and the lower zones regardless
- * of the number of free pages in the lower zones.  This interoperates with
- * the page allocator fallback scheme to ensure that aging of pages is balanced
- * across the zones.
+ * zones which have free_pages > high_wmark_pages(zone), but once a zone is
+ * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the
+ * lower zones regardless of the number of free pages in the lower zones. This
+ * interoperates with the page allocator fallback scheme to ensure that aging
+ * of pages is balanced across the zones.
  */
 static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 {
@@ -1283,24 +1993,31 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
        int priority;
        int i;
        unsigned long total_scanned;
-       unsigned long nr_reclaimed;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
+               .may_unmap = 1,
                .may_swap = 1,
-               .swap_cluster_max = SWAP_CLUSTER_MAX,
+               /*
+                * kswapd doesn't want to be bailed out of reclaim, because
+                * we want to put equal scanning pressure on each zone.
+                */
+               .nr_to_reclaim = ULONG_MAX,
                .swappiness = vm_swappiness,
                .order = order,
+               .mem_cgroup = NULL,
+               .isolate_pages = isolate_pages_global,
        };
        /*
         * temp_priority is used to remember the scanning priority at which
-        * this zone was successfully refilled to free_pages == pages_high.
+        * this zone was successfully refilled to
+        * free_pages == high_wmark_pages(zone).
         */
        int temp_priority[MAX_NR_ZONES];
 
 loop_again:
        total_scanned = 0;
-       nr_reclaimed = 0;
+       sc.nr_reclaimed = 0;
        sc.may_writepage = !laptop_mode;
        count_vm_event(PAGEOUTRUN);
 
@@ -1310,6 +2027,7 @@ loop_again:
        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
                unsigned long lru_pages = 0;
+               int has_under_min_watermark_zone = 0;
 
                /* The swap token gets in the way of swapout... */
                if (!priority)
@@ -1330,8 +2048,16 @@ loop_again:
                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                continue;
 
-                       if (!zone_watermark_ok(zone, order, zone->pages_high,
-                                              0, 0)) {
+                       /*
+                        * Do some background aging of the anon list, to give
+                        * pages a chance to be referenced before reclaiming.
+                        */
+                       if (inactive_anon_is_low(zone, &sc))
+                               shrink_active_list(SWAP_CLUSTER_MAX, zone,
+                                                       &sc, priority, 0);
+
+                       if (!zone_watermark_ok(zone, order,
+                                       high_wmark_pages(zone), 0, 0)) {
                                end_zone = i;
                                break;
                        }
@@ -1342,8 +2068,7 @@ loop_again:
                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;
 
-                       lru_pages += zone_page_state(zone, NR_ACTIVE)
-                                       + zone_page_state(zone, NR_INACTIVE);
+                       lru_pages += zone_reclaimable_pages(zone);
                }
 
                /*
@@ -1358,6 +2083,7 @@ loop_again:
                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;
                        int nr_slab;
+                       int nid, zid;
 
                        if (!populated_zone(zone))
                                continue;
@@ -1365,38 +2091,57 @@ loop_again:
                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                continue;
 
-                       if (!zone_watermark_ok(zone, order, zone->pages_high,
-                                              end_zone, 0))
-                               all_zones_ok = 0;
                        temp_priority[i] = priority;
                        sc.nr_scanned = 0;
                        note_zone_scanning_priority(zone, priority);
+
+                       nid = pgdat->node_id;
+                       zid = zone_idx(zone);
+                       /*
+                        * Call soft limit reclaim before calling shrink_zone.
+                        * For now we ignore the return value.
+                        */
+                       mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask,
+                                                       nid, zid);
                        /*
                         * We put equal pressure on every zone, unless one
                         * zone has way too many pages free already.
                         */
-                       if (!zone_watermark_ok(zone, order, 8*zone->pages_high,
-                                               end_zone, 0))
-                               nr_reclaimed += shrink_zone(priority, zone, &sc);
+                       if (!zone_watermark_ok(zone, order,
+                                       8*high_wmark_pages(zone), end_zone, 0))
+                               shrink_zone(priority, zone, &sc);
                        reclaim_state->reclaimed_slab = 0;
                        nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
                                                lru_pages);
-                       nr_reclaimed += reclaim_state->reclaimed_slab;
+                       sc.nr_reclaimed += reclaim_state->reclaimed_slab;
                        total_scanned += sc.nr_scanned;
                        if (zone->all_unreclaimable)
                                continue;
-                       if (nr_slab == 0 && zone->pages_scanned >=
-                               (zone_page_state(zone, NR_ACTIVE)
-                               + zone_page_state(zone, NR_INACTIVE)) * 6)
-                                       zone->all_unreclaimable = 1;
+                       if (nr_slab == 0 &&
+                           zone->pages_scanned >= (zone_reclaimable_pages(zone) * 6))
+                               zone->all_unreclaimable = 1;
                        /*
                         * If we've done a decent amount of scanning and
                         * the reclaim ratio is low, start doing writepage
                         * even in laptop mode
                         */
                        if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
-                           total_scanned > nr_reclaimed + nr_reclaimed / 2)
+                           total_scanned > sc.nr_reclaimed + sc.nr_reclaimed / 2)
                                sc.may_writepage = 1;
+
+                       if (!zone_watermark_ok(zone, order,
+                                       high_wmark_pages(zone), end_zone, 0)) {
+                               all_zones_ok = 0;
+                               /*
+                                * We are still under the min watermark.  This
+                                * means that we have a GFP_ATOMIC allocation
+                                * failure risk. Hurry up!
+                                */
+                               if (!zone_watermark_ok(zone, order,
+                                           min_wmark_pages(zone), end_zone, 0))
+                                       has_under_min_watermark_zone = 1;
+                       }
+
                }
                if (all_zones_ok)
                        break;          /* kswapd: all done */
@@ -1404,8 +2149,12 @@ loop_again:
                 * OK, kswapd is getting into trouble.  Take a nap, then take
                 * another pass across the zones.
                 */
-               if (total_scanned && priority < DEF_PRIORITY - 2)
-                       congestion_wait(WRITE, HZ/10);
+               if (total_scanned && (priority < DEF_PRIORITY - 2)) {
+                       if (has_under_min_watermark_zone)
+                               count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
+                       else
+                               congestion_wait(BLK_RW_ASYNC, HZ/10);
+               }
 
                /*
                 * We do this so kswapd doesn't build up large priorities for
@@ -1413,7 +2162,7 @@ loop_again:
                 * matches the direct reclaim path behaviour in terms of impact
                 * on zone->*_priority.
                 */
-               if (nr_reclaimed >= SWAP_CLUSTER_MAX)
+               if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
                        break;
        }
 out:
@@ -1432,15 +2181,32 @@ out:
 
                try_to_freeze();
 
+               /*
+                * Fragmentation may mean that the system cannot be
+                * rebalanced for high-order allocations in all zones.
+                * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX,
+                * it means the zones have been fully scanned and are still
+                * not balanced. For high-order allocations, there is
+                * little point in trying all over again, as kswapd may
+                * loop forever.
+                *
+                * Instead, recheck all watermarks at order-0 as they
+                * are the most important. If watermarks are ok, kswapd will go
+                * back to sleep. High-order users can still perform direct
+                * reclaim if they wish.
+                */
+               if (sc.nr_reclaimed < SWAP_CLUSTER_MAX)
+                       order = sc.order = 0;
+
                goto loop_again;
        }
 
-       return nr_reclaimed;
+       return sc.nr_reclaimed;
 }
 
 /*
  * The background pageout daemon, started as a kernel thread
- * from the init process. 
+ * from the init process.
  *
  * This basically trickles out pages so that we have _some_
  * free memory available even if there is no other activity
@@ -1460,11 +2226,12 @@ static int kswapd(void *p)
        struct reclaim_state reclaim_state = {
                .reclaimed_slab = 0,
        };
-       cpumask_t cpumask;
+       const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
+
+       lockdep_set_current_reclaim_state(GFP_KERNEL);
 
-       cpumask = node_to_cpumask(pgdat->node_id);
-       if (!cpus_empty(cpumask))
-               set_cpus_allowed(tsk, cpumask);
+       if (!cpumask_empty(cpumask))
+               set_cpus_allowed_ptr(tsk, cpumask);
        current->reclaim_state = &reclaim_state;
 
        /*
@@ -1485,6 +2252,7 @@ static int kswapd(void *p)
        order = 0;
        for ( ; ; ) {
                unsigned long new_order;
+               int ret;
 
                prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
                new_order = pgdat->kswapd_max_order;
@@ -1496,19 +2264,45 @@ static int kswapd(void *p)
                         */
                        order = new_order;
                } else {
-                       if (!freezing(current))
-                               schedule();
+                       if (!freezing(current) && !kthread_should_stop()) {
+                               long remaining = 0;
+
+                               /* Try to sleep for a short interval */
+                               if (!sleeping_prematurely(pgdat, order, remaining)) {
+                                       remaining = schedule_timeout(HZ/10);
+                                       finish_wait(&pgdat->kswapd_wait, &wait);
+                                       prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
+                               }
+
+                               /*
+                                * After a short sleep, check if it was a
+                                * premature sleep. If not, then go fully
+                                * to sleep until explicitly woken up
+                                */
+                               if (!sleeping_prematurely(pgdat, order, remaining))
+                                       schedule();
+                               else {
+                                       if (remaining)
+                                               count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
+                                       else
+                                               count_vm_event(KSWAPD_HIGH_WMARK_HIT_QUICKLY);
+                               }
+                       }
 
                        order = pgdat->kswapd_max_order;
                }
                finish_wait(&pgdat->kswapd_wait, &wait);
 
-               if (!try_to_freeze()) {
-                       /* We can speed up thawing tasks if we don't call
-                        * balance_pgdat after returning from the refrigerator
-                        */
+               ret = try_to_freeze();
+               if (kthread_should_stop())
+                       break;
+
+               /*
+                * We can speed up thawing tasks if we don't call balance_pgdat
+                * after returning from the refrigerator
+                */
+               if (!ret)
                        balance_pgdat(pgdat, order);
-               }
        }
        return 0;
 }
@@ -1524,7 +2318,7 @@ void wakeup_kswapd(struct zone *zone, int order)
                return;
 
        pgdat = zone->zone_pgdat;
-       if (zone_watermark_ok(zone, order, zone->pages_low, 0, 0))
+       if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
                return;
        if (pgdat->kswapd_max_order < order)
                pgdat->kswapd_max_order = order;
@@ -1535,155 +2329,82 @@ void wakeup_kswapd(struct zone *zone, int order)
        wake_up_interruptible(&pgdat->kswapd_wait);
 }
 
-#ifdef CONFIG_PM
 /*
- * Helper function for shrink_all_memory().  Tries to reclaim 'nr_pages' pages
- * from LRU lists system-wide, for given pass and priority, and returns the
- * number of reclaimed pages
- *
- * For pass > 3 we also try to shrink the LRU lists that contain a few pages
+ * The reclaimable count is only approximate.  The pages that are harder
+ * to reclaim include:
+ * - mlocked pages, which will be moved to the unevictable list when encountered
+ * - mapped pages, which may require several passes before they are reclaimed
+ * - dirty pages, which are not "instantly" reclaimable
  */
-static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
-                                     int pass, struct scan_control *sc)
+unsigned long global_reclaimable_pages(void)
 {
-       struct zone *zone;
-       unsigned long nr_to_scan, ret = 0;
+       unsigned long nr;
 
-       for_each_zone(zone) {
+       nr = global_page_state(NR_ACTIVE_FILE) +
+            global_page_state(NR_INACTIVE_FILE);
 
-               if (!populated_zone(zone))
-                       continue;
+       if (nr_swap_pages > 0)
+               nr += global_page_state(NR_ACTIVE_ANON) +
+                     global_page_state(NR_INACTIVE_ANON);
 
-               if (zone->all_unreclaimable && prio != DEF_PRIORITY)
-                       continue;
+       return nr;
+}
 
-               /* For pass = 0 we don't shrink the active list */
-               if (pass > 0) {
-                       zone->nr_scan_active +=
-                               (zone_page_state(zone, NR_ACTIVE) >> prio) + 1;
-                       if (zone->nr_scan_active >= nr_pages || pass > 3) {
-                               zone->nr_scan_active = 0;
-                               nr_to_scan = min(nr_pages,
-                                       zone_page_state(zone, NR_ACTIVE));
-                               shrink_active_list(nr_to_scan, zone, sc, prio);
-                       }
-               }
+unsigned long zone_reclaimable_pages(struct zone *zone)
+{
+       unsigned long nr;
 
-               zone->nr_scan_inactive +=
-                       (zone_page_state(zone, NR_INACTIVE) >> prio) + 1;
-               if (zone->nr_scan_inactive >= nr_pages || pass > 3) {
-                       zone->nr_scan_inactive = 0;
-                       nr_to_scan = min(nr_pages,
-                               zone_page_state(zone, NR_INACTIVE));
-                       ret += shrink_inactive_list(nr_to_scan, zone, sc);
-                       if (ret >= nr_pages)
-                               return ret;
-               }
-       }
+       nr = zone_page_state(zone, NR_ACTIVE_FILE) +
+            zone_page_state(zone, NR_INACTIVE_FILE);
 
-       return ret;
-}
+       if (nr_swap_pages > 0)
+               nr += zone_page_state(zone, NR_ACTIVE_ANON) +
+                     zone_page_state(zone, NR_INACTIVE_ANON);
 
-static unsigned long count_lru_pages(void)
-{
-       return global_page_state(NR_ACTIVE) + global_page_state(NR_INACTIVE);
+       return nr;
 }
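
A small worked example of the two helpers above (counters are hypothetical):
a zone with 10,000 NR_ACTIVE_FILE, 40,000 NR_INACTIVE_FILE, 25,000
NR_ACTIVE_ANON and 25,000 NR_INACTIVE_ANON pages reports 100,000 reclaimable
pages while nr_swap_pages > 0, but only 50,000 once swap is exhausted, since
anon pages then have nowhere to go.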
 
+#ifdef CONFIG_HIBERNATION
 /*
- * Try to free `nr_pages' of memory, system-wide, and return the number of
+ * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
  * freed pages.
  *
  * Rather than trying to age LRUs the aim is to preserve the overall
  * LRU order by reclaiming preferentially
  * inactive > active > active referenced > active mapped
  */
-unsigned long shrink_all_memory(unsigned long nr_pages)
+unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
 {
-       unsigned long lru_pages, nr_slab;
-       unsigned long ret = 0;
-       int pass;
        struct reclaim_state reclaim_state;
        struct scan_control sc = {
-               .gfp_mask = GFP_KERNEL,
-               .may_swap = 0,
-               .swap_cluster_max = nr_pages,
+               .gfp_mask = GFP_HIGHUSER_MOVABLE,
+               .may_swap = 1,
+               .may_unmap = 1,
                .may_writepage = 1,
+               .nr_to_reclaim = nr_to_reclaim,
+               .hibernation_mode = 1,
                .swappiness = vm_swappiness,
+               .order = 0,
+               .isolate_pages = isolate_pages_global,
        };
+       struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
+       struct task_struct *p = current;
+       unsigned long nr_reclaimed;
 
-       current->reclaim_state = &reclaim_state;
-
-       lru_pages = count_lru_pages();
-       nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
-       /* If slab caches are huge, it's better to hit them first */
-       while (nr_slab >= lru_pages) {
-               reclaim_state.reclaimed_slab = 0;
-               shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
-               if (!reclaim_state.reclaimed_slab)
-                       break;
-
-               ret += reclaim_state.reclaimed_slab;
-               if (ret >= nr_pages)
-                       goto out;
-
-               nr_slab -= reclaim_state.reclaimed_slab;
-       }
-
-       /*
-        * We try to shrink LRUs in 5 passes:
-        * 0 = Reclaim from inactive_list only
-        * 1 = Reclaim from active list but don't reclaim mapped
-        * 2 = 2nd pass of type 1
-        * 3 = Reclaim mapped (normal reclaim)
-        * 4 = 2nd pass of type 3
-        */
-       for (pass = 0; pass < 5; pass++) {
-               int prio;
-
-               /* Force reclaiming mapped pages in the passes #3 and #4 */
-               if (pass > 2) {
-                       sc.may_swap = 1;
-                       sc.swappiness = 100;
-               }
-
-               for (prio = DEF_PRIORITY; prio >= 0; prio--) {
-                       unsigned long nr_to_scan = nr_pages - ret;
-
-                       sc.nr_scanned = 0;
-                       ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
-                       if (ret >= nr_pages)
-                               goto out;
-
-                       reclaim_state.reclaimed_slab = 0;
-                       shrink_slab(sc.nr_scanned, sc.gfp_mask,
-                                       count_lru_pages());
-                       ret += reclaim_state.reclaimed_slab;
-                       if (ret >= nr_pages)
-                               goto out;
-
-                       if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
-                               congestion_wait(WRITE, HZ / 10);
-               }
-       }
+       p->flags |= PF_MEMALLOC;
+       lockdep_set_current_reclaim_state(sc.gfp_mask);
+       reclaim_state.reclaimed_slab = 0;
+       p->reclaim_state = &reclaim_state;
 
-       /*
-        * If ret = 0, we could not shrink LRUs, but there may be something
-        * in slab caches
-        */
-       if (!ret) {
-               do {
-                       reclaim_state.reclaimed_slab = 0;
-                       shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages());
-                       ret += reclaim_state.reclaimed_slab;
-               } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
-       }
+       nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
 
-out:
-       current->reclaim_state = NULL;
+       p->reclaim_state = NULL;
+       lockdep_clear_current_reclaim_state();
+       p->flags &= ~PF_MEMALLOC;
 
-       return ret;
+       return nr_reclaimed;
 }
-#endif
+#endif /* CONFIG_HIBERNATION */
 
 /* It's optimal to keep kswapds on the same CPUs as their memory, but
    not required for correctness.  So if the last cpu in a node goes
@@ -1692,15 +2413,18 @@ out:
 static int __devinit cpu_callback(struct notifier_block *nfb,
                                  unsigned long action, void *hcpu)
 {
-       pg_data_t *pgdat;
-       cpumask_t mask;
+       int nid;
 
        if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
-               for_each_online_pgdat(pgdat) {
-                       mask = node_to_cpumask(pgdat->node_id);
-                       if (any_online_cpu(mask) != NR_CPUS)
+               for_each_node_state(nid, N_HIGH_MEMORY) {
+                       pg_data_t *pgdat = NODE_DATA(nid);
+                       const struct cpumask *mask;
+
+                       mask = cpumask_of_node(pgdat->node_id);
+
+                       if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
                                /* One of our CPUs online: restore mask */
-                               set_cpus_allowed(pgdat->kswapd, mask);
+                               set_cpus_allowed_ptr(pgdat->kswapd, mask);
                }
        }
        return NOTIFY_OK;
@@ -1728,6 +2452,17 @@ int kswapd_run(int nid)
        return ret;
 }
 
+/*
+ * Called by memory hotplug when all memory in a node is offlined.
+ */
+void kswapd_stop(int nid)
+{
+       struct task_struct *kswapd = NODE_DATA(nid)->kswapd;
+
+       if (kswapd)
+               kthread_stop(kswapd);
+}
+
 static int __init kswapd_init(void)
 {
        int nid;
@@ -1751,7 +2486,7 @@ module_init(kswapd_init)
 int zone_reclaim_mode __read_mostly;
 
 #define RECLAIM_OFF 0
-#define RECLAIM_ZONE (1<<0)    /* Run shrink_cache on the zone */
+#define RECLAIM_ZONE (1<<0)    /* Run shrink_inactive_list on the zone */
 #define RECLAIM_WRITE (1<<1)   /* Writeout pages during reclaim */
 #define RECLAIM_SWAP (1<<2)    /* Swap pages out during reclaim */
 
@@ -1774,6 +2509,48 @@ int sysctl_min_unmapped_ratio = 1;
  */
 int sysctl_min_slab_ratio = 5;
 
+static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
+{
+       unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
+       unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
+               zone_page_state(zone, NR_ACTIVE_FILE);
+
+       /*
+        * It's possible for there to be more file mapped pages than
+        * accounted for by the pages on the file LRU lists because
+        * tmpfs pages accounted for as ANON can also be FILE_MAPPED
+        */
+       return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
+}
+
+/* Work out how many page cache pages we can reclaim in this reclaim_mode */
+static long zone_pagecache_reclaimable(struct zone *zone)
+{
+       long nr_pagecache_reclaimable;
+       long delta = 0;
+
+       /*
+        * If RECLAIM_SWAP is set, then all file pages are considered
+        * potentially reclaimable. Otherwise, we have to worry about
+        * pages like swapcache, and zone_unmapped_file_pages() provides
+        * a better estimate.
+        */
+       if (zone_reclaim_mode & RECLAIM_SWAP)
+               nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
+       else
+               nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
+
+       /* If we can't clean pages, remove dirty pages from consideration */
+       if (!(zone_reclaim_mode & RECLAIM_WRITE))
+               delta += zone_page_state(zone, NR_FILE_DIRTY);
+
+       /* Watch for any possible underflows due to delta */
+       if (unlikely(delta > nr_pagecache_reclaimable))
+               delta = nr_pagecache_reclaimable;
+
+       return nr_pagecache_reclaimable - delta;
+}
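
Worked example for the helpers above (hypothetical counters, with
zone_reclaim_mode = RECLAIM_ZONE only): given 1,000 pages on the file LRUs,
300 of them mapped and 100 dirty,

	zone_unmapped_file_pages()   = 1000 - 300 = 700
	zone_pagecache_reclaimable() = 700 - 100  = 600

Setting RECLAIM_SWAP would start from NR_FILE_PAGES instead, and
RECLAIM_WRITE would stop the dirty pages from being subtracted.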
+
 /*
  * Try to free up some pages from this zone through reclaim.
  */
@@ -1784,14 +2561,16 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
        struct task_struct *p = current;
        struct reclaim_state reclaim_state;
        int priority;
-       unsigned long nr_reclaimed = 0;
        struct scan_control sc = {
                .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
-               .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
-               .swap_cluster_max = max_t(unsigned long, nr_pages,
-                                       SWAP_CLUSTER_MAX),
+               .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
+               .may_swap = 1,
+               .nr_to_reclaim = max_t(unsigned long, nr_pages,
+                                      SWAP_CLUSTER_MAX),
                .gfp_mask = gfp_mask,
                .swappiness = vm_swappiness,
+               .order = order,
+               .isolate_pages = isolate_pages_global,
        };
        unsigned long slab_reclaimable;
 
@@ -1803,12 +2582,11 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
         * and RECLAIM_SWAP.
         */
        p->flags |= PF_MEMALLOC | PF_SWAPWRITE;
+       lockdep_set_current_reclaim_state(gfp_mask);
        reclaim_state.reclaimed_slab = 0;
        p->reclaim_state = &reclaim_state;
 
-       if (zone_page_state(zone, NR_FILE_PAGES) -
-               zone_page_state(zone, NR_FILE_MAPPED) >
-               zone->min_unmapped_pages) {
+       if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
                /*
                 * Free memory by calling shrink zone with increasing
                 * priorities until we have enough memory freed.
@@ -1816,9 +2594,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                priority = ZONE_RECLAIM_PRIORITY;
                do {
                        note_zone_scanning_priority(zone, priority);
-                       nr_reclaimed += shrink_zone(priority, zone, &sc);
+                       shrink_zone(priority, zone, &sc);
                        priority--;
-               } while (priority >= 0 && nr_reclaimed < nr_pages);
+               } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
        }
 
        slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
@@ -1842,19 +2620,20 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                 * Update nr_reclaimed by the number of slab pages we
                 * reclaimed from this zone.
                 */
-               nr_reclaimed += slab_reclaimable -
+               sc.nr_reclaimed += slab_reclaimable -
                        zone_page_state(zone, NR_SLAB_RECLAIMABLE);
        }
 
        p->reclaim_state = NULL;
        current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
-       return nr_reclaimed >= nr_pages;
+       lockdep_clear_current_reclaim_state();
+       return sc.nr_reclaimed >= nr_pages;
 }
 
 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 {
-       cpumask_t mask;
        int node_id;
+       int ret;
 
        /*
         * Zone reclaim reclaims unmapped file backed pages and
@@ -1866,22 +2645,18 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
         * if less than a specified percentage of the zone is used by
         * unmapped file backed pages.
         */
-       if (zone_page_state(zone, NR_FILE_PAGES) -
-           zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages
-           && zone_page_state(zone, NR_SLAB_RECLAIMABLE)
-                       <= zone->min_slab_pages)
-               return 0;
+       if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
+           zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
+               return ZONE_RECLAIM_FULL;
+
+       if (zone->all_unreclaimable)
+               return ZONE_RECLAIM_FULL;
 
        /*
-        * Avoid concurrent zone reclaims, do not reclaim in a zone that does
-        * not have reclaimable pages and if we should not delay the allocation
-        * then do not scan.
+        * Do not scan if the allocation should not be delayed.
         */
-       if (!(gfp_mask & __GFP_WAIT) ||
-               zone->all_unreclaimable ||
-               atomic_read(&zone->reclaim_in_progress) > 0 ||
-               (current->flags & PF_MEMALLOC))
-                       return 0;
+       if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
+               return ZONE_RECLAIM_NOSCAN;
 
        /*
         * Only run zone reclaim on the local zone or on zones that do not
@@ -1890,9 +2665,265 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
         * as wide as possible.
         */
        node_id = zone_to_nid(zone);
-       mask = node_to_cpumask(node_id);
-       if (!cpus_empty(mask) && node_id != numa_node_id())
-               return 0;
-       return __zone_reclaim(zone, gfp_mask, order);
+       if (node_state(node_id, N_CPU) && node_id != numa_node_id())
+               return ZONE_RECLAIM_NOSCAN;
+
+       if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
+               return ZONE_RECLAIM_NOSCAN;
+
+       ret = __zone_reclaim(zone, gfp_mask, order);
+       zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
+
+       if (!ret)
+               count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
+
+       return ret;
 }
 #endif
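
For context (the allocator side is not part of this file, so this describes
the convention rather than code shown here): get_page_from_freelist() treats
ZONE_RECLAIM_NOSCAN and ZONE_RECLAIM_FULL as "skip this zone", and only after
an actual reclaim attempt does it recheck the watermark to decide whether the
zone can now satisfy the allocation.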
+
+/*
+ * page_evictable - test whether a page is evictable
+ * @page: the page to test
+ * @vma: the VMA in which the page is or will be mapped, may be NULL
+ *
+ * Test whether page is evictable--i.e., should be placed on active/inactive
+ * lists vs unevictable list.  The vma argument is !NULL when called from the
+ * fault path to determine how to instantiate a new page.
+ *
+ * Reasons page might not be evictable:
+ * (1) page's mapping marked unevictable
+ * (2) page is part of an mlocked VMA
+ *
+ */
+int page_evictable(struct page *page, struct vm_area_struct *vma)
+{
+
+       if (mapping_unevictable(page_mapping(page)))
+               return 0;
+
+       if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
+               return 0;
+
+       return 1;
+}
+
+/**
+ * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
+ * @page: page to check evictability and move to appropriate lru list
+ * @zone: zone page is in
+ *
+ * Checks a page for evictability and moves the page to the appropriate
+ * zone lru list.
+ *
+ * Restrictions: zone->lru_lock must be held, page must be on LRU and must
+ * have PageUnevictable set.
+ */
+static void check_move_unevictable_page(struct page *page, struct zone *zone)
+{
+       VM_BUG_ON(PageActive(page));
+
+retry:
+       ClearPageUnevictable(page);
+       if (page_evictable(page, NULL)) {
+               enum lru_list l = page_lru_base_type(page);
+
+               __dec_zone_state(zone, NR_UNEVICTABLE);
+               list_move(&page->lru, &zone->lru[l].list);
+               mem_cgroup_move_lists(page, LRU_UNEVICTABLE, l);
+               __inc_zone_state(zone, NR_INACTIVE_ANON + l);
+               __count_vm_event(UNEVICTABLE_PGRESCUED);
+       } else {
+               /*
+                * rotate unevictable list
+                */
+               SetPageUnevictable(page);
+               list_move(&page->lru, &zone->lru[LRU_UNEVICTABLE].list);
+               mem_cgroup_rotate_lru_list(page, LRU_UNEVICTABLE);
+               if (page_evictable(page, NULL))
+                       goto retry;
+       }
+}
+
+/**
+ * scan_mapping_unevictable_pages - scan an address space for evictable pages
+ * @mapping: struct address_space to scan for evictable pages
+ *
+ * Scan all pages in mapping.  Check unevictable pages for
+ * evictability and move them to the appropriate zone lru list.
+ */
+void scan_mapping_unevictable_pages(struct address_space *mapping)
+{
+       pgoff_t next = 0;
+       pgoff_t end   = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
+                        PAGE_CACHE_SHIFT;
+       struct zone *zone;
+       struct pagevec pvec;
+
+       if (mapping->nrpages == 0)
+               return;
+
+       pagevec_init(&pvec, 0);
+       while (next < end &&
+               pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
+               int i;
+               int pg_scanned = 0;
+
+               zone = NULL;
+
+               for (i = 0; i < pagevec_count(&pvec); i++) {
+                       struct page *page = pvec.pages[i];
+                       pgoff_t page_index = page->index;
+                       struct zone *pagezone = page_zone(page);
+
+                       pg_scanned++;
+                       if (page_index > next)
+                               next = page_index;
+                       next++;
+
+                       if (pagezone != zone) {
+                               if (zone)
+                                       spin_unlock_irq(&zone->lru_lock);
+                               zone = pagezone;
+                               spin_lock_irq(&zone->lru_lock);
+                       }
+
+                       if (PageLRU(page) && PageUnevictable(page))
+                               check_move_unevictable_page(page, zone);
+               }
+               if (zone)
+                       spin_unlock_irq(&zone->lru_lock);
+               pagevec_release(&pvec);
+
+               count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
+       }
+
+}
+
+/**
+ * scan_zone_unevictable_pages - check unevictable list for evictable pages
+ * @zone: zone whose unevictable list is to be scanned
+ *
+ * Scan @zone's unevictable LRU lists to check for pages that have become
+ * evictable.  Move those that have to @zone's inactive list, where they
+ * become candidates for reclaim, unless shrink_inactive_list() decides
+ * to reactivate them.  Pages that are still unevictable are rotated
+ * back onto @zone's unevictable list.
+ */
+#define SCAN_UNEVICTABLE_BATCH_SIZE 16UL /* arbitrary lock hold batch size */
+static void scan_zone_unevictable_pages(struct zone *zone)
+{
+       struct list_head *l_unevictable = &zone->lru[LRU_UNEVICTABLE].list;
+       unsigned long scan;
+       unsigned long nr_to_scan = zone_page_state(zone, NR_UNEVICTABLE);
+
+       while (nr_to_scan > 0) {
+               unsigned long batch_size = min(nr_to_scan,
+                                               SCAN_UNEVICTABLE_BATCH_SIZE);
+
+               spin_lock_irq(&zone->lru_lock);
+               for (scan = 0;  scan < batch_size; scan++) {
+                       struct page *page = lru_to_page(l_unevictable);
+
+                       if (!trylock_page(page))
+                               continue;
+
+                       prefetchw_prev_lru_page(page, l_unevictable, flags);
+
+                       if (likely(PageLRU(page) && PageUnevictable(page)))
+                               check_move_unevictable_page(page, zone);
+
+                       unlock_page(page);
+               }
+               spin_unlock_irq(&zone->lru_lock);
+
+               nr_to_scan -= batch_size;
+       }
+}
+
+
+/**
+ * scan_all_zones_unevictable_pages - scan all unevictable lists for evictable pages
+ *
+ * A really big hammer:  scan all zones' unevictable LRU lists to check for
+ * pages that have become evictable.  Move those back to the zones'
+ * inactive list where they become candidates for reclaim.
+ * This occurs when, e.g., we have unswappable pages on the unevictable lists,
+ * and we add swap to the system.  As such, it runs in the context of a task
+ * that has possibly/probably made some previously unevictable pages
+ * evictable.
+ */
+static void scan_all_zones_unevictable_pages(void)
+{
+       struct zone *zone;
+
+       for_each_zone(zone) {
+               scan_zone_unevictable_pages(zone);
+       }
+}
+
+/*
+ * scan_unevictable_pages [vm] sysctl handler.  On demand re-scan of
+ * all nodes' unevictable lists for evictable pages
+ */
+unsigned long scan_unevictable_pages;
+
+int scan_unevictable_handler(struct ctl_table *table, int write,
+                          void __user *buffer,
+                          size_t *length, loff_t *ppos)
+{
+       proc_doulongvec_minmax(table, write, buffer, length, ppos);
+
+       if (write && *(unsigned long *)table->data)
+               scan_all_zones_unevictable_pages();
+
+       scan_unevictable_pages = 0;
+       return 0;
+}
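
Assuming the table entry is wired up as vm.scan_unevictable_pages (the sysctl
plumbing is outside this file), an administrator triggers a one-off rescan
with:

	echo 1 > /proc/sys/vm/scan_unevictable_pages

The handler resets the value to zero on every call, so reading the file back
always shows 0.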
+
+/*
+ * per node 'scan_unevictable_pages' attribute.  On demand re-scan of
+ * a specified node's per zone unevictable lists for evictable pages.
+ */
+
+static ssize_t read_scan_unevictable_node(struct sys_device *dev,
+                                         struct sysdev_attribute *attr,
+                                         char *buf)
+{
+       return sprintf(buf, "0\n");     /* always zero; should fit... */
+}
+
+static ssize_t write_scan_unevictable_node(struct sys_device *dev,
+                                          struct sysdev_attribute *attr,
+                                       const char *buf, size_t count)
+{
+       struct zone *node_zones = NODE_DATA(dev->id)->node_zones;
+       struct zone *zone;
+       unsigned long res;
+
+       if (strict_strtoul(buf, 10, &res))
+               return -EINVAL; /* not a valid number */
+
+       if (!res)
+               return 1;       /* zero is no-op */
+
+       for (zone = node_zones; zone - node_zones < MAX_NR_ZONES; ++zone) {
+               if (!populated_zone(zone))
+                       continue;
+               scan_zone_unevictable_pages(zone);
+       }
+       return 1;
+}
+
+
+static SYSDEV_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR,
+                       read_scan_unevictable_node,
+                       write_scan_unevictable_node);
+
+int scan_unevictable_register_node(struct node *node)
+{
+       return sysdev_create_file(&node->sysdev, &attr_scan_unevictable_pages);
+}
+
+void scan_unevictable_unregister_node(struct node *node)
+{
+       sysdev_remove_file(&node->sysdev, &attr_scan_unevictable_pages);
+}
+
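
The per-node attribute registered above hangs off the node sysdev; assuming
the usual sysfs layout for node devices, the per-node equivalent of the
sysctl is:

	echo 1 > /sys/devices/system/node/node0/scan_unevictable_pages

Writes of 0 are a no-op, and reads always return "0\n", matching
read_scan_unevictable_node().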