CGroup API files: make CGROUP_DEBUG default to off
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0960846..12e8627 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -19,6 +19,7 @@
 #include <linux/pagemap.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
+#include <linux/vmstat.h>
 #include <linux/file.h>
 #include <linux/writeback.h>
 #include <linux/blkdev.h>
@@ -35,6 +36,8 @@
 #include <linux/rwsem.h>
 #include <linux/delay.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
+#include <linux/memcontrol.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -62,17 +65,19 @@ struct scan_control {
        int swap_cluster_max;
 
        int swappiness;
-};
 
-/*
- * The list of shrinker callbacks used by to apply pressure to
- * ageable caches.
- */
-struct shrinker {
-       shrinker_t              shrinker;
-       struct list_head        list;
-       int                     seeks;  /* seeks to recreate an obj */
-       long                    nr;     /* objs pending delete */
+       int all_unreclaimable;
+
+       int order;
+
+       /* Which cgroup do we reclaim from */
+       struct mem_cgroup *mem_cgroup;
+
+       /* Pluggable isolate pages callback */
+       unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
+                       unsigned long *scanned, int order, int mode,
+                       struct zone *z, struct mem_cgroup *mem_cont,
+                       int active);
 };
 
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
@@ -114,37 +119,34 @@ long vm_total_pages;      /* The total number of pages which the VM controls */
 static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
 
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+#define scan_global_lru(sc)    (!(sc)->mem_cgroup)
+#else
+#define scan_global_lru(sc)    (1)
+#endif
+
 /*
  * Add a shrinker callback to be called from the vm
  */
-struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
+void register_shrinker(struct shrinker *shrinker)
 {
-        struct shrinker *shrinker;
-
-        shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
-        if (shrinker) {
-               shrinker->shrinker = theshrinker;
-               shrinker->seeks = seeks;
-               shrinker->nr = 0;
-               down_write(&shrinker_rwsem);
-               list_add_tail(&shrinker->list, &shrinker_list);
-               up_write(&shrinker_rwsem);
-       }
-       return shrinker;
+       shrinker->nr = 0;
+       down_write(&shrinker_rwsem);
+       list_add_tail(&shrinker->list, &shrinker_list);
+       up_write(&shrinker_rwsem);
 }
-EXPORT_SYMBOL(set_shrinker);
+EXPORT_SYMBOL(register_shrinker);
 
 /*
  * Remove one
  */
-void remove_shrinker(struct shrinker *shrinker)
+void unregister_shrinker(struct shrinker *shrinker)
 {
        down_write(&shrinker_rwsem);
        list_del(&shrinker->list);
        up_write(&shrinker_rwsem);
-       kfree(shrinker);
 }
-EXPORT_SYMBOL(remove_shrinker);
+EXPORT_SYMBOL(unregister_shrinker);
 
 #define SHRINK_BATCH 128
 /*
@@ -155,7 +157,7 @@ EXPORT_SYMBOL(remove_shrinker);
  * percentages of the lru and ageable caches.  This should balance the seeks
  * generated by these structures.
  *
- * If the vm encounted mapped pages on the LRU it increase the pressure on
+ * If the vm encountered mapped pages on the LRU it increases the pressure on
  * slab to avoid swapping.
  *
  * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
@@ -181,7 +183,7 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
        list_for_each_entry(shrinker, &shrinker_list, list) {
                unsigned long long delta;
                unsigned long total_scan;
-               unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);
+               unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
 
                delta = (4 * scanned) / shrinker->seeks;
                delta *= max_pass;
@@ -209,13 +211,13 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                        int shrink_ret;
                        int nr_before;
 
-                       nr_before = (*shrinker->shrinker)(0, gfp_mask);
-                       shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
+                       nr_before = (*shrinker->shrink)(0, gfp_mask);
+                       shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
                        if (shrink_ret == -1)
                                break;
                        if (shrink_ret < nr_before)
                                ret += nr_before - shrink_ret;
-                       mod_page_state(slabs_scanned, this_scan);
+                       count_vm_events(SLABS_SCANNED, this_scan);
                        total_scan -= this_scan;
 
                        cond_resched();
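
The two hunks above replace set_shrinker()/remove_shrinker(), which allocated the struct shrinker internally, with register_shrinker()/unregister_shrinker() operating on a caller-owned structure whose ->shrink callback reports the number of freeable objects when called with nr_to_scan == 0 and frees objects otherwise. A minimal sketch of a caller under the new interface follows; my_cache_objects and my_cache_shrink are hypothetical names, not part of this patch:

/* Sketch only: a caller-owned shrinker under the new interface. */
#include <linux/mm.h>	/* struct shrinker, DEFAULT_SEEKS (assumed location in this era) */

static atomic_t my_cache_objects = ATOMIC_INIT(0);

static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	if (nr_to_scan) {
		if (!(gfp_mask & __GFP_FS))
			return -1;	/* tell shrink_slab() to back off */
		/* ... free up to nr_to_scan objects, decrementing the counter ... */
	}
	/* nr_to_scan == 0 is the "how many freeable objects?" query. */
	return atomic_read(&my_cache_objects);
}

static struct shrinker my_cache_shrinker = {
	.shrink	= my_cache_shrink,
	.seeks	= DEFAULT_SEEKS,
};

/* at init:  register_shrinker(&my_cache_shrinker);   */
/* at exit:  unregister_shrinker(&my_cache_shrinker); */

Because the structure is now caller-owned, unregister_shrinker() no longer frees anything, which is why the kfree() disappears above.
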
@@ -280,15 +282,17 @@ static void handle_write_error(struct address_space *mapping,
                                struct page *page, int error)
 {
        lock_page(page);
-       if (page_mapping(page) == mapping) {
-               if (error == -ENOSPC)
-                       set_bit(AS_ENOSPC, &mapping->flags);
-               else
-                       set_bit(AS_EIO, &mapping->flags);
-       }
+       if (page_mapping(page) == mapping)
+               mapping_set_error(mapping, error);
        unlock_page(page);
 }
 
+/* Request for sync pageout. */
+enum pageout_io {
+       PAGEOUT_IO_ASYNC,
+       PAGEOUT_IO_SYNC,
+};
+
 /* possible outcome of pageout() */
 typedef enum {
        /* failed to write page out, page is locked */
@@ -305,7 +309,8 @@ typedef enum {
  * pageout is called by shrink_page_list() for each dirty page.
  * Calls ->writepage().
  */
-static pageout_t pageout(struct page *page, struct address_space *mapping)
+static pageout_t pageout(struct page *page, struct address_space *mapping,
+                                               enum pageout_io sync_writeback)
 {
        /*
         * If the page is dirty, only perform writeback if that write
@@ -364,28 +369,62 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
                        ClearPageReclaim(page);
                        return PAGE_ACTIVATE;
                }
+
+               /*
+                * Wait on writeback if requested to. This happens when
+                * direct reclaiming a large contiguous area and the
+                * first attempt to free a range of pages fails.
+                */
+               if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
+                       wait_on_page_writeback(page);
+
                if (!PageWriteback(page)) {
                        /* synchronous write or broken a_ops? */
                        ClearPageReclaim(page);
                }
-
+               inc_zone_page_state(page, NR_VMSCAN_WRITE);
                return PAGE_SUCCESS;
        }
 
        return PAGE_CLEAN;
 }
 
+/*
+ * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
+ * someone else has a ref on the page, abort and return 0.  If it was
+ * successfully detached, return 1.  Assumes the caller has a single ref on
+ * this page.
+ */
 int remove_mapping(struct address_space *mapping, struct page *page)
 {
-       if (!mapping)
-               return 0;               /* truncate got there first */
+       BUG_ON(!PageLocked(page));
+       BUG_ON(mapping != page_mapping(page));
 
        write_lock_irq(&mapping->tree_lock);
-
        /*
-        * The non-racy check for busy page.  It is critical to check
-        * PageDirty _after_ making sure that the page is freeable and
-        * not in use by anybody.       (pagecache + us == 2)
+        * The non-racy check for a busy page.
+        *
+        * Must be careful with the order of the tests. When someone has
+        * a ref to the page, it may be possible that they dirty it then
+        * drop the reference. So if PageDirty is tested before page_count
+        * here, then the following race may occur:
+        *
+        * get_user_pages(&page);
+        * [user mapping goes away]
+        * write_to(page);
+        *                              !PageDirty(page)    [good]
+        * SetPageDirty(page);
+        * put_page(page);
+        *                              !page_count(page)   [good, discard it]
+        *
+        * [oops, our write_to data is lost]
+        *
+        * Reversing the order of the tests ensures such a situation cannot
+        * escape unnoticed. The smp_rmb is needed to ensure the page->flags
+        * load is not satisfied before that of page->_count.
+        *
+        * Note that if SetPageDirty is always performed via set_page_dirty,
+        * and thus under tree_lock, then this ordering is not required.
         */
        if (unlikely(page_count(page) != 2))
                goto cannot_free;
@@ -416,7 +455,8 @@ cannot_free:
  * shrink_page_list() returns the number of reclaimed pages
  */
 static unsigned long shrink_page_list(struct list_head *page_list,
-                                       struct scan_control *sc)
+                                       struct scan_control *sc,
+                                       enum pageout_io sync_writeback)
 {
        LIST_HEAD(ret_pages);
        struct pagevec freed_pvec;
@@ -440,7 +480,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                if (TestSetPageLocked(page))
                        goto keep;
 
-               BUG_ON(PageActive(page));
+               VM_BUG_ON(PageActive(page));
 
                sc->nr_scanned++;
 
@@ -451,12 +491,28 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                if (page_mapped(page) || PageSwapCache(page))
                        sc->nr_scanned++;
 
-               if (PageWriteback(page))
-                       goto keep_locked;
+               may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
+                       (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
+
+               if (PageWriteback(page)) {
+                       /*
+                        * Synchronous reclaim is performed in two passes,
+                        * first an asynchronous pass over the list to
+                        * start parallel writeback, and a second synchronous
+                        * pass to wait for the IO to complete.  Wait here
+                        * for any page for which writeback has already
+                        * started.
+                        */
+                       if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
+                               wait_on_page_writeback(page);
+                       else
+                               goto keep_locked;
+               }
 
-               referenced = page_referenced(page, 1);
+               referenced = page_referenced(page, 1, sc->mem_cgroup);
                /* In active use or really unfreeable?  Activate it. */
-               if (referenced && page_mapping_inuse(page))
+               if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
+                                       referenced && page_mapping_inuse(page))
                        goto activate_locked;
 
 #ifdef CONFIG_SWAP
@@ -470,8 +526,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 #endif /* CONFIG_SWAP */
 
                mapping = page_mapping(page);
-               may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
-                       (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
 
                /*
                 * The page is mapped into the page tables of one or more
@@ -489,7 +543,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                }
 
                if (PageDirty(page)) {
-                       if (referenced)
+                       if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
                                goto keep_locked;
                        if (!may_enter_fs)
                                goto keep_locked;
@@ -497,7 +551,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                goto keep_locked;
 
                        /* Page is dirty, try to write it out here */
-                       switch(pageout(page, mapping)) {
+                       switch (pageout(page, mapping, sync_writeback)) {
                        case PAGE_KEEP:
                                goto keep_locked;
                        case PAGE_ACTIVATE:
@@ -547,7 +601,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                goto free_it;
                }
 
-               if (!remove_mapping(mapping, page))
+               if (!mapping || !remove_mapping(mapping, page))
                        goto keep_locked;
 
 free_it:
@@ -564,15 +618,60 @@ keep_locked:
                unlock_page(page);
 keep:
                list_add(&page->lru, &ret_pages);
-               BUG_ON(PageLRU(page));
+               VM_BUG_ON(PageLRU(page));
        }
        list_splice(&ret_pages, page_list);
        if (pagevec_count(&freed_pvec))
                __pagevec_release_nonlru(&freed_pvec);
-       mod_page_state(pgactivate, pgactivate);
+       count_vm_events(PGACTIVATE, pgactivate);
        return nr_reclaimed;
 }
 
+/* LRU Isolation modes. */
+#define ISOLATE_INACTIVE 0     /* Isolate inactive pages. */
+#define ISOLATE_ACTIVE 1       /* Isolate active pages. */
+#define ISOLATE_BOTH 2         /* Isolate both active and inactive pages. */
+
+/*
+ * Attempt to remove the specified page from its LRU.  Only take this page
+ * if it is of the appropriate PageActive status.  Pages which are being
+ * freed elsewhere are also ignored.
+ *
+ * page:       page to consider
+ * mode:       one of the LRU isolation modes defined above
+ *
+ * returns 0 on success, -ve errno on failure.
+ */
+int __isolate_lru_page(struct page *page, int mode)
+{
+       int ret = -EINVAL;
+
+       /* Only take pages on the LRU. */
+       if (!PageLRU(page))
+               return ret;
+
+       /*
+        * When checking the active state, we need to be sure we are
+        * dealing with comparable boolean values.  Take the logical not
+        * of each.
+        */
+       if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
+               return ret;
+
+       ret = -EBUSY;
+       if (likely(get_page_unless_zero(page))) {
+               /*
+                * Be careful not to clear PageLRU until after we're
+                * sure the page is not being freed elsewhere -- the
+                * page release code relies on it.
+                */
+               ClearPageLRU(page);
+               ret = 0;
+       }
+
+       return ret;
+}
+
 /*
  * zone->lru_lock is heavily contended.  Some of the functions that
  * shrink the lists perform better by taking out a batch of pages
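
In __isolate_lru_page() above, the active-state test compares !PageActive(page) with !mode: a page-flag test may return any non-zero value, so both sides are collapsed to 0/1 before comparing, as the "comparable boolean values" comment says. A small stand-alone sketch of the pitfall (the flag value 0x40 is made up for illustration):

/* Sketch only: why both sides are normalised with '!' before comparing. */
#include <stdio.h>

#define ISOLATE_INACTIVE 0
#define ISOLATE_ACTIVE   1

int main(void)
{
	int page_active = 0x40;			/* flag test returning "some non-zero bit" */
	int mode = ISOLATE_ACTIVE;

	/* Naive comparison wrongly rejects an active page in ACTIVE mode: */
	printf("naive:      %s\n", page_active != mode ? "reject" : "take");

	/* Normalising both sides to 0/1 gives the intended answer: */
	printf("normalised: %s\n", !page_active != !mode ? "reject" : "take");
	return 0;
}
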
@@ -587,44 +686,129 @@ keep:
  * @src:       The LRU list to pull pages off.
  * @dst:       The temp list to put pages on to.
  * @scanned:   The number of pages that were scanned.
+ * @order:     The caller's attempted allocation order
+ * @mode:      One of the LRU isolation modes
  *
  * returns how many pages were moved onto *@dst.
  */
 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                struct list_head *src, struct list_head *dst,
-               unsigned long *scanned)
+               unsigned long *scanned, int order, int mode)
 {
        unsigned long nr_taken = 0;
-       struct page *page;
        unsigned long scan;
 
        for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
-               struct list_head *target;
+               struct page *page;
+               unsigned long pfn;
+               unsigned long end_pfn;
+               unsigned long page_pfn;
+               int zone_id;
+
                page = lru_to_page(src);
                prefetchw_prev_lru_page(page, src, flags);
 
-               BUG_ON(!PageLRU(page));
+               VM_BUG_ON(!PageLRU(page));
 
-               list_del(&page->lru);
-               target = src;
-               if (likely(get_page_unless_zero(page))) {
-                       /*
-                        * Be careful not to clear PageLRU until after we're
-                        * sure the page is not being freed elsewhere -- the
-                        * page release code relies on it.
-                        */
-                       ClearPageLRU(page);
-                       target = dst;
+               switch (__isolate_lru_page(page, mode)) {
+               case 0:
+                       list_move(&page->lru, dst);
                        nr_taken++;
-               } /* else it is being freed elsewhere */
+                       break;
+
+               case -EBUSY:
+                       /* else it is being freed elsewhere */
+                       list_move(&page->lru, src);
+                       continue;
+
+               default:
+                       BUG();
+               }
+
+               if (!order)
+                       continue;
 
-               list_add(&page->lru, target);
+               /*
+                * Attempt to take all pages in the order aligned region
+                * surrounding the tag page.  Only take those pages of
+                * the same active state as that tag page.  We may safely
+                * round the target page pfn down to the requested order
+                * as the mem_map is guaranteed valid out to MAX_ORDER;
+                * if that page is in a different zone we will detect it
+                * from its zone id and abort this block scan.
+                */
+               zone_id = page_zone_id(page);
+               page_pfn = page_to_pfn(page);
+               pfn = page_pfn & ~((1 << order) - 1);
+               end_pfn = pfn + (1 << order);
+               for (; pfn < end_pfn; pfn++) {
+                       struct page *cursor_page;
+
+                       /* The target page is in the block, ignore it. */
+                       if (unlikely(pfn == page_pfn))
+                               continue;
+
+                       /* Avoid holes within the zone. */
+                       if (unlikely(!pfn_valid_within(pfn)))
+                               break;
+
+                       cursor_page = pfn_to_page(pfn);
+                       /* Check that we have not crossed a zone boundary. */
+                       if (unlikely(page_zone_id(cursor_page) != zone_id))
+                               continue;
+                       switch (__isolate_lru_page(cursor_page, mode)) {
+                       case 0:
+                               list_move(&cursor_page->lru, dst);
+                               nr_taken++;
+                               scan++;
+                               break;
+
+                       case -EBUSY:
+                               /* else it is being freed elsewhere */
+                               list_move(&cursor_page->lru, src);
+                       default:
+                               break;
+                       }
+               }
        }
 
        *scanned = scan;
        return nr_taken;
 }
 
+static unsigned long isolate_pages_global(unsigned long nr,
+                                       struct list_head *dst,
+                                       unsigned long *scanned, int order,
+                                       int mode, struct zone *z,
+                                       struct mem_cgroup *mem_cont,
+                                       int active)
+{
+       if (active)
+               return isolate_lru_pages(nr, &z->active_list, dst,
+                                               scanned, order, mode);
+       else
+               return isolate_lru_pages(nr, &z->inactive_list, dst,
+                                               scanned, order, mode);
+}
+
+/*
+ * clear_active_flags() is a helper for shrink_active_list(), clearing
+ * any active bits from the pages in the list.
+ */
+static unsigned long clear_active_flags(struct list_head *page_list)
+{
+       int nr_active = 0;
+       struct page *page;
+
+       list_for_each_entry(page, page_list, lru)
+               if (PageActive(page)) {
+                       ClearPageActive(page);
+                       nr_active++;
+               }
+
+       return nr_active;
+}
+
 /*
  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
  * of reclaimed pages
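
The order-aligned block walk added to isolate_lru_pages() above rounds the tag page's pfn down to the caller's allocation order and then scans the whole naturally aligned block, skipping the tag page itself and bailing out on pfn holes or zone-id changes. A stand-alone sketch of just the window arithmetic (the pfn and order values are made up):

/* Sketch only: the pfn window lumpy reclaim scans around a tag page. */
#include <stdio.h>

int main(void)
{
	unsigned long page_pfn = 0x12b7;	/* arbitrary example tag pfn */
	int order = 4;				/* caller wants a 16-page block */

	unsigned long pfn = page_pfn & ~((1UL << order) - 1);	/* 0x12b0 */
	unsigned long end_pfn = pfn + (1UL << order);		/* 0x12c0 */

	printf("tag pfn 0x%lx, order %d -> scan [0x%lx, 0x%lx)\n",
	       page_pfn, order, pfn, end_pfn);
	return 0;
}
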
@@ -646,24 +830,56 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                unsigned long nr_taken;
                unsigned long nr_scan;
                unsigned long nr_freed;
-
-               nr_taken = isolate_lru_pages(sc->swap_cluster_max,
-                                            &zone->inactive_list,
-                                            &page_list, &nr_scan);
-               zone->nr_inactive -= nr_taken;
-               zone->pages_scanned += nr_scan;
+               unsigned long nr_active;
+
+               nr_taken = sc->isolate_pages(sc->swap_cluster_max,
+                            &page_list, &nr_scan, sc->order,
+                            (sc->order > PAGE_ALLOC_COSTLY_ORDER)?
+                                            ISOLATE_BOTH : ISOLATE_INACTIVE,
+                               zone, sc->mem_cgroup, 0);
+               nr_active = clear_active_flags(&page_list);
+               __count_vm_events(PGDEACTIVATE, nr_active);
+
+               __mod_zone_page_state(zone, NR_ACTIVE, -nr_active);
+               __mod_zone_page_state(zone, NR_INACTIVE,
+                                               -(nr_taken - nr_active));
+               if (scan_global_lru(sc))
+                       zone->pages_scanned += nr_scan;
                spin_unlock_irq(&zone->lru_lock);
 
                nr_scanned += nr_scan;
-               nr_freed = shrink_page_list(&page_list, sc);
+               nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
+
+               /*
+                * If we are direct reclaiming for contiguous pages and we do
+                * not reclaim everything in the list, try again and wait
+                * for IO to complete. This will stall high-order allocations
+                * but that should be acceptable to the caller
+                */
+               if (nr_freed < nr_taken && !current_is_kswapd() &&
+                                       sc->order > PAGE_ALLOC_COSTLY_ORDER) {
+                       congestion_wait(WRITE, HZ/10);
+
+                       /*
+                        * The attempt at page out may have made some
+                        * of the pages active, mark them inactive again.
+                        */
+                       nr_active = clear_active_flags(&page_list);
+                       count_vm_events(PGDEACTIVATE, nr_active);
+
+                       nr_freed += shrink_page_list(&page_list, sc,
+                                                       PAGEOUT_IO_SYNC);
+               }
+
                nr_reclaimed += nr_freed;
                local_irq_disable();
                if (current_is_kswapd()) {
-                       __mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
-                       __mod_page_state(kswapd_steal, nr_freed);
-               } else
-                       __mod_page_state_zone(zone, pgscan_direct, nr_scan);
-               __mod_page_state_zone(zone, pgsteal, nr_freed);
+                       __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
+                       __count_vm_events(KSWAPD_STEAL, nr_freed);
+               } else if (scan_global_lru(sc))
+                       __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
+
+               __count_zone_vm_events(PGSTEAL, zone, nr_freed);
 
                if (nr_taken == 0)
                        goto done;
@@ -674,7 +890,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                 */
                while (!list_empty(&page_list)) {
                        page = lru_to_page(&page_list);
-                       BUG_ON(PageLRU(page));
+                       VM_BUG_ON(PageLRU(page));
                        SetPageLRU(page);
                        list_del(&page->lru);
                        if (PageActive(page))
@@ -696,6 +912,133 @@ done:
 }
 
 /*
+ * We are about to scan this zone at a certain priority level.  If that priority
+ * level is smaller (ie: more urgent) than the previous priority, then note
+ * that priority level within the zone.  This is done so that when the next
+ * process comes in to scan this zone, it will immediately start out at this
+ * priority level rather than having to build up its own scanning priority.
+ * Here, this priority affects only the reclaim-mapped threshold.
+ */
+static inline void note_zone_scanning_priority(struct zone *zone, int priority)
+{
+       if (priority < zone->prev_priority)
+               zone->prev_priority = priority;
+}
+
+static inline int zone_is_near_oom(struct zone *zone)
+{
+       return zone->pages_scanned >= (zone_page_state(zone, NR_ACTIVE)
+                               + zone_page_state(zone, NR_INACTIVE))*3;
+}
+
+/*
+ * Determine whether we should try to reclaim mapped pages.
+ * This is called only when sc->mem_cgroup is NULL.
+ */
+static int calc_reclaim_mapped(struct scan_control *sc, struct zone *zone,
+                               int priority)
+{
+       long mapped_ratio;
+       long distress;
+       long swap_tendency;
+       long imbalance;
+       int reclaim_mapped = 0;
+       int prev_priority;
+
+       if (scan_global_lru(sc) && zone_is_near_oom(zone))
+               return 1;
+       /*
+        * `distress' is a measure of how much trouble we're having
+        * reclaiming pages.  0 -> no problems.  100 -> great trouble.
+        */
+       if (scan_global_lru(sc))
+               prev_priority = zone->prev_priority;
+       else
+               prev_priority = mem_cgroup_get_reclaim_priority(sc->mem_cgroup);
+
+       distress = 100 >> min(prev_priority, priority);
+
+       /*
+        * The point of this algorithm is to decide when to start
+        * reclaiming mapped memory instead of just pagecache.  Work out
+        * how much memory
+        * is mapped.
+        */
+       if (scan_global_lru(sc))
+               mapped_ratio = ((global_page_state(NR_FILE_MAPPED) +
+                               global_page_state(NR_ANON_PAGES)) * 100) /
+                                       vm_total_pages;
+       else
+               mapped_ratio = mem_cgroup_calc_mapped_ratio(sc->mem_cgroup);
+
+       /*
+        * Now decide how much we really want to unmap some pages.  The
+        * mapped ratio is downgraded - just because there's a lot of
+        * mapped memory doesn't necessarily mean that page reclaim
+        * isn't succeeding.
+        *
+        * The distress ratio is important - we don't want to start
+        * going oom.
+        *
+        * A 100% value of vm_swappiness overrides this algorithm
+        * altogether.
+        */
+       swap_tendency = mapped_ratio / 2 + distress + sc->swappiness;
+
+       /*
+        * If there's huge imbalance between active and inactive
+        * (think active 100 times larger than inactive) we should
+        * become more permissive, or the system will take too much
+        * cpu before it starts swapping during memory pressure.
+        * Distress is about avoiding early-oom, this is about
+        * making swappiness graceful despite setting it to low
+        * values.
+        *
+        * Avoid div by zero with nr_inactive+1, and max resulting
+        * value is vm_total_pages.
+        */
+       if (scan_global_lru(sc)) {
+               imbalance  = zone_page_state(zone, NR_ACTIVE);
+               imbalance /= zone_page_state(zone, NR_INACTIVE) + 1;
+       } else
+               imbalance = mem_cgroup_reclaim_imbalance(sc->mem_cgroup);
+
+       /*
+        * Reduce the effect of imbalance if swappiness is low,
+        * this means for a swappiness very low, the imbalance
+        * must be much higher than 100 for this logic to make
+        * the difference.
+        *
+        * Max temporary value is vm_total_pages*100.
+        */
+       imbalance *= (vm_swappiness + 1);
+       imbalance /= 100;
+
+       /*
+        * If not much of the ram is mapped, makes the imbalance
+        * less relevant, it's high priority we refill the inactive
+        * list with mapped pages only in presence of high ratio of
+        * mapped pages.
+        *
+        * Max temporary value is vm_total_pages*100.
+        */
+       imbalance *= mapped_ratio;
+       imbalance /= 100;
+
+       /* apply imbalance feedback to swap_tendency */
+       swap_tendency += imbalance;
+
+       /*
+        * Now use this metric to decide whether to start moving mapped
+        * memory onto the inactive list.
+        */
+       if (swap_tendency >= 100)
+               reclaim_mapped = 1;
+
+       return reclaim_mapped;
+}
+
+/*
  * This moves pages from the active list to the inactive list.
  *
  * We move them the other way if the page is referenced by one or more
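
Working calc_reclaim_mapped() through with concrete numbers makes the reclaim_mapped decision easier to follow. The sketch below uses made-up inputs (priority 6, 60% of RAM mapped, default swappiness 60, a 3:1 active/inactive imbalance) and mirrors the integer arithmetic above:

/* Sketch only: calc_reclaim_mapped() with illustrative inputs. */
#include <stdio.h>

int main(void)
{
	long prev_priority = 6, priority = 6;	/* no earlier distress recorded */
	long mapped_ratio = 60;			/* 60% of RAM is mapped */
	long swappiness = 60;			/* default vm_swappiness */
	long nr_active = 3000, nr_inactive = 1000;

	long distress = 100 >> (prev_priority < priority ?
				prev_priority : priority);		/* 100 >> 6 == 1 */
	long swap_tendency = mapped_ratio / 2 + distress + swappiness;	/* 30 + 1 + 60 == 91 */

	long imbalance = nr_active / (nr_inactive + 1);			/* 2 */
	imbalance = imbalance * (swappiness + 1) / 100;			/* 2 * 61 / 100 == 1 */
	imbalance = imbalance * mapped_ratio / 100;			/* 1 * 60 / 100 == 0 */
	swap_tendency += imbalance;					/* still 91 */

	printf("swap_tendency=%ld -> reclaim_mapped=%d\n",
	       swap_tendency, swap_tendency >= 100);
	return 0;
}

With these inputs mapped pages are left alone (91 < 100); raising swappiness to 100, or letting the scanning priority fall to 1 so that distress becomes 100 >> 1 = 50, pushes swap_tendency past the threshold.
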
@@ -712,8 +1055,10 @@ done:
  * The downside is that we have to touch page->_count against each page.
  * But we had to alter page->flags anyway.
  */
+
+
 static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
-                               struct scan_control *sc)
+                               struct scan_control *sc, int priority)
 {
        unsigned long pgmoved;
        int pgdeactivate = 0;
@@ -725,55 +1070,22 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        struct pagevec pvec;
        int reclaim_mapped = 0;
 
-       if (sc->may_swap) {
-               long mapped_ratio;
-               long distress;
-               long swap_tendency;
-
-               /*
-                * `distress' is a measure of how much trouble we're having
-                * reclaiming pages.  0 -> no problems.  100 -> great trouble.
-                */
-               distress = 100 >> zone->prev_priority;
-
-               /*
-                * The point of this algorithm is to decide when to start
-                * reclaiming mapped memory instead of just pagecache.  Work out
-                * how much memory
-                * is mapped.
-                */
-               mapped_ratio = ((global_page_state(NR_FILE_MAPPED) +
-                               global_page_state(NR_ANON_PAGES)) * 100) /
-                                       vm_total_pages;
-
-               /*
-                * Now decide how much we really want to unmap some pages.  The
-                * mapped ratio is downgraded - just because there's a lot of
-                * mapped memory doesn't necessarily mean that page reclaim
-                * isn't succeeding.
-                *
-                * The distress ratio is important - we don't want to start
-                * going oom.
-                *
-                * A 100% value of vm_swappiness overrides this algorithm
-                * altogether.
-                */
-               swap_tendency = mapped_ratio / 2 + distress + sc->swappiness;
-
-               /*
-                * Now use this metric to decide whether to start moving mapped
-                * memory onto the inactive list.
-                */
-               if (swap_tendency >= 100)
-                       reclaim_mapped = 1;
-       }
+       if (sc->may_swap)
+               reclaim_mapped = calc_reclaim_mapped(sc, zone, priority);
 
        lru_add_drain();
        spin_lock_irq(&zone->lru_lock);
-       pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
-                                   &l_hold, &pgscanned);
-       zone->pages_scanned += pgscanned;
-       zone->nr_active -= pgmoved;
+       pgmoved = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
+                                       ISOLATE_ACTIVE, zone,
+                                       sc->mem_cgroup, 1);
+       /*
+        * zone->pages_scanned is used to detect a zone's OOM state;
+        * mem_cgroup remembers nr_scan by itself.
+        */
+       if (scan_global_lru(sc))
+               zone->pages_scanned += pgscanned;
+
+       __mod_zone_page_state(zone, NR_ACTIVE, -pgmoved);
        spin_unlock_irq(&zone->lru_lock);
 
        while (!list_empty(&l_hold)) {
@@ -783,7 +1095,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                if (page_mapped(page)) {
                        if (!reclaim_mapped ||
                            (total_swap_pages == 0 && PageAnon(page)) ||
-                           page_referenced(page, 0)) {
+                           page_referenced(page, 0, sc->mem_cgroup)) {
                                list_add(&page->lru, &l_active);
                                continue;
                        }
@@ -797,15 +1109,16 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        while (!list_empty(&l_inactive)) {
                page = lru_to_page(&l_inactive);
                prefetchw_prev_lru_page(page, &l_inactive, flags);
-               BUG_ON(PageLRU(page));
+               VM_BUG_ON(PageLRU(page));
                SetPageLRU(page);
-               BUG_ON(!PageActive(page));
+               VM_BUG_ON(!PageActive(page));
                ClearPageActive(page);
 
                list_move(&page->lru, &zone->inactive_list);
+               mem_cgroup_move_lists(page, false);
                pgmoved++;
                if (!pagevec_add(&pvec, page)) {
-                       zone->nr_inactive += pgmoved;
+                       __mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
                        spin_unlock_irq(&zone->lru_lock);
                        pgdeactivate += pgmoved;
                        pgmoved = 0;
@@ -815,7 +1128,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                        spin_lock_irq(&zone->lru_lock);
                }
        }
-       zone->nr_inactive += pgmoved;
+       __mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
        pgdeactivate += pgmoved;
        if (buffer_heads_over_limit) {
                spin_unlock_irq(&zone->lru_lock);
@@ -827,25 +1140,26 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        while (!list_empty(&l_active)) {
                page = lru_to_page(&l_active);
                prefetchw_prev_lru_page(page, &l_active, flags);
-               BUG_ON(PageLRU(page));
+               VM_BUG_ON(PageLRU(page));
                SetPageLRU(page);
-               BUG_ON(!PageActive(page));
+               VM_BUG_ON(!PageActive(page));
+
                list_move(&page->lru, &zone->active_list);
+               mem_cgroup_move_lists(page, true);
                pgmoved++;
                if (!pagevec_add(&pvec, page)) {
-                       zone->nr_active += pgmoved;
+                       __mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
                        pgmoved = 0;
                        spin_unlock_irq(&zone->lru_lock);
                        __pagevec_release(&pvec);
                        spin_lock_irq(&zone->lru_lock);
                }
        }
-       zone->nr_active += pgmoved;
-       spin_unlock(&zone->lru_lock);
+       __mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
 
-       __mod_page_state_zone(zone, pgrefill, pgscanned);
-       __mod_page_state(pgdeactivate, pgdeactivate);
-       local_irq_enable();
+       __count_zone_vm_events(PGREFILL, zone, pgscanned);
+       __count_vm_events(PGDEACTIVATE, pgdeactivate);
+       spin_unlock_irq(&zone->lru_lock);
 
        pagevec_release(&pvec);
 }
@@ -861,32 +1175,46 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
        unsigned long nr_to_scan;
        unsigned long nr_reclaimed = 0;
 
-       atomic_inc(&zone->reclaim_in_progress);
+       if (scan_global_lru(sc)) {
+               /*
+                * Add one to nr_to_scan just to make sure that the kernel
+                * will slowly sift through the active list.
+                */
+               zone->nr_scan_active +=
+                       (zone_page_state(zone, NR_ACTIVE) >> priority) + 1;
+               nr_active = zone->nr_scan_active;
+               zone->nr_scan_inactive +=
+                       (zone_page_state(zone, NR_INACTIVE) >> priority) + 1;
+               nr_inactive = zone->nr_scan_inactive;
+               if (nr_inactive >= sc->swap_cluster_max)
+                       zone->nr_scan_inactive = 0;
+               else
+                       nr_inactive = 0;
+
+               if (nr_active >= sc->swap_cluster_max)
+                       zone->nr_scan_active = 0;
+               else
+                       nr_active = 0;
+       } else {
+               /*
+                * This reclaim occurs not because of a zone memory shortage
+                * but because the memory controller hit its limit, so don't
+                * modify the zone's reclaim-related data.
+                */
+               nr_active = mem_cgroup_calc_reclaim_active(sc->mem_cgroup,
+                                       zone, priority);
 
-       /*
-        * Add one to `nr_to_scan' just to make sure that the kernel will
-        * slowly sift through the active list.
-        */
-       zone->nr_scan_active += (zone->nr_active >> priority) + 1;
-       nr_active = zone->nr_scan_active;
-       if (nr_active >= sc->swap_cluster_max)
-               zone->nr_scan_active = 0;
-       else
-               nr_active = 0;
+               nr_inactive = mem_cgroup_calc_reclaim_inactive(sc->mem_cgroup,
+                                       zone, priority);
+       }
 
-       zone->nr_scan_inactive += (zone->nr_inactive >> priority) + 1;
-       nr_inactive = zone->nr_scan_inactive;
-       if (nr_inactive >= sc->swap_cluster_max)
-               zone->nr_scan_inactive = 0;
-       else
-               nr_inactive = 0;
 
        while (nr_active || nr_inactive) {
                if (nr_active) {
                        nr_to_scan = min(nr_active,
                                        (unsigned long)sc->swap_cluster_max);
                        nr_active -= nr_to_scan;
-                       shrink_active_list(nr_to_scan, zone, sc);
+                       shrink_active_list(nr_to_scan, zone, sc, priority);
                }
 
                if (nr_inactive) {
@@ -898,9 +1226,7 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
                }
        }
 
-       throttle_vm_writeout();
-
-       atomic_dec(&zone->reclaim_in_progress);
+       throttle_vm_writeout(sc->gfp_mask);
        return nr_reclaimed;
 }
 
@@ -920,30 +1246,44 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static unsigned long shrink_zones(int priority, struct zone **zones,
+static unsigned long shrink_zones(int priority, struct zonelist *zonelist,
                                        struct scan_control *sc)
 {
+       enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
        unsigned long nr_reclaimed = 0;
-       int i;
-
-       for (i = 0; zones[i] != NULL; i++) {
-               struct zone *zone = zones[i];
+       struct zoneref *z;
+       struct zone *zone;
 
+       sc->all_unreclaimable = 1;
+       for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
                if (!populated_zone(zone))
                        continue;
+               /*
+                * Take care that memory-controller reclaim has only a small
+                * influence on the global LRU.
+                */
+               if (scan_global_lru(sc)) {
+                       if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+                               continue;
+                       note_zone_scanning_priority(zone, priority);
 
-               if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
-                       continue;
-
-               zone->temp_priority = priority;
-               if (zone->prev_priority > priority)
-                       zone->prev_priority = priority;
-
-               if (zone->all_unreclaimable && priority != DEF_PRIORITY)
-                       continue;       /* Let kswapd poll it */
+                       if (zone_is_all_unreclaimable(zone) &&
+                                               priority != DEF_PRIORITY)
+                               continue;       /* Let kswapd poll it */
+                       sc->all_unreclaimable = 0;
+               } else {
+                       /*
+                        * Ignore cpuset limitation here. We just want to reduce
+                        * # of used pages by us regardless of memory shortage.
+                        */
+                       sc->all_unreclaimable = 0;
+                       mem_cgroup_note_reclaim_priority(sc->mem_cgroup,
+                                                       priority);
+               }
 
                nr_reclaimed += shrink_zone(priority, zone, sc);
        }
+
        return nr_reclaimed;
 }
  
@@ -959,8 +1299,12 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
  * hope that some of these pages can be written.  But if the allocating task
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
+ *
+ * returns:    0, if no pages reclaimed
+ *             else, the number of pages reclaimed
  */
-unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
+static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
+                                       struct scan_control *sc)
 {
        int priority;
        int ret = 0;
@@ -968,40 +1312,45 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
        unsigned long nr_reclaimed = 0;
        struct reclaim_state *reclaim_state = current->reclaim_state;
        unsigned long lru_pages = 0;
-       int i;
-       struct scan_control sc = {
-               .gfp_mask = gfp_mask,
-               .may_writepage = !laptop_mode,
-               .swap_cluster_max = SWAP_CLUSTER_MAX,
-               .may_swap = 1,
-               .swappiness = vm_swappiness,
-       };
-
-       inc_page_state(allocstall);
+       struct zoneref *z;
+       struct zone *zone;
+       enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
 
-       for (i = 0; zones[i] != NULL; i++) {
-               struct zone *zone = zones[i];
+       if (scan_global_lru(sc))
+               count_vm_event(ALLOCSTALL);
+       /*
+        * mem_cgroup will not do shrink_slab.
+        */
+       if (scan_global_lru(sc)) {
+               for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 
-               if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
-                       continue;
+                       if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+                               continue;
 
-               zone->temp_priority = DEF_PRIORITY;
-               lru_pages += zone->nr_active + zone->nr_inactive;
+                       lru_pages += zone_page_state(zone, NR_ACTIVE)
+                                       + zone_page_state(zone, NR_INACTIVE);
+               }
        }
 
        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
-               sc.nr_scanned = 0;
+               sc->nr_scanned = 0;
                if (!priority)
                        disable_swap_token();
-               nr_reclaimed += shrink_zones(priority, zones, &sc);
-               shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
-               if (reclaim_state) {
-                       nr_reclaimed += reclaim_state->reclaimed_slab;
-                       reclaim_state->reclaimed_slab = 0;
+               nr_reclaimed += shrink_zones(priority, zonelist, sc);
+               /*
+                * Don't shrink slabs when reclaiming memory from
+                * over limit cgroups
+                */
+               if (scan_global_lru(sc)) {
+                       shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
+                       if (reclaim_state) {
+                               nr_reclaimed += reclaim_state->reclaimed_slab;
+                               reclaim_state->reclaimed_slab = 0;
+                       }
                }
-               total_scanned += sc.nr_scanned;
-               if (nr_reclaimed >= sc.swap_cluster_max) {
-                       ret = 1;
+               total_scanned += sc->nr_scanned;
+               if (nr_reclaimed >= sc->swap_cluster_max) {
+                       ret = nr_reclaimed;
                        goto out;
                }
 
@@ -1012,28 +1361,84 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
                 * that's undesirable in laptop mode, where we *want* lumpy
                 * writeout.  So in laptop mode, write out the whole world.
                 */
-               if (total_scanned > sc.swap_cluster_max +
-                                       sc.swap_cluster_max / 2) {
+               if (total_scanned > sc->swap_cluster_max +
+                                       sc->swap_cluster_max / 2) {
                        wakeup_pdflush(laptop_mode ? 0 : total_scanned);
-                       sc.may_writepage = 1;
+                       sc->may_writepage = 1;
                }
 
                /* Take a nap, wait for some writeback to complete */
-               if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
-                       blk_congestion_wait(WRITE, HZ/10);
+               if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
+                       congestion_wait(WRITE, HZ/10);
        }
+       /* top priority shrink_caches still had more to do? don't OOM, then */
+       if (!sc->all_unreclaimable && scan_global_lru(sc))
+               ret = nr_reclaimed;
 out:
-       for (i = 0; zones[i] != 0; i++) {
-               struct zone *zone = zones[i];
+       /*
+        * Now that we've scanned all the zones at this priority level, note
+        * that level within the zone so that the next thread which performs
+        * scanning of this zone will immediately start out at this priority
+        * level.  This affects only the decision whether or not to bring
+        * mapped pages onto the inactive list.
+        */
+       if (priority < 0)
+               priority = 0;
 
-               if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
-                       continue;
+       if (scan_global_lru(sc)) {
+               for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+
+                       if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+                               continue;
+
+                       zone->prev_priority = priority;
+               }
+       } else
+               mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority);
 
-               zone->prev_priority = zone->temp_priority;
-       }
        return ret;
 }
 
+unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
+                                                               gfp_t gfp_mask)
+{
+       struct scan_control sc = {
+               .gfp_mask = gfp_mask,
+               .may_writepage = !laptop_mode,
+               .swap_cluster_max = SWAP_CLUSTER_MAX,
+               .may_swap = 1,
+               .swappiness = vm_swappiness,
+               .order = order,
+               .mem_cgroup = NULL,
+               .isolate_pages = isolate_pages_global,
+       };
+
+       return do_try_to_free_pages(zonelist, &sc);
+}
+
+#ifdef CONFIG_CGROUP_MEM_RES_CTLR
+
+unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
+                                               gfp_t gfp_mask)
+{
+       struct scan_control sc = {
+               .may_writepage = !laptop_mode,
+               .may_swap = 1,
+               .swap_cluster_max = SWAP_CLUSTER_MAX,
+               .swappiness = vm_swappiness,
+               .order = 0,
+               .mem_cgroup = mem_cont,
+               .isolate_pages = mem_cgroup_isolate_pages,
+       };
+       struct zonelist *zonelist;
+
+       sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
+                       (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
+       zonelist = NODE_DATA(numa_node_id())->node_zonelists;
+       return do_try_to_free_pages(zonelist, &sc);
+}
+#endif
+
 /*
  * For kswapd, balance_pgdat() will work across all this node's zones until
  * they are all at pages_high.
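
try_to_free_mem_cgroup_pages() above is the cgroup-local twin of try_to_free_pages(): reclaim is driven through mem_cgroup_isolate_pages, the zonelist is taken from the current node, and the gfp mask keeps only the caller's reclaim bits plus GFP_HIGHUSER_MOVABLE placement flags. A hedged sketch of how a charge path might lean on it when a group goes over its limit; my_charge_slowpath() and mem_cgroup_over_limit() are hypothetical, the real call site lives in mm/memcontrol.c:

/*
 * Sketch only: an illustrative charge-time caller of the new entry point.
 * mem_cgroup_over_limit() and the retry bound are hypothetical.
 */
static int my_charge_slowpath(struct mem_cgroup *memcg, gfp_t gfp_mask)
{
	int tries = 5;				/* arbitrary bound for the sketch */

	while (mem_cgroup_over_limit(memcg)) {	/* hypothetical limit check */
		if (!tries--)
			return -ENOMEM;		/* let the caller handle OOM */
		try_to_free_mem_cgroup_pages(memcg, gfp_mask);
	}
	return 0;
}
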
@@ -1068,19 +1473,24 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
                .may_swap = 1,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
                .swappiness = vm_swappiness,
+               .order = order,
+               .mem_cgroup = NULL,
+               .isolate_pages = isolate_pages_global,
        };
+       /*
+        * temp_priority is used to remember the scanning priority at which
+        * this zone was successfully refilled to free_pages == pages_high.
+        */
+       int temp_priority[MAX_NR_ZONES];
 
 loop_again:
        total_scanned = 0;
        nr_reclaimed = 0;
        sc.may_writepage = !laptop_mode;
-       inc_page_state(pageoutrun);
+       count_vm_event(PAGEOUTRUN);
 
-       for (i = 0; i < pgdat->nr_zones; i++) {
-               struct zone *zone = pgdat->node_zones + i;
-
-               zone->temp_priority = DEF_PRIORITY;
-       }
+       for (i = 0; i < pgdat->nr_zones; i++)
+               temp_priority[i] = DEF_PRIORITY;
 
        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
@@ -1102,21 +1512,24 @@ loop_again:
                        if (!populated_zone(zone))
                                continue;
 
-                       if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+                       if (zone_is_all_unreclaimable(zone) &&
+                           priority != DEF_PRIORITY)
                                continue;
 
                        if (!zone_watermark_ok(zone, order, zone->pages_high,
                                               0, 0)) {
                                end_zone = i;
-                               goto scan;
+                               break;
                        }
                }
-               goto out;
-scan:
+               if (i < 0)
+                       goto out;
+
                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;
 
-                       lru_pages += zone->nr_active + zone->nr_inactive;
+                       lru_pages += zone_page_state(zone, NR_ACTIVE)
+                                       + zone_page_state(zone, NR_INACTIVE);
                }
 
                /*
@@ -1135,27 +1548,35 @@ scan:
                        if (!populated_zone(zone))
                                continue;
 
-                       if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+                       if (zone_is_all_unreclaimable(zone) &&
+                                       priority != DEF_PRIORITY)
                                continue;
 
                        if (!zone_watermark_ok(zone, order, zone->pages_high,
                                               end_zone, 0))
                                all_zones_ok = 0;
-                       zone->temp_priority = priority;
-                       if (zone->prev_priority > priority)
-                               zone->prev_priority = priority;
+                       temp_priority[i] = priority;
                        sc.nr_scanned = 0;
-                       nr_reclaimed += shrink_zone(priority, zone, &sc);
+                       note_zone_scanning_priority(zone, priority);
+                       /*
+                        * We put equal pressure on every zone, unless one
+                        * zone has way too many pages free already.
+                        */
+                       if (!zone_watermark_ok(zone, order, 8*zone->pages_high,
+                                               end_zone, 0))
+                               nr_reclaimed += shrink_zone(priority, zone, &sc);
                        reclaim_state->reclaimed_slab = 0;
                        nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
                                                lru_pages);
                        nr_reclaimed += reclaim_state->reclaimed_slab;
                        total_scanned += sc.nr_scanned;
-                       if (zone->all_unreclaimable)
+                       if (zone_is_all_unreclaimable(zone))
                                continue;
                        if (nr_slab == 0 && zone->pages_scanned >=
-                                   (zone->nr_active + zone->nr_inactive) * 4)
-                               zone->all_unreclaimable = 1;
+                               (zone_page_state(zone, NR_ACTIVE)
+                               + zone_page_state(zone, NR_INACTIVE)) * 6)
+                                       zone_set_flag(zone,
+                                                     ZONE_ALL_UNRECLAIMABLE);
                        /*
                         * If we've done a decent amount of scanning and
                         * the reclaim ratio is low, start doing writepage
@@ -1172,7 +1593,7 @@ scan:
                 * another pass across the zones.
                 */
                if (total_scanned && priority < DEF_PRIORITY - 2)
-                       blk_congestion_wait(WRITE, HZ/10);
+                       congestion_wait(WRITE, HZ/10);
 
                /*
                 * We do this so kswapd doesn't build up large priorities for
@@ -1184,13 +1605,21 @@ scan:
                        break;
        }
 out:
+       /*
+        * Note within each zone the priority level at which this zone was
+        * brought into a happy state.  So that the next thread which scans this
+        * zone will start out at that priority level.
+        */
        for (i = 0; i < pgdat->nr_zones; i++) {
                struct zone *zone = pgdat->node_zones + i;
 
-               zone->prev_priority = zone->temp_priority;
+               zone->prev_priority = temp_priority[i];
        }
        if (!all_zones_ok) {
                cond_resched();
+
+               try_to_freeze();
+
                goto loop_again;
        }
 
@@ -1219,11 +1648,10 @@ static int kswapd(void *p)
        struct reclaim_state reclaim_state = {
                .reclaimed_slab = 0,
        };
-       cpumask_t cpumask;
+       node_to_cpumask_ptr(cpumask, pgdat->node_id);
 
-       cpumask = node_to_cpumask(pgdat->node_id);
-       if (!cpus_empty(cpumask))
-               set_cpus_allowed(tsk, cpumask);
+       if (!cpus_empty(*cpumask))
+               set_cpus_allowed_ptr(tsk, cpumask);
        current->reclaim_state = &reclaim_state;
 
        /*
@@ -1239,13 +1667,12 @@ static int kswapd(void *p)
         * trying to free the first piece of memory in the first place).
         */
        tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
+       set_freezable();
 
        order = 0;
        for ( ; ; ) {
                unsigned long new_order;
 
-               try_to_freeze();
-
                prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
                new_order = pgdat->kswapd_max_order;
                pgdat->kswapd_max_order = 0;
@@ -1256,12 +1683,19 @@ static int kswapd(void *p)
                         */
                        order = new_order;
                } else {
-                       schedule();
+                       if (!freezing(current))
+                               schedule();
+
                        order = pgdat->kswapd_max_order;
                }
                finish_wait(&pgdat->kswapd_wait, &wait);
 
-               balance_pgdat(pgdat, order);
+               if (!try_to_freeze()) {
+                       /* We can speed up thawing tasks if we don't call
+                        * balance_pgdat after returning from the refrigerator
+                        */
+                       balance_pgdat(pgdat, order);
+               }
        }
        return 0;
 }
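
The kswapd changes above follow the usual freezable-kthread pattern: opt in with set_freezable(), avoid going to sleep while a freeze is pending, and skip the work pass right after try_to_freeze() reports that the task has just thawed. A generic sketch of that pattern; my_kthread() and its timed wait are illustrative, not taken from this patch:

/* Sketch only: the freezable-kthread loop kswapd now follows. */
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/sched.h>

static int my_kthread(void *unused)
{
	set_freezable();			/* opt in to the freezer */

	while (!kthread_should_stop()) {
		if (!freezing(current))
			schedule_timeout_interruptible(HZ);	/* wait for work */
		if (try_to_freeze())
			continue;		/* just thawed: re-check before working */
		/* ... perform one unit of work ... */
	}
	return 0;
}
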
@@ -1281,7 +1715,7 @@ void wakeup_kswapd(struct zone *zone, int order)
                return;
        if (pgdat->kswapd_max_order < order)
                pgdat->kswapd_max_order = order;
-       if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
+       if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                return;
        if (!waitqueue_active(&pgdat->kswapd_wait))
                return;
@@ -1296,8 +1730,8 @@ void wakeup_kswapd(struct zone *zone, int order)
  *
  * For pass > 3 we also try to shrink the LRU lists that contain a few pages
  */
-static unsigned long shrink_all_zones(unsigned long nr_pages, int pass,
-                                     int prio, struct scan_control *sc)
+static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
+                                     int pass, struct scan_control *sc)
 {
        struct zone *zone;
        unsigned long nr_to_scan, ret = 0;
@@ -1307,23 +1741,27 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int pass,
                if (!populated_zone(zone))
                        continue;
 
-               if (zone->all_unreclaimable && prio != DEF_PRIORITY)
+               if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
                        continue;
 
                /* For pass = 0 we don't shrink the active list */
                if (pass > 0) {
-                       zone->nr_scan_active += (zone->nr_active >> prio) + 1;
+                       zone->nr_scan_active +=
+                               (zone_page_state(zone, NR_ACTIVE) >> prio) + 1;
                        if (zone->nr_scan_active >= nr_pages || pass > 3) {
                                zone->nr_scan_active = 0;
-                               nr_to_scan = min(nr_pages, zone->nr_active);
-                               shrink_active_list(nr_to_scan, zone, sc);
+                               nr_to_scan = min(nr_pages,
+                                       zone_page_state(zone, NR_ACTIVE));
+                               shrink_active_list(nr_to_scan, zone, sc, prio);
                        }
                }
 
-               zone->nr_scan_inactive += (zone->nr_inactive >> prio) + 1;
+               zone->nr_scan_inactive +=
+                       (zone_page_state(zone, NR_INACTIVE) >> prio) + 1;
                if (zone->nr_scan_inactive >= nr_pages || pass > 3) {
                        zone->nr_scan_inactive = 0;
-                       nr_to_scan = min(nr_pages, zone->nr_inactive);
+                       nr_to_scan = min(nr_pages,
+                               zone_page_state(zone, NR_INACTIVE));
                        ret += shrink_inactive_list(nr_to_scan, zone, sc);
                        if (ret >= nr_pages)
                                return ret;
@@ -1333,6 +1771,11 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int pass,
        return ret;
 }
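The scan-credit arithmetic above adds (lru_size >> prio) + 1 to a zone's pending-scan counter on every call, so lower prio values (more reclaim pressure) contribute exponentially larger slices, and the list is only scanned once the credit reaches nr_pages (or unconditionally for pass > 3). A small standalone C illustration of that scaling, with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long lru_size = 100000;	/* pages on one LRU list */
	unsigned long nr_pages = 1024;		/* reclaim target */
	int prio;

	for (prio = 7; prio >= 0; prio--) {
		unsigned long credit = (lru_size >> prio) + 1;

		printf("prio %d: credit %6lu per call%s\n",
		       prio, credit,
		       credit >= nr_pages ? "  -> would trigger a scan" : "");
	}
	return 0;
}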
 
+static unsigned long count_lru_pages(void)
+{
+       return global_page_state(NR_ACTIVE) + global_page_state(NR_INACTIVE);
+}
+
 /*
  * Try to free `nr_pages' of memory, system-wide, and return the number of
  * freed pages.
@@ -1347,22 +1790,19 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
        unsigned long ret = 0;
        int pass;
        struct reclaim_state reclaim_state;
-       struct zone *zone;
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
                .may_swap = 0,
                .swap_cluster_max = nr_pages,
                .may_writepage = 1,
                .swappiness = vm_swappiness,
+               .isolate_pages = isolate_pages_global,
        };
 
        current->reclaim_state = &reclaim_state;
 
-       lru_pages = 0;
-       for_each_zone(zone)
-               lru_pages += zone->nr_active + zone->nr_inactive;
-
-       nr_slab = read_page_state(nr_slab);
+       lru_pages = count_lru_pages();
+       nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
        /* If slab caches are huge, it's better to hit them first */
        while (nr_slab >= lru_pages) {
                reclaim_state.reclaimed_slab = 0;
@@ -1388,13 +1828,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
        for (pass = 0; pass < 5; pass++) {
                int prio;
 
-               /* Needed for shrinking slab caches later on */
-               if (!lru_pages)
-                       for_each_zone(zone) {
-                               lru_pages += zone->nr_active;
-                               lru_pages += zone->nr_inactive;
-                       }
-
                /* Force reclaiming mapped pages in the passes #3 and #4 */
                if (pass > 2) {
                        sc.may_swap = 1;
@@ -1410,28 +1843,28 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
                                goto out;
 
                        reclaim_state.reclaimed_slab = 0;
-                       shrink_slab(sc.nr_scanned, sc.gfp_mask, lru_pages);
+                       shrink_slab(sc.nr_scanned, sc.gfp_mask,
+                                       count_lru_pages());
                        ret += reclaim_state.reclaimed_slab;
                        if (ret >= nr_pages)
                                goto out;
 
                        if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
-                               blk_congestion_wait(WRITE, HZ / 10);
+                               congestion_wait(WRITE, HZ / 10);
                }
-
-               lru_pages = 0;
        }
 
        /*
         * If ret = 0, we could not shrink LRUs, but there may be something
         * in slab caches
         */
-       if (!ret)
+       if (!ret) {
                do {
                        reclaim_state.reclaimed_slab = 0;
-                       shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
+                       shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages());
                        ret += reclaim_state.reclaimed_slab;
                } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
+       }
 
 out:
        current->reclaim_state = NULL;
@@ -1440,7 +1873,6 @@ out:
 }
 #endif
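The final fallback above keeps shaking the slab when the LRU passes freed nothing, stopping as soon as either enough pages were freed or shrink_slab() stops making progress. A minimal userspace model of that loop shape; fake_shrink_slab() is an invented stand-in that simply frees fewer pages on each call:

#include <stdio.h>

/* Stand-in for shrink_slab(): pretend it frees fewer pages each call. */
static unsigned long fake_shrink_slab(void)
{
	static unsigned long budget = 40;
	unsigned long freed = budget / 2;

	budget -= freed;
	return freed;
}

int main(void)
{
	unsigned long nr_pages = 64, ret = 0, freed;

	/*
	 * Same shape as the fallback above: keep shaking the slab while it
	 * still makes progress and we have not yet freed nr_pages.
	 */
	do {
		freed = fake_shrink_slab();
		ret += freed;
		printf("freed %lu (total %lu)\n", freed, ret);
	} while (ret < nr_pages && freed > 0);

	return 0;
}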
 
-#ifdef CONFIG_HOTPLUG_CPU
 /* It's optimal to keep kswapds on the same CPUs as their memory, but
    not required for correctness.  So if the last cpu in a node goes
    away, we get changed to run anywhere: as the first one comes back,
@@ -1448,20 +1880,20 @@ out:
 static int __devinit cpu_callback(struct notifier_block *nfb,
                                  unsigned long action, void *hcpu)
 {
-       pg_data_t *pgdat;
-       cpumask_t mask;
+       int nid;
+
+       if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
+               for_each_node_state(nid, N_HIGH_MEMORY) {
+                       pg_data_t *pgdat = NODE_DATA(nid);
+                       node_to_cpumask_ptr(mask, pgdat->node_id);
 
-       if (action == CPU_ONLINE) {
-               for_each_online_pgdat(pgdat) {
-                       mask = node_to_cpumask(pgdat->node_id);
-                       if (any_online_cpu(mask) != NR_CPUS)
+                       if (any_online_cpu(*mask) < nr_cpu_ids)
                                /* One of our CPUs online: restore mask */
-                               set_cpus_allowed(pgdat->kswapd, mask);
+                               set_cpus_allowed_ptr(pgdat->kswapd, mask);
                }
        }
        return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
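The hotplug callback above rebinds a node's kswapd to that node's CPUs as soon as one of them comes back online; if no local CPU is online, kswapd is left free to run anywhere. A toy model of that decision using plain bitmasks (node_cpus[] and online are invented values, not the kernel cpumask API):

#include <stdio.h>

#define NR_NODES 2

/* Illustrative node -> CPU bitmask map (bit n means CPU n). */
static const unsigned int node_cpus[NR_NODES] = { 0x3, 0xc };

int main(void)
{
	unsigned int online = 0x5;	/* cpu0 and cpu2 online */
	int nid;

	for (nid = 0; nid < NR_NODES; nid++) {
		if (node_cpus[nid] & online)
			printf("node %d: restore kswapd affinity to mask 0x%x\n",
			       nid, node_cpus[nid]);
		else
			printf("node %d: no local CPU online, kswapd may run anywhere\n",
			       nid);
	}
	return 0;
}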
 
 /*
  * This kswapd start function will be called by init and node-hot-add.
@@ -1490,7 +1922,7 @@ static int __init kswapd_init(void)
        int nid;
 
        swap_setup();
-       for_each_online_node(nid)
+       for_each_node_state(nid, N_HIGH_MEMORY)
                kswapd_run(nid);
        hotcpu_notifier(cpu_callback, 0);
        return 0;
@@ -1504,10 +1936,6 @@ module_init(kswapd_init)
  *
  * If non-zero call zone_reclaim when the number of free pages falls below
  * the watermarks.
- *
- * In the future we may add flags to the mode. However, the page allocator
- * should only have to check that zone_reclaim_mode != 0 before calling
- * zone_reclaim().
  */
 int zone_reclaim_mode __read_mostly;
 
@@ -1515,7 +1943,6 @@ int zone_reclaim_mode __read_mostly;
 #define RECLAIM_ZONE (1<<0)    /* Run shrink_cache on the zone */
 #define RECLAIM_WRITE (1<<1)   /* Writeout pages during reclaim */
 #define RECLAIM_SWAP (1<<2)    /* Swap pages out during reclaim */
-#define RECLAIM_SLAB (1<<3)    /* Do a global slab shrink if the zone is out of memory */
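zone_reclaim_mode is a bitmask, and after this patch only the three bits above remain defined. A small sketch of how a given sysctl value decodes into those bits (the value 3 used below is just an example):

#include <stdio.h>

#define RECLAIM_ZONE  (1 << 0)	/* run shrink_cache on the zone */
#define RECLAIM_WRITE (1 << 1)	/* write out pages during reclaim */
#define RECLAIM_SWAP  (1 << 2)	/* swap pages out during reclaim */

int main(void)
{
	int zone_reclaim_mode = RECLAIM_ZONE | RECLAIM_WRITE;	/* e.g. 3 */

	printf("reclaim zone: %s\n", (zone_reclaim_mode & RECLAIM_ZONE)  ? "yes" : "no");
	printf("write pages:  %s\n", (zone_reclaim_mode & RECLAIM_WRITE) ? "yes" : "no");
	printf("swap pages:   %s\n", (zone_reclaim_mode & RECLAIM_SWAP)  ? "yes" : "no");
	return 0;
}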
 
 /*
  * Priority for ZONE_RECLAIM. This determines the fraction of pages
@@ -1525,6 +1952,18 @@ int zone_reclaim_mode __read_mostly;
 #define ZONE_RECLAIM_PRIORITY 4
 
 /*
+ * Percentage of pages in a zone that must be unmapped for zone_reclaim to
+ * occur.
+ */
+int sysctl_min_unmapped_ratio = 1;
+
+/*
+ * If the number of slab pages in a zone grows beyond this percentage then
+ * slab reclaim needs to occur.
+ */
+int sysctl_min_slab_ratio = 5;
+
+/*
  * Try to free up some pages from this zone through reclaim.
  */
 static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
@@ -1542,7 +1981,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                                        SWAP_CLUSTER_MAX),
                .gfp_mask = gfp_mask,
                .swappiness = vm_swappiness,
+               .isolate_pages = isolate_pages_global,
        };
+       unsigned long slab_reclaimable;
 
        disable_swap_token();
        cond_resched();
@@ -1555,29 +1996,44 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
        reclaim_state.reclaimed_slab = 0;
        p->reclaim_state = &reclaim_state;
 
-       /*
-        * Free memory by calling shrink zone with increasing priorities
-        * until we have enough memory freed.
-        */
-       priority = ZONE_RECLAIM_PRIORITY;
-       do {
-               nr_reclaimed += shrink_zone(priority, zone, &sc);
-               priority--;
-       } while (priority >= 0 && nr_reclaimed < nr_pages);
+       if (zone_page_state(zone, NR_FILE_PAGES) -
+               zone_page_state(zone, NR_FILE_MAPPED) >
+               zone->min_unmapped_pages) {
+               /*
+                * Free memory by calling shrink zone with increasing
+                * priorities until we have enough memory freed.
+                */
+               priority = ZONE_RECLAIM_PRIORITY;
+               do {
+                       note_zone_scanning_priority(zone, priority);
+                       nr_reclaimed += shrink_zone(priority, zone, &sc);
+                       priority--;
+               } while (priority >= 0 && nr_reclaimed < nr_pages);
+       }
 
-       if (nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) {
+       slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+       if (slab_reclaimable > zone->min_slab_pages) {
                /*
                 * shrink_slab() does not currently allow us to determine how
-                * many pages were freed in this zone. So we just shake the slab
-                * a bit and then go off node for this particular allocation
-                * despite possibly having freed enough memory to allocate in
-                * this zone.  If we freed local memory then the next
-                * allocations will be local again.
+                * many pages were freed in this zone. So we take the current
+                * number of slab pages and shake the slab until it is reduced
+                * by the same nr_pages that we used for reclaiming unmapped
+                * pages.
                 *
-                * shrink_slab will free memory on all zones and may take
-                * a long time.
+                * Note that shrink_slab will free memory on all zones and may
+                * take a long time.
+                */
+               while (shrink_slab(sc.nr_scanned, gfp_mask, order) &&
+                       zone_page_state(zone, NR_SLAB_RECLAIMABLE) >
+                               slab_reclaimable - nr_pages)
+                       ;
+
+               /*
+                * Update nr_reclaimed by the number of slab pages we
+                * reclaimed from this zone.
                 */
-               shrink_slab(sc.nr_scanned, gfp_mask, order);
+               nr_reclaimed += slab_reclaimable -
+                       zone_page_state(zone, NR_SLAB_RECLAIMABLE);
        }
 
        p->reclaim_state = NULL;
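The slab-shaking loop above is driven purely by the NR_SLAB_RECLAIMABLE counter: snapshot it, call shrink_slab() until the counter has dropped by roughly nr_pages or progress stops, then credit the observed delta to nr_reclaimed. A userspace model of that before/after accounting, with fake_shrink_slab() standing in for the real shrinker:

#include <stdio.h>

static unsigned long slab_pages = 5000;	/* stand-in for NR_SLAB_RECLAIMABLE */

/* Stand-in for shrink_slab(): frees up to 300 pages, returns the progress made. */
static unsigned long fake_shrink_slab(void)
{
	unsigned long freed = slab_pages >= 300 ? 300 : slab_pages;

	slab_pages -= freed;
	return freed;
}

int main(void)
{
	unsigned long nr_pages = 1024;
	unsigned long slab_reclaimable = slab_pages;	/* snapshot before */
	unsigned long nr_reclaimed = 0;

	/* Shake the slab until it shrank by nr_pages or progress stops. */
	while (fake_shrink_slab() &&
	       slab_pages > slab_reclaimable - nr_pages)
		;

	/* Credit the observed drop in the counter, as the hunk above does. */
	nr_reclaimed += slab_reclaimable - slab_pages;
	printf("slab pages reclaimed: %lu\n", nr_reclaimed);
	return 0;
}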
@@ -1587,32 +2043,32 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 
 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 {
-       cpumask_t mask;
        int node_id;
+       int ret;
 
        /*
-        * Do not reclaim if there are not enough reclaimable pages in this
-        * zone that would satify this allocations.
-        *
-        * All unmapped pagecache pages are reclaimable.
+        * Zone reclaim reclaims unmapped file backed pages and
+        * slab pages if we are over the defined limits.
         *
-        * Both counters may be temporarily off a bit so we use
-        * SWAP_CLUSTER_MAX as the boundary. It may also be good to
-        * leave a few frequently used unmapped pagecache pages around.
+        * A small portion of unmapped file backed pages is needed for
+        * file I/O; otherwise pages read by file I/O will be immediately
+        * thrown out if the zone is overallocated. So we do not reclaim
+        * if less than a specified percentage of the zone is used by
+        * unmapped file backed pages.
         */
        if (zone_page_state(zone, NR_FILE_PAGES) -
-               zone_page_state(zone, NR_FILE_MAPPED) < SWAP_CLUSTER_MAX)
-                       return 0;
+           zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages
+           && zone_page_state(zone, NR_SLAB_RECLAIMABLE)
+                       <= zone->min_slab_pages)
+               return 0;
+
+       if (zone_is_all_unreclaimable(zone))
+               return 0;
 
        /*
-        * Avoid concurrent zone reclaims, do not reclaim in a zone that does
-        * not have reclaimable pages and if we should not delay the allocation
-        * then do not scan.
+        * Do not scan if the allocation should not be delayed.
         */
-       if (!(gfp_mask & __GFP_WAIT) ||
-               zone->all_unreclaimable ||
-               atomic_read(&zone->reclaim_in_progress) > 0 ||
-               (current->flags & PF_MEMALLOC))
+       if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
                        return 0;
 
        /*
@@ -1621,10 +2077,15 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
         * over remote processors and spread off node memory allocations
         * as wide as possible.
         */
-       node_id = zone->zone_pgdat->node_id;
-       mask = node_to_cpumask(node_id);
-       if (!cpus_empty(mask) && node_id != numa_node_id())
+       node_id = zone_to_nid(zone);
+       if (node_state(node_id, N_CPU) && node_id != numa_node_id())
+               return 0;
+
+       if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
                return 0;
-       return __zone_reclaim(zone, gfp_mask, order);
+       ret = __zone_reclaim(zone, gfp_mask, order);
+       zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
+
+       return ret;
 }
 #endif
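The ZONE_RECLAIM_LOCKED flag above replaces the old reclaim_in_progress counter: whoever wins the test-and-set reclaims, every other caller returns 0 immediately, and the winner clears the flag when done. A sketch of the same guard in portable C11, using atomic_flag as a stand-in for the zone flag helpers:

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the per-zone ZONE_RECLAIM_LOCKED bit. */
static atomic_flag zone_reclaim_locked = ATOMIC_FLAG_INIT;

static int do_reclaim(void)
{
	return 42;	/* pretend we reclaimed some pages */
}

static int zone_reclaim_model(void)
{
	int ret;

	/* Only one caller at a time may reclaim from this zone. */
	if (atomic_flag_test_and_set(&zone_reclaim_locked))
		return 0;	/* someone else is already reclaiming */

	ret = do_reclaim();
	atomic_flag_clear(&zone_reclaim_locked);
	return ret;
}

int main(void)
{
	printf("first caller reclaimed %d pages\n", zone_reclaim_model());
	return 0;
}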