diff --git a/mm/vmscan.c b/mm/vmscan.c
index f75d924..5fa3eda 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -60,6 +60,9 @@ struct scan_control {
 
        int may_writepage;
 
+       /* Can mapped pages be reclaimed? */
+       int may_unmap;
+
        /* Can pages be swapped as part of reclaim? */
        int may_swap;
 
@@ -78,6 +81,12 @@ struct scan_control {
        /* Which cgroup do we reclaim from */
        struct mem_cgroup *mem_cgroup;
 
+       /*
+        * Nodemask of nodes allowed by the caller. If NULL, all nodes
+        * are scanned.
+        */
+       nodemask_t      *nodemask;
+
        /* Pluggable isolate pages callback */
        unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
                        unsigned long *scanned, int order, int mode,
@@ -125,11 +134,30 @@ static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
-#define scan_global_lru(sc)    (!(sc)->mem_cgroup)
+#define scanning_global_lru(sc)        (!(sc)->mem_cgroup)
 #else
-#define scan_global_lru(sc)    (1)
+#define scanning_global_lru(sc)        (1)
 #endif
 
+static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
+                                                 struct scan_control *sc)
+{
+       if (!scanning_global_lru(sc))
+               return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
+
+       return &zone->reclaim_stat;
+}
+
+static unsigned long zone_nr_pages(struct zone *zone, struct scan_control *sc,
+                                  enum lru_list lru)
+{
+       if (!scanning_global_lru(sc))
+               return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
+
+       return zone_page_state(zone, NR_LRU_BASE + lru);
+}
+
+
 /*
  * Add a shrinker callback to be called from the vm
  */
@@ -195,8 +223,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                do_div(delta, lru_pages + 1);
                shrinker->nr += delta;
                if (shrinker->nr < 0) {
-                       printk(KERN_ERR "%s: nr=%ld\n",
-                                       __func__, shrinker->nr);
+                       printk(KERN_ERR "shrink_slab: %pF negative objects to "
+                              "delete nr=%ld\n",
+                              shrinker->shrink, shrinker->nr);
                        shrinker->nr = max_pass;
                }
 
@@ -257,7 +286,7 @@ static inline int page_mapping_inuse(struct page *page)
 
 static inline int is_page_cache_freeable(struct page *page)
 {
-       return page_count(page) - !!PagePrivate(page) == 2;
+       return page_count(page) - !!page_has_private(page) == 2;
 }
 
 static int may_write_to_queue(struct backing_dev_info *bdi)
@@ -341,7 +370,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
                 * Some data journaling orphaned pages can have
                 * page->mapping == NULL while being dirty with clean buffers.
                 */
-               if (PagePrivate(page)) {
+               if (page_has_private(page)) {
                        if (try_to_free_buffers(page)) {
                                ClearPageDirty(page);
                                printk("%s: orphaned page\n", __func__);
@@ -587,7 +616,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                if (unlikely(!page_evictable(page, NULL)))
                        goto cull_mlocked;
 
-               if (!sc->may_swap && page_mapped(page))
+               if (!sc->may_unmap && page_mapped(page))
                        goto keep_locked;
 
                /* Double the slab pressure for mapped and swapcache pages */
@@ -701,7 +730,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                 * process address space (page_count == 1) it can be freed.
                 * Otherwise, leave the page on the LRU so it is swappable.
                 */
-               if (PagePrivate(page)) {
+               if (page_has_private(page)) {
                        if (!try_to_release_page(page, sc->gfp_mask))
                                goto activate_locked;
                        if (!mapping && page_count(page) == 1) {
@@ -1029,6 +1058,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
        struct pagevec pvec;
        unsigned long nr_scanned = 0;
        unsigned long nr_reclaimed = 0;
+       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
        pagevec_init(&pvec, 1);
 
@@ -1070,13 +1100,14 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                __mod_zone_page_state(zone, NR_INACTIVE_ANON,
                                                -count[LRU_INACTIVE_ANON]);
 
-               if (scan_global_lru(sc)) {
+               if (scanning_global_lru(sc))
                        zone->pages_scanned += nr_scan;
-                       zone->recent_scanned[0] += count[LRU_INACTIVE_ANON];
-                       zone->recent_scanned[0] += count[LRU_ACTIVE_ANON];
-                       zone->recent_scanned[1] += count[LRU_INACTIVE_FILE];
-                       zone->recent_scanned[1] += count[LRU_ACTIVE_FILE];
-               }
+
+               reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
+               reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
+               reclaim_stat->recent_scanned[1] += count[LRU_INACTIVE_FILE];
+               reclaim_stat->recent_scanned[1] += count[LRU_ACTIVE_FILE];
+
                spin_unlock_irq(&zone->lru_lock);
 
                nr_scanned += nr_scan;
@@ -1108,7 +1139,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                if (current_is_kswapd()) {
                        __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
                        __count_vm_events(KSWAPD_STEAL, nr_freed);
-               } else if (scan_global_lru(sc))
+               } else if (scanning_global_lru(sc))
                        __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
 
                __count_zone_vm_events(PGSTEAL, zone, nr_freed);
@@ -1134,9 +1165,9 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                        SetPageLRU(page);
                        lru = page_lru(page);
                        add_page_to_lru_list(zone, page, lru);
-                       if (PageActive(page) && scan_global_lru(sc)) {
+                       if (PageActive(page)) {
                                int file = !!page_is_file_cache(page);
-                               zone->recent_rotated[file]++;
+                               reclaim_stat->recent_rotated[file]++;
                        }
                        if (!pagevec_add(&pvec, page)) {
                                spin_unlock_irq(&zone->lru_lock);
@@ -1196,6 +1227,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        struct page *page;
        struct pagevec pvec;
        enum lru_list lru;
+       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
        lru_add_drain();
        spin_lock_irq(&zone->lru_lock);
@@ -1206,10 +1238,10 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
         * zone->pages_scanned is used for detect zone's oom
         * mem_cgroup remembers nr_scan by itself.
         */
-       if (scan_global_lru(sc)) {
+       if (scanning_global_lru(sc)) {
                zone->pages_scanned += pgscanned;
-               zone->recent_scanned[!!file] += pgmoved;
        }
+       reclaim_stat->recent_scanned[!!file] += pgmoved;
 
        if (file)
                __mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
@@ -1240,7 +1272,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
         * Move the pages to the [file or anon] inactive list.
         */
        pagevec_init(&pvec, 1);
-       pgmoved = 0;
        lru = LRU_BASE + file * LRU_FILE;
 
        spin_lock_irq(&zone->lru_lock);
@@ -1250,9 +1281,9 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
         * This helps balance scan pressure between file and anonymous
         * pages in get_scan_ratio.
         */
-       if (scan_global_lru(sc))
-               zone->recent_rotated[!!file] += pgmoved;
+       reclaim_stat->recent_rotated[!!file] += pgmoved;
 
+       pgmoved = 0;
        while (!list_empty(&l_inactive)) {
                page = lru_to_page(&l_inactive);
                prefetchw_prev_lru_page(page, &l_inactive, flags);
@@ -1277,28 +1308,15 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        }
        __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
        pgdeactivate += pgmoved;
-       if (buffer_heads_over_limit) {
-               spin_unlock_irq(&zone->lru_lock);
-               pagevec_strip(&pvec);
-               spin_lock_irq(&zone->lru_lock);
-       }
        __count_zone_vm_events(PGREFILL, zone, pgscanned);
        __count_vm_events(PGDEACTIVATE, pgdeactivate);
        spin_unlock_irq(&zone->lru_lock);
-       if (vm_swap_full())
-               pagevec_swap_free(&pvec);
-
+       if (buffer_heads_over_limit)
+               pagevec_strip(&pvec);
        pagevec_release(&pvec);
 }
 
-/**
- * inactive_anon_is_low - check if anonymous pages need to be deactivated
- * @zone: zone to check
- *
- * Returns true if the zone does not have enough inactive anon pages,
- * meaning some active anon pages need to be deactivated.
- */
-static int inactive_anon_is_low(struct zone *zone)
+static int inactive_anon_is_low_global(struct zone *zone)
 {
        unsigned long active, inactive;
 
@@ -1311,6 +1329,25 @@ static int inactive_anon_is_low(struct zone *zone)
        return 0;
 }
 
+/**
+ * inactive_anon_is_low - check if anonymous pages need to be deactivated
+ * @zone: zone to check
+ * @sc:   scan control of this context
+ *
+ * Returns true if the zone does not have enough inactive anon pages,
+ * meaning some active anon pages need to be deactivated.
+ */
+static int inactive_anon_is_low(struct zone *zone, struct scan_control *sc)
+{
+       int low;
+
+       if (scanning_global_lru(sc))
+               low = inactive_anon_is_low_global(zone);
+       else
+               low = mem_cgroup_inactive_anon_is_low(sc->mem_cgroup);
+       return low;
+}
+
 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
        struct zone *zone, struct scan_control *sc, int priority)
 {
@@ -1321,8 +1358,7 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
                return 0;
        }
 
-       if (lru == LRU_ACTIVE_ANON &&
-           (!scan_global_lru(sc) || inactive_anon_is_low(zone))) {
+       if (lru == LRU_ACTIVE_ANON && inactive_anon_is_low(zone, sc)) {
                shrink_active_list(nr_to_scan, zone, sc, priority, file);
                return 0;
        }
@@ -1344,25 +1380,29 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
        unsigned long anon, file, free;
        unsigned long anon_prio, file_prio;
        unsigned long ap, fp;
+       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
        /* If we have no swap space, do not bother scanning anon pages. */
-       if (nr_swap_pages <= 0) {
+       if (!sc->may_swap || (nr_swap_pages <= 0)) {
                percent[0] = 0;
                percent[1] = 100;
                return;
        }
 
-       anon  = zone_page_state(zone, NR_ACTIVE_ANON) +
-               zone_page_state(zone, NR_INACTIVE_ANON);
-       file  = zone_page_state(zone, NR_ACTIVE_FILE) +
-               zone_page_state(zone, NR_INACTIVE_FILE);
-       free  = zone_page_state(zone, NR_FREE_PAGES);
-
-       /* If we have very few page cache pages, force-scan anon pages. */
-       if (unlikely(file + free <= zone->pages_high)) {
-               percent[0] = 100;
-               percent[1] = 0;
-               return;
+       anon  = zone_nr_pages(zone, sc, LRU_ACTIVE_ANON) +
+               zone_nr_pages(zone, sc, LRU_INACTIVE_ANON);
+       file  = zone_nr_pages(zone, sc, LRU_ACTIVE_FILE) +
+               zone_nr_pages(zone, sc, LRU_INACTIVE_FILE);
+
+       if (scanning_global_lru(sc)) {
+               free  = zone_page_state(zone, NR_FREE_PAGES);
+               /* If we have very few page cache pages,
+                  force-scan anon pages. */
+               if (unlikely(file + free <= zone->pages_high)) {
+                       percent[0] = 100;
+                       percent[1] = 0;
+                       return;
+               }
        }
 
        /*
@@ -1376,17 +1416,17 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
         *
         * anon in [0], file in [1]
         */
-       if (unlikely(zone->recent_scanned[0] > anon / 4)) {
+       if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
                spin_lock_irq(&zone->lru_lock);
-               zone->recent_scanned[0] /= 2;
-               zone->recent_rotated[0] /= 2;
+               reclaim_stat->recent_scanned[0] /= 2;
+               reclaim_stat->recent_rotated[0] /= 2;
                spin_unlock_irq(&zone->lru_lock);
        }
 
-       if (unlikely(zone->recent_scanned[1] > file / 4)) {
+       if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
                spin_lock_irq(&zone->lru_lock);
-               zone->recent_scanned[1] /= 2;
-               zone->recent_rotated[1] /= 2;
+               reclaim_stat->recent_scanned[1] /= 2;
+               reclaim_stat->recent_rotated[1] /= 2;
                spin_unlock_irq(&zone->lru_lock);
        }
 
@@ -1402,11 +1442,11 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
         * proportional to the fraction of recently scanned pages on
         * each list that were recently referenced and in active use.
         */
-       ap = (anon_prio + 1) * (zone->recent_scanned[0] + 1);
-       ap /= zone->recent_rotated[0] + 1;
+       ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
+       ap /= reclaim_stat->recent_rotated[0] + 1;
 
-       fp = (file_prio + 1) * (zone->recent_scanned[1] + 1);
-       fp /= zone->recent_rotated[1] + 1;
+       fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
+       fp /= reclaim_stat->recent_rotated[1] + 1;
 
        /* Normalize to percentages */
        percent[0] = 100 * ap / (ap + fp + 1);
@@ -1430,30 +1470,23 @@ static void shrink_zone(int priority, struct zone *zone,
        get_scan_ratio(zone, sc, percent);
 
        for_each_evictable_lru(l) {
-               if (scan_global_lru(sc)) {
-                       int file = is_file_lru(l);
-                       int scan;
-
-                       scan = zone_page_state(zone, NR_LRU_BASE + l);
-                       if (priority) {
-                               scan >>= priority;
-                               scan = (scan * percent[file]) / 100;
-                       }
+               int file = is_file_lru(l);
+               unsigned long scan;
+
+               scan = zone_nr_pages(zone, sc, l);
+               if (priority) {
+                       scan >>= priority;
+                       scan = (scan * percent[file]) / 100;
+               }
+               if (scanning_global_lru(sc)) {
                        zone->lru[l].nr_scan += scan;
                        nr[l] = zone->lru[l].nr_scan;
                        if (nr[l] >= swap_cluster_max)
                                zone->lru[l].nr_scan = 0;
                        else
                                nr[l] = 0;
-               } else {
-                       /*
-                        * This reclaim occurs not because zone memory shortage
-                        * but because memory controller hits its limit.
-                        * Don't modify zone reclaim related data.
-                        */
-                       nr[l] = mem_cgroup_calc_reclaim(sc->mem_cgroup, zone,
-                                                               priority, l);
-               }
+               } else
+                       nr[l] = scan;
        }
 
        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
@@ -1486,9 +1519,7 @@ static void shrink_zone(int priority, struct zone *zone,
         * Even if we did not try to evict anon pages at all, we want to
         * rebalance the anon lru active/inactive ratio.
         */
-       if (!scan_global_lru(sc) || inactive_anon_is_low(zone))
-               shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
-       else if (!scan_global_lru(sc))
+       if (inactive_anon_is_low(zone, sc))
                shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
 
        throttle_vm_writeout(sc->gfp_mask);
@@ -1516,14 +1547,15 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
        struct zone *zone;
 
        sc->all_unreclaimable = 1;
-       for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+       for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
+                                       sc->nodemask) {
                if (!populated_zone(zone))
                        continue;
                /*
                 * Take care memory controller reclaiming has small influence
                 * to global LRU.
                 */
-               if (scan_global_lru(sc)) {
+               if (scanning_global_lru(sc)) {
                        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                                continue;
                        note_zone_scanning_priority(zone, priority);
@@ -1576,12 +1608,12 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 
        delayacct_freepages_start();
 
-       if (scan_global_lru(sc))
+       if (scanning_global_lru(sc))
                count_vm_event(ALLOCSTALL);
        /*
         * mem_cgroup will not do shrink_slab.
         */
-       if (scan_global_lru(sc)) {
+       if (scanning_global_lru(sc)) {
                for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 
                        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
@@ -1600,7 +1632,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                 * Don't shrink slabs when reclaiming memory from
                 * over limit cgroups
                 */
-               if (scan_global_lru(sc)) {
+               if (scanning_global_lru(sc)) {
                        shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
                        if (reclaim_state) {
                                sc->nr_reclaimed += reclaim_state->reclaimed_slab;
@@ -1631,7 +1663,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                        congestion_wait(WRITE, HZ/10);
        }
        /* top priority shrink_zones still had more to do? don't OOM, then */
-       if (!sc->all_unreclaimable && scan_global_lru(sc))
+       if (!sc->all_unreclaimable && scanning_global_lru(sc))
                ret = sc->nr_reclaimed;
 out:
        /*
@@ -1644,7 +1676,7 @@ out:
        if (priority < 0)
                priority = 0;
 
-       if (scan_global_lru(sc)) {
+       if (scanning_global_lru(sc)) {
                for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
 
                        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
@@ -1661,17 +1693,19 @@ out:
 }
 
 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
-                                                               gfp_t gfp_mask)
+                               gfp_t gfp_mask, nodemask_t *nodemask)
 {
        struct scan_control sc = {
                .gfp_mask = gfp_mask,
                .may_writepage = !laptop_mode,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
+               .may_unmap = 1,
                .may_swap = 1,
                .swappiness = vm_swappiness,
                .order = order,
                .mem_cgroup = NULL,
                .isolate_pages = isolate_pages_global,
+               .nodemask = nodemask,
        };
 
        return do_try_to_free_pages(zonelist, &sc);
@@ -1680,23 +1714,23 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
 
 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
-                                               gfp_t gfp_mask,
-                                          bool noswap)
+                                          gfp_t gfp_mask,
+                                          bool noswap,
+                                          unsigned int swappiness)
 {
        struct scan_control sc = {
                .may_writepage = !laptop_mode,
-               .may_swap = 1,
+               .may_unmap = 1,
+               .may_swap = !noswap,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
-               .swappiness = vm_swappiness,
+               .swappiness = swappiness,
                .order = 0,
                .mem_cgroup = mem_cont,
                .isolate_pages = mem_cgroup_isolate_pages,
+               .nodemask = NULL, /* we don't care the placement */
        };
        struct zonelist *zonelist;
 
-       if (noswap)
-               sc.may_swap = 0;
-
        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
        zonelist = NODE_DATA(numa_node_id())->node_zonelists;
@@ -1734,6 +1768,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
        struct reclaim_state *reclaim_state = current->reclaim_state;
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
+               .may_unmap = 1,
                .may_swap = 1,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
                .swappiness = vm_swappiness,
@@ -1784,7 +1819,7 @@ loop_again:
                         * Do some background aging of the anon list, to give
                         * pages a chance to be referenced before reclaiming.
                         */
-                       if (inactive_anon_is_low(zone))
+                       if (inactive_anon_is_low(zone, &sc))
                                shrink_active_list(SWAP_CLUSTER_MAX, zone,
                                                        &sc, priority, 0);
 
@@ -1935,7 +1970,9 @@ static int kswapd(void *p)
        struct reclaim_state reclaim_state = {
                .reclaimed_slab = 0,
        };
-       node_to_cpumask_ptr(cpumask, pgdat->node_id);
+       const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
+
+       lockdep_set_current_reclaim_state(GFP_KERNEL);
 
        if (!cpumask_empty(cpumask))
                set_cpus_allowed_ptr(tsk, cpumask);
@@ -2020,49 +2057,47 @@ unsigned long global_lru_pages(void)
 #ifdef CONFIG_PM
 /*
  * Helper function for shrink_all_memory().  Tries to reclaim 'nr_pages' pages
- * from LRU lists system-wide, for given pass and priority, and returns the
- * number of reclaimed pages
+ * from LRU lists system-wide, for given pass and priority.
  *
  * For pass > 3 we also try to shrink the LRU lists that contain a few pages
  */
-static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
+static void shrink_all_zones(unsigned long nr_pages, int prio,
                                      int pass, struct scan_control *sc)
 {
        struct zone *zone;
-       unsigned long nr_to_scan, ret = 0;
-       enum lru_list l;
-
-       for_each_zone(zone) {
+       unsigned long nr_reclaimed = 0;
 
-               if (!populated_zone(zone))
-                       continue;
+       for_each_populated_zone(zone) {
+               enum lru_list l;
 
                if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
                        continue;
 
                for_each_evictable_lru(l) {
+                       enum zone_stat_item ls = NR_LRU_BASE + l;
+                       unsigned long lru_pages = zone_page_state(zone, ls);
+
                        /* For pass = 0, we don't shrink the active list */
-                       if (pass == 0 &&
-                               (l == LRU_ACTIVE || l == LRU_ACTIVE_FILE))
+                       if (pass == 0 && (l == LRU_ACTIVE_ANON ||
+                                               l == LRU_ACTIVE_FILE))
                                continue;
 
-                       zone->lru[l].nr_scan +=
-                               (zone_page_state(zone, NR_LRU_BASE + l)
-                                                               >> prio) + 1;
+                       zone->lru[l].nr_scan += (lru_pages >> prio) + 1;
                        if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
+                               unsigned long nr_to_scan;
+
                                zone->lru[l].nr_scan = 0;
-                               nr_to_scan = min(nr_pages,
-                                       zone_page_state(zone,
-                                                       NR_LRU_BASE + l));
-                               ret += shrink_list(l, nr_to_scan, zone,
+                               nr_to_scan = min(nr_pages, lru_pages);
+                               nr_reclaimed += shrink_list(l, nr_to_scan, zone,
                                                                sc, prio);
-                               if (ret >= nr_pages)
-                                       return ret;
+                               if (nr_reclaimed >= nr_pages) {
+                                       sc->nr_reclaimed += nr_reclaimed;
+                                       return;
+                               }
                        }
                }
        }
-
-       return ret;
+       sc->nr_reclaimed += nr_reclaimed;
 }
 
 /*
@@ -2076,16 +2111,14 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 unsigned long shrink_all_memory(unsigned long nr_pages)
 {
        unsigned long lru_pages, nr_slab;
-       unsigned long ret = 0;
        int pass;
        struct reclaim_state reclaim_state;
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
-               .may_swap = 0,
-               .swap_cluster_max = nr_pages,
+               .may_unmap = 0,
                .may_writepage = 1,
-               .swappiness = vm_swappiness,
                .isolate_pages = isolate_pages_global,
+               .nr_reclaimed = 0,
        };
 
        current->reclaim_state = &reclaim_state;
@@ -2099,8 +2132,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
                if (!reclaim_state.reclaimed_slab)
                        break;
 
-               ret += reclaim_state.reclaimed_slab;
-               if (ret >= nr_pages)
+               sc.nr_reclaimed += reclaim_state.reclaimed_slab;
+               if (sc.nr_reclaimed >= nr_pages)
                        goto out;
 
                nr_slab -= reclaim_state.reclaimed_slab;
@@ -2118,24 +2151,23 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
                int prio;
 
                /* Force reclaiming mapped pages in the passes #3 and #4 */
-               if (pass > 2) {
-                       sc.may_swap = 1;
-                       sc.swappiness = 100;
-               }
+               if (pass > 2)
+                       sc.may_unmap = 1;
 
                for (prio = DEF_PRIORITY; prio >= 0; prio--) {
-                       unsigned long nr_to_scan = nr_pages - ret;
+                       unsigned long nr_to_scan = nr_pages - sc.nr_reclaimed;
 
                        sc.nr_scanned = 0;
-                       ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
-                       if (ret >= nr_pages)
+                       sc.swap_cluster_max = nr_to_scan;
+                       shrink_all_zones(nr_to_scan, prio, pass, &sc);
+                       if (sc.nr_reclaimed >= nr_pages)
                                goto out;
 
                        reclaim_state.reclaimed_slab = 0;
                        shrink_slab(sc.nr_scanned, sc.gfp_mask,
                                        global_lru_pages());
-                       ret += reclaim_state.reclaimed_slab;
-                       if (ret >= nr_pages)
+                       sc.nr_reclaimed += reclaim_state.reclaimed_slab;
+                       if (sc.nr_reclaimed >= nr_pages)
                                goto out;
 
                        if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
@@ -2144,21 +2176,23 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
        }
 
        /*
-        * If ret = 0, we could not shrink LRUs, but there may be something
-        * in slab caches
+        * If sc.nr_reclaimed = 0, we could not shrink LRUs, but there may be
+        * something in slab caches
         */
-       if (!ret) {
+       if (!sc.nr_reclaimed) {
                do {
                        reclaim_state.reclaimed_slab = 0;
                        shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages());
-                       ret += reclaim_state.reclaimed_slab;
-               } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
+                       sc.nr_reclaimed += reclaim_state.reclaimed_slab;
+               } while (sc.nr_reclaimed < nr_pages &&
+                               reclaim_state.reclaimed_slab > 0);
        }
 
+
 out:
        current->reclaim_state = NULL;
 
-       return ret;
+       return sc.nr_reclaimed;
 }
 #endif
 
@@ -2174,7 +2208,9 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
        if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
                for_each_node_state(nid, N_HIGH_MEMORY) {
                        pg_data_t *pgdat = NODE_DATA(nid);
-                       node_to_cpumask_ptr(mask, pgdat->node_id);
+                       const struct cpumask *mask;
+
+                       mask = cpumask_of_node(pgdat->node_id);
 
                        if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
                                /* One of our CPUs online: restore mask */
@@ -2264,11 +2300,13 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
        int priority;
        struct scan_control sc = {
                .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
-               .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
+               .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
+               .may_swap = 1,
                .swap_cluster_max = max_t(unsigned long, nr_pages,
                                        SWAP_CLUSTER_MAX),
                .gfp_mask = gfp_mask,
                .swappiness = vm_swappiness,
+               .order = order,
                .isolate_pages = isolate_pages_global,
        };
        unsigned long slab_reclaimable;