Revert "Ignore madvise(MADV_WILLNEED) for hugetlbfs-backed regions"
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6177e3b..5fa3eda 100644
@@ -60,6 +60,9 @@ struct scan_control {
 
        int may_writepage;
 
+       /* Can mapped pages be reclaimed? */
+       int may_unmap;
+
        /* Can pages be swapped as part of reclaim? */
        int may_swap;
 
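The new flag takes over the second job may_swap had been doing: deciding whether reclaim may touch pages mapped into page tables at all. An illustrative sketch (not the literal kernel code) of who checks what after this series:

	/* shrink_page_list(): may_unmap gates pages mapped into page tables */
	if (!sc->may_unmap && page_mapped(page))
		goto keep_locked;

	/* get_scan_ratio(): may_swap gates the anon LRUs entirely */
	if (!sc->may_swap || (nr_swap_pages <= 0)) {
		percent[0] = 0;		/* anon */
		percent[1] = 100;	/* file */
		return;
	}

Both fragments appear verbatim in later hunks of this diff.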
@@ -78,6 +81,12 @@ struct scan_control {
        /* Which cgroup do we reclaim from */
        struct mem_cgroup *mem_cgroup;
 
+       /*
+        * Nodemask of nodes allowed by the caller. If NULL, all nodes
+        * are scanned.
+        */
+       nodemask_t      *nodemask;
+
        /* Pluggable isolate pages callback */
        unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
                        unsigned long *scanned, int order, int mode,
@@ -214,8 +223,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                do_div(delta, lru_pages + 1);
                shrinker->nr += delta;
                if (shrinker->nr < 0) {
-                       printk(KERN_ERR "%s: nr=%ld\n",
-                                       __func__, shrinker->nr);
+                       printk(KERN_ERR "shrink_slab: %pF negative objects to "
+                              "delete nr=%ld\n",
+                              shrinker->shrink, shrinker->nr);
                        shrinker->nr = max_pass;
                }
 
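The reworked message uses the %pF printk extension, which resolves a function pointer to its symbolic name via kallsyms, so the log now identifies the offending shrinker callback rather than just the function that noticed the problem. With a hypothetical misbehaving shrinker it would read something like (symbol and offsets invented for illustration):

	shrink_slab: shrink_icache_memory+0x0/0x4d negative objects to delete nr=-128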
@@ -276,7 +286,7 @@ static inline int page_mapping_inuse(struct page *page)
 
 static inline int is_page_cache_freeable(struct page *page)
 {
-       return page_count(page) - !!PagePrivate(page) == 2;
+       return page_count(page) - !!page_has_private(page) == 2;
 }
 
 static int may_write_to_queue(struct backing_dev_info *bdi)
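page_has_private() is the wider test introduced for FS-Cache: it covers PG_private_2 (pages with in-flight cache operations) in addition to PG_private, so such pages no longer look freeable. Its definition at this point is approximately:

	/* include/linux/page-flags.h, approximately */
	#define PAGE_FLAGS_PRIVATE \
		(1 << PG_private | 1 << PG_private_2)

	static inline int page_has_private(struct page *page)
	{
		return !!(page->flags & PAGE_FLAGS_PRIVATE);
	}

Since the helper already returns 0 or 1, the !! retained in is_page_cache_freeable() is redundant but harmless; the remaining PagePrivate() conversions below are mechanical.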
@@ -360,7 +370,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
                 * Some data journaling orphaned pages can have
                 * page->mapping == NULL while being dirty with clean buffers.
                 */
-               if (PagePrivate(page)) {
+               if (page_has_private(page)) {
                        if (try_to_free_buffers(page)) {
                                ClearPageDirty(page);
                                printk("%s: orphaned page\n", __func__);
@@ -606,7 +616,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                if (unlikely(!page_evictable(page, NULL)))
                        goto cull_mlocked;
 
-               if (!sc->may_swap && page_mapped(page))
+               if (!sc->may_unmap && page_mapped(page))
                        goto keep_locked;
 
                /* Double the slab pressure for mapped and swapcache pages */
@@ -720,7 +730,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                 * process address space (page_count == 1) it can be freed.
                 * Otherwise, leave the page on the LRU so it is swappable.
                 */
-               if (PagePrivate(page)) {
+               if (page_has_private(page)) {
                        if (!try_to_release_page(page, sc->gfp_mask))
                                goto activate_locked;
                        if (!mapping && page_count(page) == 1) {
@@ -1262,7 +1272,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
         * Move the pages to the [file or anon] inactive list.
         */
        pagevec_init(&pvec, 1);
-       pgmoved = 0;
        lru = LRU_BASE + file * LRU_FILE;
 
        spin_lock_irq(&zone->lru_lock);
@@ -1274,6 +1283,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
         */
        reclaim_stat->recent_rotated[!!file] += pgmoved;
 
+       pgmoved = 0;
        while (!list_empty(&l_inactive)) {
                page = lru_to_page(&l_inactive);
                prefetchw_prev_lru_page(page, &l_inactive, flags);
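These two hunks are a statistics fix. pgmoved was being cleared before its value had been folded into reclaim_stat->recent_rotated[], so the rotation history that drives the anon/file scan balance always saw zero. In essence:

	/* before: count lost */
	pgmoved = 0;
	...
	reclaim_stat->recent_rotated[!!file] += pgmoved;	/* += 0 */

	/* after: consume the count first, then reuse the variable */
	reclaim_stat->recent_rotated[!!file] += pgmoved;
	pgmoved = 0;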
@@ -1298,17 +1308,11 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        }
        __mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
        pgdeactivate += pgmoved;
-       if (buffer_heads_over_limit) {
-               spin_unlock_irq(&zone->lru_lock);
-               pagevec_strip(&pvec);
-               spin_lock_irq(&zone->lru_lock);
-       }
        __count_zone_vm_events(PGREFILL, zone, pgscanned);
        __count_vm_events(PGDEACTIVATE, pgdeactivate);
        spin_unlock_irq(&zone->lru_lock);
-       if (vm_swap_full())
-               pagevec_swap_free(&pvec);
-
+       if (buffer_heads_over_limit)
+               pagevec_strip(&pvec);
        pagevec_release(&pvec);
 }
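Two simplifications in the tail of shrink_active_list(). Buffer-head stripping no longer drops and retakes zone->lru_lock mid-function; it simply runs once the lock is released for good, which is safe because pagevec_strip() only needs per-page trylocks. Roughly (mm/swap.c, after this series' page_has_private conversion):

	void pagevec_strip(struct pagevec *pvec)
	{
		int i;

		for (i = 0; i < pagevec_count(pvec); i++) {
			struct page *page = pvec->pages[i];

			if (page_has_private(page) && trylock_page(page)) {
				if (page_has_private(page))
					try_to_release_page(page, 0);
				unlock_page(page);
			}
		}
	}

The vm_swap_full()/pagevec_swap_free() pass is dropped outright: freeing swap slots from here bought little, and the helper goes away with it.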
 
@@ -1379,7 +1383,7 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
        /* If we have no swap space, do not bother scanning anon pages. */
-       if (nr_swap_pages <= 0) {
+       if (!sc->may_swap || (nr_swap_pages <= 0)) {
                percent[0] = 0;
                percent[1] = 100;
                return;
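percent[0] and percent[1] are the scan weights handed back for the anon and file LRUs, so this makes reclaim file-only not just when swap is absent but whenever the caller clears may_swap (notably memcg reclaim with noswap, below). From the caller's side:

	unsigned long percent[2];	/* [0] anon, [1] file */

	get_scan_ratio(zone, sc, percent);
	/* with !sc->may_swap: percent[0] == 0, percent[1] == 100 */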
@@ -1467,9 +1471,9 @@ static void shrink_zone(int priority, struct zone *zone,
 
        for_each_evictable_lru(l) {
                int file = is_file_lru(l);
-               int scan;
+               unsigned long scan;
 
-               scan = zone_page_state(zone, NR_LRU_BASE + l);
+               scan = zone_nr_pages(zone, sc, l);
                if (priority) {
                        scan >>= priority;
                        scan = (scan * percent[file]) / 100;
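Two fixes share this hunk. First, scan is widened to unsigned long: at low priority it holds a large fraction of an LRU's size, and scan * percent[file] can overflow int on big machines. For example, a 256 GB zone is about 67 million 4 KiB pages, and at priority 1 the product 33,554,432 * 100 already exceeds INT_MAX. Second, zone_nr_pages() makes the starting count cgroup-aware instead of always reading the global zone counters; its shape is approximately:

	static unsigned long zone_nr_pages(struct zone *zone,
					   struct scan_control *sc,
					   enum lru_list lru)
	{
		if (!scanning_global_lru(sc))
			return mem_cgroup_zone_nr_pages(sc->mem_cgroup,
							zone, lru);
		return zone_page_state(zone, NR_LRU_BASE + lru);
	}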
@@ -1543,7 +1547,8 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
        struct zone *zone;
 
        sc->all_unreclaimable = 1;
-       for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+       for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
+                                       sc->nodemask) {
                if (!populated_zone(zone))
                        continue;
                /*
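Here the nodemask stored in scan_control starts paying off: direct reclaim walks only zones on nodes the allocation may actually use, rather than reclaiming from every node in the zonelist. A NULL mask preserves the old scan-everything behaviour; illustrative loop body:

	struct zoneref *z;
	struct zone *zone;

	/* NULL sc->nodemask degenerates to for_each_zone_zonelist() */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
					sc->nodemask)
		shrink_zone(priority, zone, sc);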
@@ -1688,17 +1693,19 @@ out:
 }
 
 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
-                                                               gfp_t gfp_mask)
+                               gfp_t gfp_mask, nodemask_t *nodemask)
 {
        struct scan_control sc = {
                .gfp_mask = gfp_mask,
                .may_writepage = !laptop_mode,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
+               .may_unmap = 1,
                .may_swap = 1,
                .swappiness = vm_swappiness,
                .order = order,
                .mem_cgroup = NULL,
                .isolate_pages = isolate_pages_global,
+               .nodemask = nodemask,
        };
 
        return do_try_to_free_pages(zonelist, &sc);
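The new parameter comes from the page allocator: __alloc_pages_nodemask() can now hand the allocation's policy mask through to reclaim, so a cpuset-confined or mbind()-restricted allocation no longer reclaims from nodes it could not allocate from anyway. Roughly:

	/* allocator slow path passes its own mask through */
	did_some_progress = try_to_free_pages(zonelist, order,
					      gfp_mask, nodemask);

	/* callers with no placement constraint simply pass NULL */
	nr = try_to_free_pages(zonelist, 0, GFP_KERNEL, NULL);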
@@ -1713,18 +1720,17 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 {
        struct scan_control sc = {
                .may_writepage = !laptop_mode,
-               .may_swap = 1,
+               .may_unmap = 1,
+               .may_swap = !noswap,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
                .swappiness = swappiness,
                .order = 0,
                .mem_cgroup = mem_cont,
                .isolate_pages = mem_cgroup_isolate_pages,
+               .nodemask = NULL, /* we don't care about placement */
        };
        struct zonelist *zonelist;
 
-       if (noswap)
-               sc.may_swap = 0;
-
        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
        zonelist = NODE_DATA(numa_node_id())->node_zonelists;
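With may_swap restored to its literal meaning, the memcg noswap flag can be applied at initialisation time and the old post-init override goes away. Combined with the get_scan_ratio() change above, noswap reclaim now stays off the anon lists entirely:

	.may_swap = !noswap,	/* noswap: reclaim file-backed pages only */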
@@ -1762,6 +1768,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
        struct reclaim_state *reclaim_state = current->reclaim_state;
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
+               .may_unmap = 1,
                .may_swap = 1,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
                .swappiness = vm_swappiness,
@@ -1963,7 +1970,9 @@ static int kswapd(void *p)
        struct reclaim_state reclaim_state = {
                .reclaimed_slab = 0,
        };
-       node_to_cpumask_ptr(cpumask, pgdat->node_id);
+       const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
+
+       lockdep_set_current_reclaim_state(GFP_KERNEL);
 
        if (!cpumask_empty(cpumask))
                set_cpus_allowed_ptr(tsk, cpumask);
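Besides the cpumask conversion (node_to_cpumask_ptr() is being removed; cpumask_of_node() returns a pointer to the node's existing mask), kswapd now registers itself with lockdep as being in reclaim context. That lets lockdep flag locks taken both inside reclaim and around allocations that can recurse into reclaim, the classic __GFP_FS deadlock. A transient reclaim context would pair the call with a clear, roughly:

	lockdep_set_current_reclaim_state(gfp_mask);
	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
	lockdep_clear_current_reclaim_state();

kswapd never leaves reclaim, so the set alone suffices.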
@@ -2048,22 +2057,19 @@ unsigned long global_lru_pages(void)
 #ifdef CONFIG_PM
 /*
  * Helper function for shrink_all_memory().  Tries to reclaim 'nr_pages' pages
- * from LRU lists system-wide, for given pass and priority, and returns the
- * number of reclaimed pages
+ * from LRU lists system-wide, for given pass and priority.
  *
  * For pass > 3 we also try to shrink the LRU lists that contain a few pages
  */
-static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
+static void shrink_all_zones(unsigned long nr_pages, int prio,
                                      int pass, struct scan_control *sc)
 {
        struct zone *zone;
-       unsigned long ret = 0;
+       unsigned long nr_reclaimed = 0;
 
-       for_each_zone(zone) {
+       for_each_populated_zone(zone) {
                enum lru_list l;
 
-               if (!populated_zone(zone))
-                       continue;
                if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
                        continue;
 
@@ -2082,14 +2088,16 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 
                                zone->lru[l].nr_scan = 0;
                                nr_to_scan = min(nr_pages, lru_pages);
-                               ret += shrink_list(l, nr_to_scan, zone,
+                               nr_reclaimed += shrink_list(l, nr_to_scan, zone,
                                                                sc, prio);
-                               if (ret >= nr_pages)
-                                       return ret;
+                               if (nr_reclaimed >= nr_pages) {
+                                       sc->nr_reclaimed += nr_reclaimed;
+                                       return;
+                               }
                        }
                }
        }
-       return ret;
+       sc->nr_reclaimed += nr_reclaimed;
 }
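shrink_all_zones() now accumulates into sc->nr_reclaimed instead of returning a count, which lets shrink_all_memory() keep one running total across LRU and slab passes (see below). The open-coded populated_zone() check is absorbed by the new for_each_populated_zone() iterator, approximately:

	/* include/linux/mmzone.h, approximately */
	#define for_each_populated_zone(zone)			\
		for (zone = (first_online_pgdat())->node_zones;	\
		     zone;					\
		     zone = next_zone(zone))			\
			if (!populated_zone(zone))		\
				; /* skip empty zones */	\
			else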
 
 /*
@@ -2103,15 +2111,14 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 unsigned long shrink_all_memory(unsigned long nr_pages)
 {
        unsigned long lru_pages, nr_slab;
-       unsigned long ret = 0;
        int pass;
        struct reclaim_state reclaim_state;
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
-               .may_swap = 0,
-               .swap_cluster_max = nr_pages,
+               .may_unmap = 0,
                .may_writepage = 1,
                .isolate_pages = isolate_pages_global,
+               .nr_reclaimed = 0,
        };
 
        current->reclaim_state = &reclaim_state;
@@ -2125,8 +2132,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
                if (!reclaim_state.reclaimed_slab)
                        break;
 
-               ret += reclaim_state.reclaimed_slab;
-               if (ret >= nr_pages)
+               sc.nr_reclaimed += reclaim_state.reclaimed_slab;
+               if (sc.nr_reclaimed >= nr_pages)
                        goto out;
 
                nr_slab -= reclaim_state.reclaimed_slab;
@@ -2145,21 +2152,22 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 
                /* Force reclaiming mapped pages in the passes #3 and #4 */
                if (pass > 2)
-                       sc.may_swap = 1;
+                       sc.may_unmap = 1;
 
                for (prio = DEF_PRIORITY; prio >= 0; prio--) {
-                       unsigned long nr_to_scan = nr_pages - ret;
+                       unsigned long nr_to_scan = nr_pages - sc.nr_reclaimed;
 
                        sc.nr_scanned = 0;
-                       ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
-                       if (ret >= nr_pages)
+                       sc.swap_cluster_max = nr_to_scan;
+                       shrink_all_zones(nr_to_scan, prio, pass, &sc);
+                       if (sc.nr_reclaimed >= nr_pages)
                                goto out;
 
                        reclaim_state.reclaimed_slab = 0;
                        shrink_slab(sc.nr_scanned, sc.gfp_mask,
                                        global_lru_pages());
-                       ret += reclaim_state.reclaimed_slab;
-                       if (ret >= nr_pages)
+                       sc.nr_reclaimed += reclaim_state.reclaimed_slab;
+                       if (sc.nr_reclaimed >= nr_pages)
                                goto out;
 
                        if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
@@ -2168,21 +2176,22 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
        }
 
        /*
-        * If ret = 0, we could not shrink LRUs, but there may be something
-        * in slab caches
+        * If sc.nr_reclaimed = 0, we could not shrink LRUs, but there may be
+        * something in slab caches
         */
-       if (!ret) {
+       if (!sc.nr_reclaimed) {
                do {
                        reclaim_state.reclaimed_slab = 0;
                        shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages());
-                       ret += reclaim_state.reclaimed_slab;
-               } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
+                       sc.nr_reclaimed += reclaim_state.reclaimed_slab;
+               } while (sc.nr_reclaimed < nr_pages &&
+                               reclaim_state.reclaimed_slab > 0);
        }
 
 out:
        current->reclaim_state = NULL;
 
-       return ret;
+       return sc.nr_reclaimed;
 }
 #endif
 
@@ -2198,7 +2208,9 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
        if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
                for_each_node_state(nid, N_HIGH_MEMORY) {
                        pg_data_t *pgdat = NODE_DATA(nid);
-                       node_to_cpumask_ptr(mask, pgdat->node_id);
+                       const struct cpumask *mask;
+
+                       mask = cpumask_of_node(pgdat->node_id);
 
                        if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
                                /* One of our CPUs online: restore mask */
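Same cpumask conversion as in kswapd(): instead of having node_to_cpumask_ptr() materialise a mask variable, borrow a const pointer to the node's existing mask. The pattern, in isolation:

	const struct cpumask *mask = cpumask_of_node(pgdat->node_id);

	if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
		/* one of our CPUs came online: restore kswapd's binding */
		set_cpus_allowed_ptr(pgdat->kswapd, mask);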
@@ -2288,11 +2300,13 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
        int priority;
        struct scan_control sc = {
                .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
-               .may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
+               .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
+               .may_swap = 1,
                .swap_cluster_max = max_t(unsigned long, nr_pages,
                                        SWAP_CLUSTER_MAX),
                .gfp_mask = gfp_mask,
                .swappiness = vm_swappiness,
+               .order = order,
                .isolate_pages = isolate_pages_global,
        };
        unsigned long slab_reclaimable;
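The .order = order line is a real fix, not churn: zone_reclaim previously left sc.order at zero, so reclaim on behalf of a high-order allocation never engaged lumpy reclaim. The order flows down to the page isolator, roughly:

	/* shrink_inactive_list(): a nonzero order makes the isolator also
	 * take pages neighbouring each victim (lumpy reclaim) */
	nr_taken = sc->isolate_pages(sc->swap_cluster_max, &page_list,
				     &nr_scan, sc->order, mode, zone,
				     sc->mem_cgroup, 0, file);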