diff --git a/mm/vmscan.c b/mm/vmscan.c
index f4619c6..5fa3eda 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -63,6 +63,9 @@ struct scan_control {
        /* Can mapped pages be reclaimed? */
        int may_unmap;
 
+       /* Can pages be swapped as part of reclaim? */
+       int may_swap;
+
        /* This context's SWAP_CLUSTER_MAX. If freeing memory for
         * suspend, we effectively ignore SWAP_CLUSTER_MAX.
         * In this context, it doesn't matter that we scan the
@@ -78,6 +81,12 @@ struct scan_control {
        /* Which cgroup do we reclaim from */
        struct mem_cgroup *mem_cgroup;
 
+       /*
+        * Nodemask of nodes allowed by the caller. If NULL, all nodes
+        * are scanned.
+        */
+       nodemask_t      *nodemask;
+
        /* Pluggable isolate pages callback */
        unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
                        unsigned long *scanned, int order, int mode,
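Taken together, the two hunks above extend scan_control with a swap gate and a node filter. For orientation, the struct after this diff looks roughly as follows (a sketch; members not touched here are abridged from the 2.6.29-era source and may differ in detail):

    struct scan_control {
            unsigned long nr_scanned;       /* incremented by the shrink functions */
            unsigned long nr_reclaimed;     /* pages freed so far */
            gfp_t gfp_mask;                 /* allocation mask for this reclaim */
            int may_writepage;              /* may dirty pages be written back? */
            int may_unmap;                  /* may mapped pages be reclaimed? */
            int may_swap;                   /* new: may pages be swapped out? */
            unsigned long swap_cluster_max; /* batch size per shrink pass */
            int swappiness;                 /* anon vs. file bias, 0..100 */
            int all_unreclaimable;
            int order;                      /* allocation order being satisfied */
            struct mem_cgroup *mem_cgroup;  /* cgroup to reclaim from, or NULL */
            nodemask_t *nodemask;           /* new: allowed nodes; NULL = all */
            unsigned long (*isolate_pages)(unsigned long nr,
                            struct list_head *dst, unsigned long *scanned,
                            int order, int mode, struct zone *z,
                            struct mem_cgroup *mem_cont,
                            int active, int file);
    };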
@@ -277,7 +286,7 @@ static inline int page_mapping_inuse(struct page *page)
 
 static inline int is_page_cache_freeable(struct page *page)
 {
-       return page_count(page) - !!PagePrivate(page) == 2;
+       return page_count(page) - !!page_has_private(page) == 2;
 }
 
 static int may_write_to_queue(struct backing_dev_info *bdi)
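PagePrivate() tests only PG_private, but with the FS-Cache work the kernel gained a second private bit, PG_private_2; page_has_private() covers both, which is why these call sites are converted. Paraphrasing include/linux/page-flags.h from this era (a sketch, not verbatim):

    #define PAGE_FLAGS_PRIVATE \
            ((1UL << PG_private) | (1UL << PG_private_2))

    /* Returns 1 if the page carries private data that pins it. */
    static inline int page_has_private(struct page *page)
    {
            return !!(page->flags & PAGE_FLAGS_PRIVATE);
    }

Since page_has_private() already normalizes to 0 or 1, the extra !! in is_page_cache_freeable() above is redundant but harmless.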
@@ -361,7 +370,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
                 * Some data journaling orphaned pages can have
                 * page->mapping == NULL while being dirty with clean buffers.
                 */
-               if (PagePrivate(page)) {
+               if (page_has_private(page)) {
                        if (try_to_free_buffers(page)) {
                                ClearPageDirty(page);
                                printk("%s: orphaned page\n", __func__);
@@ -721,7 +730,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                 * process address space (page_count == 1) it can be freed.
                 * Otherwise, leave the page on the LRU so it is swappable.
                 */
-               if (PagePrivate(page)) {
+               if (page_has_private(page)) {
                        if (!try_to_release_page(page, sc->gfp_mask))
                                goto activate_locked;
                        if (!mapping && page_count(page) == 1) {
@@ -1374,7 +1383,7 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
        /* If we have no swap space, do not bother scanning anon pages. */
-       if (nr_swap_pages <= 0) {
+       if (!sc->may_swap || (nr_swap_pages <= 0)) {
                percent[0] = 0;
                percent[1] = 100;
                return;
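Here percent[0] is the anon share and percent[1] the file share of scan pressure, so a caller that clears may_swap (or a box with no swap space left) scans only the page cache. Further down in shrink_zone(), outside this hunk, the split scales the per-LRU scan target roughly like this (paraphrased):

    scan = zone_nr_pages(zone, sc, l);
    if (priority) {
            scan >>= priority;
            scan = (scan * percent[file]) / 100; /* percent[0] == 0 idles anon LRUs */
    }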
@@ -1462,7 +1471,7 @@ static void shrink_zone(int priority, struct zone *zone,
 
        for_each_evictable_lru(l) {
                int file = is_file_lru(l);
-               int scan;
+               unsigned long scan;
 
                scan = zone_nr_pages(zone, sc, l);
                if (priority) {
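Widening scan from int to unsigned long guards the multiplication sketched above: with a 32-bit int, scan * percent[file] wraps once a zone holds more than INT_MAX / 100 pages, i.e. about 21 million pages, or roughly 80 GiB of 4 KiB pages, which is reachable on large-memory machines at low reclaim priority.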
@@ -1538,7 +1547,8 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
        struct zone *zone;
 
        sc->all_unreclaimable = 1;
-       for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+       for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
+                                       sc->nodemask) {
                if (!populated_zone(zone))
                        continue;
                /*
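for_each_zone_zonelist_nodemask() is the nodemask-aware variant of the old iterator; a NULL mask visits every zone, so callers without placement constraints behave exactly as before. A hypothetical illustration (variable names invented):

    nodemask_t allowed = NODE_MASK_NONE;
    struct zoneref *z;
    struct zone *zone;

    node_set(1, allowed);   /* pretend the cpuset allows only node 1 */
    for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, &allowed) {
            /* only zones backed by node 1 are visited here */
    }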
@@ -1683,17 +1693,19 @@ out:
 }
 
 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
-                                                               gfp_t gfp_mask)
+                               gfp_t gfp_mask, nodemask_t *nodemask)
 {
        struct scan_control sc = {
                .gfp_mask = gfp_mask,
                .may_writepage = !laptop_mode,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
                .may_unmap = 1,
+               .may_swap = 1,
                .swappiness = vm_swappiness,
                .order = order,
                .mem_cgroup = NULL,
                .isolate_pages = isolate_pages_global,
+               .nodemask = nodemask,
        };
 
        return do_try_to_free_pages(zonelist, &sc);
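The new parameter threads the allocation's nodemask from the page allocator into direct reclaim, so reclaim effort lands on the nodes the allocation is actually allowed to use. The matching call-site update in mm/page_alloc.c, part of the same series and paraphrased here, is along the lines of:

    did_some_progress = try_to_free_pages(zonelist, order,
                                          gfp_mask, nodemask);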
@@ -1709,17 +1721,16 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
        struct scan_control sc = {
                .may_writepage = !laptop_mode,
                .may_unmap = 1,
+               .may_swap = !noswap,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
                .swappiness = swappiness,
                .order = 0,
                .mem_cgroup = mem_cont,
                .isolate_pages = mem_cgroup_isolate_pages,
+               .nodemask = NULL, /* we don't care about the placement */
        };
        struct zonelist *zonelist;
 
-       if (noswap)
-               sc.may_unmap = 0;
-
        sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
                        (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
        zonelist = NODE_DATA(numa_node_id())->node_zonelists;
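Previously noswap was emulated by clearing may_unmap, which had the side effect of also blocking reclaim of mapped file pages. With a dedicated may_swap flag, noswap disables only the anon side (via the get_scan_ratio() check above) while mapped file pages stay reclaimable. The call itself is unchanged; e.g., with the signature of this era:

    /* reclaim from a cgroup without consuming any swap */
    nr = try_to_free_mem_cgroup_pages(mem, GFP_KERNEL,
                                      1 /* noswap */, swappiness);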
@@ -1758,6 +1769,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
                .may_unmap = 1,
+               .may_swap = 1,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
                .swappiness = vm_swappiness,
                .order = order,
@@ -1958,7 +1970,7 @@ static int kswapd(void *p)
        struct reclaim_state reclaim_state = {
                .reclaimed_slab = 0,
        };
-       node_to_cpumask_ptr(cpumask, pgdat->node_id);
+       const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
 
        lockdep_set_current_reclaim_state(GFP_KERNEL);
 
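node_to_cpumask_ptr() was a macro that declared its first argument behind the caller's back; the cpumask API cleanup replaces it with cpumask_of_node(), which returns a const struct cpumask * and keeps declaration and initialization explicit. kswapd then binds itself to its node with something like this (paraphrased from the surrounding code):

    const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

    if (!cpumask_empty(cpumask))
            set_cpus_allowed_ptr(current, cpumask);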
@@ -2079,13 +2091,13 @@ static void shrink_all_zones(unsigned long nr_pages, int prio,
                                nr_reclaimed += shrink_list(l, nr_to_scan, zone,
                                                                sc, prio);
                                if (nr_reclaimed >= nr_pages) {
-                                       sc->nr_reclaimed = nr_reclaimed;
+                                       sc->nr_reclaimed += nr_reclaimed;
                                        return;
                                }
                        }
                }
        }
-       sc->nr_reclaimed = nr_reclaimed;
+       sc->nr_reclaimed += nr_reclaimed;
 }
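shrink_all_zones() is invoked repeatedly by shrink_all_memory(), once per pass and priority, so plain assignment discarded the tally of every call but the last; += accumulates across calls, paired with the explicit .nr_reclaimed = 0 initializer added in the next hunk. The caller loop, simplified from shrink_all_memory() of this era:

    for (pass = 0; pass < 5; pass++) {
            int prio;

            for (prio = DEF_PRIORITY; prio >= 0; prio--) {
                    shrink_all_zones(nr_pages - sc.nr_reclaimed,
                                     prio, pass, &sc);
                    if (sc.nr_reclaimed >= nr_pages)
                            goto out;
            }
    }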
 
 /*
@@ -2106,6 +2118,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
                .may_unmap = 0,
                .may_writepage = 1,
                .isolate_pages = isolate_pages_global,
+               .nr_reclaimed = 0,
        };
 
        current->reclaim_state = &reclaim_state;
@@ -2195,7 +2208,9 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
        if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
                for_each_node_state(nid, N_HIGH_MEMORY) {
                        pg_data_t *pgdat = NODE_DATA(nid);
-                       node_to_cpumask_ptr(mask, pgdat->node_id);
+                       const struct cpumask *mask;
+
+                       mask = cpumask_of_node(pgdat->node_id);
 
                        if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
                                /* One of our CPUs online: restore mask */
@@ -2286,6 +2301,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
        struct scan_control sc = {
                .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
                .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
+               .may_swap = 1,
                .swap_cluster_max = max_t(unsigned long, nr_pages,
                                        SWAP_CLUSTER_MAX),
                .gfp_mask = gfp_mask,
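__zone_reclaim() now maps the RECLAIM_SWAP sysctl bit onto may_unmap and enables may_swap unconditionally: may_unmap gates unmapping pages out of page tables, while may_swap gates the anon/swap side of scanning. For reference, the mode bits defined near the top of mm/vmscan.c (comments paraphrased):

    #define RECLAIM_OFF   0
    #define RECLAIM_ZONE  (1<<0)  /* run shrink_zone() when zone_reclaim fires */
    #define RECLAIM_WRITE (1<<1)  /* write out dirty pages during zone reclaim */
    #define RECLAIM_SWAP  (1<<2)  /* unmap and swap pages during zone reclaim */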