memcg: reclaim shouldn't change zone->recent_rotated statistics
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7ea1440..da7c3a2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -617,7 +617,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                        referenced && page_mapping_inuse(page))
                        goto activate_locked;
 
-#ifdef CONFIG_SWAP
                /*
                 * Anonymous process memory has backing store?
                 * Try to allocate it some swap space here.
@@ -625,20 +624,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                if (PageAnon(page) && !PageSwapCache(page)) {
                        if (!(sc->gfp_mask & __GFP_IO))
                                goto keep_locked;
-                       switch (try_to_munlock(page)) {
-                       case SWAP_FAIL:         /* shouldn't happen */
-                       case SWAP_AGAIN:
-                               goto keep_locked;
-                       case SWAP_MLOCK:
-                               goto cull_mlocked;
-                       case SWAP_SUCCESS:
-                               ; /* fall thru'; add to swap cache */
-                       }
-                       if (!add_to_swap(page, GFP_ATOMIC))
+                       if (!add_to_swap(page))
                                goto activate_locked;
                        may_enter_fs = 1;
                }
-#endif /* CONFIG_SWAP */
 
                mapping = page_mapping(page);
 
@@ -752,6 +741,8 @@ free_it:
                continue;
 
 cull_mlocked:
+               if (PageSwapCache(page))
+                       try_to_free_swap(page);
                unlock_page(page);
                putback_lru_page(page);
                continue;
@@ -759,7 +750,7 @@ cull_mlocked:
 activate_locked:
                /* Not a candidate for swapping, so reclaim swap space. */
                if (PageSwapCache(page) && vm_swap_full())
-                       remove_exclusive_swap_page_ref(page);
+                       try_to_free_swap(page);
                VM_BUG_ON(PageActive(page));
                SetPageActive(page);
                pgactivate++;
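
Both new call sites use try_to_free_swap(), which replaces remove_exclusive_swap_page_ref(): a culled mlocked page may already have been handed a swap slot just above, and an activated page whose slot is held only by the swap cache wastes space once vm_swap_full() (roughly: swap more than half used) says slots are scarce. A sketch of its likely shape, assuming the usual "free the slot if this page is its only user" contract:

	/*
	 * Sketch: drop the page's swap-cache entry when nothing else
	 * references the swap slot, so the slot can be reused.
	 * Returns nonzero on success; the page must be locked.
	 */
	int try_to_free_swap(struct page *page)
	{
		VM_BUG_ON(!PageLocked(page));

		if (!PageSwapCache(page))
			return 0;
		if (PageWriteback(page))	/* swap-out still in flight */
			return 0;
		if (page_swapcount(page))	/* slot still referenced */
			return 0;

		delete_from_swap_cache(page);
		SetPageDirty(page);	/* data now lives only in RAM */
		return 1;
	}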
@@ -1248,13 +1239,15 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                list_add(&page->lru, &l_inactive);
        }
 
+       spin_lock_irq(&zone->lru_lock);
        /*
         * Count referenced pages from currently used mappings as
         * rotated, even though they are moved to the inactive list.
         * This helps balance scan pressure between file and anonymous
         * pages in get_scan_ratio.
         */
-       zone->recent_rotated[!!file] += pgmoved;
+       if (scan_global_lru(sc))
+               zone->recent_rotated[!!file] += pgmoved;
 
        /*
         * Move the pages to the [file or anon] inactive list.
@@ -1263,7 +1256,6 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 
        pgmoved = 0;
        lru = LRU_BASE + file * LRU_FILE;
-       spin_lock_irq(&zone->lru_lock);
        while (!list_empty(&l_inactive)) {
                page = lru_to_page(&l_inactive);
                prefetchw_prev_lru_page(page, &l_inactive, flags);
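
This is the hunk the subject line is about: recent_rotated[] is global per-zone state consumed by get_scan_ratio(), so a cgroup-limited reclaim pass must not skew it; only global reclaim updates it now. Moving the spin_lock_irq() up also puts the counter bump under zone->lru_lock like every other recent_rotated update. A sketch of the scan_global_lru() test, assuming it keys off the scan_control's target cgroup:

	/* Sketch: global reclaim carries no target cgroup. */
	static inline int scan_global_lru(struct scan_control *sc)
	{
		return !sc->mem_cgroup;	/* NULL => zone-wide reclaim */
	}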
@@ -1336,12 +1328,6 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
        unsigned long anon_prio, file_prio;
        unsigned long ap, fp;
 
-       anon  = zone_page_state(zone, NR_ACTIVE_ANON) +
-               zone_page_state(zone, NR_INACTIVE_ANON);
-       file  = zone_page_state(zone, NR_ACTIVE_FILE) +
-               zone_page_state(zone, NR_INACTIVE_FILE);
-       free  = zone_page_state(zone, NR_FREE_PAGES);
-
        /* If we have no swap space, do not bother scanning anon pages. */
        if (nr_swap_pages <= 0) {
                percent[0] = 0;
@@ -1349,6 +1335,12 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
                return;
        }
 
+       anon  = zone_page_state(zone, NR_ACTIVE_ANON) +
+               zone_page_state(zone, NR_INACTIVE_ANON);
+       file  = zone_page_state(zone, NR_ACTIVE_FILE) +
+               zone_page_state(zone, NR_INACTIVE_FILE);
+       free  = zone_page_state(zone, NR_FREE_PAGES);
+
        /* If we have very few page cache pages, force-scan anon pages. */
        if (unlikely(file + free <= zone->pages_high)) {
                percent[0] = 100;
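
The get_scan_ratio() hunks are a micro-cleanup: the three zone_page_state() sums move below the no-swap early return so they are not computed just to be thrown away. For context, those sums and the recent_* counters drive the anon/file balance later in the same function, roughly like this (a paraphrase of the surrounding logic, not a verbatim quote):

	/*
	 * The more a list's pages get rotated (found referenced), the
	 * smaller its share of the scan: rotations inflate the divisor.
	 */
	ap  = (anon_prio + 1) * (zone->recent_scanned[0] + 1);
	ap /= zone->recent_rotated[0] + 1;

	fp  = (file_prio + 1) * (zone->recent_scanned[1] + 1);
	fp /= zone->recent_rotated[1] + 1;

	percent[0] = 100 * ap / (ap + fp + 1);	/* anon share */
	percent[1] = 100 - percent[0];		/* file share */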
@@ -1902,7 +1894,7 @@ static int kswapd(void *p)
        };
        node_to_cpumask_ptr(cpumask, pgdat->node_id);
 
-       if (!cpus_empty(*cpumask))
+       if (!cpumask_empty(cpumask))
                set_cpus_allowed_ptr(tsk, cpumask);
        current->reclaim_state = &reclaim_state;
 
@@ -2141,7 +2133,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
                        pg_data_t *pgdat = NODE_DATA(nid);
                        node_to_cpumask_ptr(mask, pgdat->node_id);
 
-                       if (any_online_cpu(*mask) < nr_cpu_ids)
+                       if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
                                /* One of our CPUs online: restore mask */
                                set_cpus_allowed_ptr(pgdat->kswapd, mask);
                }
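
The last two hunks are the cpumask API conversion: helpers that took a cpumask_t by value (cpus_empty(), any_online_cpu()) give way to pointer-based ones (cpumask_empty(), cpumask_any_and()), so large masks are never copied onto the stack. The hotplug test works because of the return-value convention sketched here:

	/*
	 * cpumask_any_and() returns an arbitrary CPU from the
	 * intersection of the two masks, or >= nr_cpu_ids when the
	 * intersection is empty.  So "< nr_cpu_ids" means at least one
	 * of this node's CPUs is online -- exactly what the old
	 * any_online_cpu(*mask) reported for a by-value cpumask_t.
	 */
	unsigned int cpu = cpumask_any_and(cpu_online_mask, mask);

	if (cpu < nr_cpu_ids)
		set_cpus_allowed_ptr(pgdat->kswapd, mask);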