Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 79a98d9..dea7abd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -837,7 +837,6 @@ int __isolate_lru_page(struct page *page, int mode, int file)
                 */
                ClearPageLRU(page);
                ret = 0;
-               mem_cgroup_del_lru(page);
        }
 
        return ret;
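
With mem_cgroup_del_lru() dropped from the success path, __isolate_lru_page() now only claims the page and clears PG_lru; the memcg-side LRU bookkeeping moves to the callers in the hunks below. A simplified sketch of the resulting callee (mode/file checks elided, not the full function as it stands in the tree):

int __isolate_lru_page(struct page *page, int mode, int file)
{
	int ret = -EINVAL;

	/* Only consider pages that are actually on an LRU list. */
	if (!PageLRU(page))
		return ret;

	/* ... active/file/unevictable mode checks elided ... */

	ret = -EBUSY;
	if (likely(get_page_unless_zero(page))) {
		/*
		 * Clear PageLRU only after taking a reference, so the
		 * page release code never sees a half-isolated page.
		 */
		ClearPageLRU(page);
		ret = 0;
		/* memcg LRU accounting is now the caller's job */
	}

	return ret;
}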
@@ -885,12 +884,14 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                switch (__isolate_lru_page(page, mode, file)) {
                case 0:
                        list_move(&page->lru, dst);
+                       mem_cgroup_del_lru(page);
                        nr_taken++;
                        break;
 
                case -EBUSY:
                        /* else it is being freed elsewhere */
                        list_move(&page->lru, src);
+                       mem_cgroup_rotate_lru_list(page, page_lru(page));
                        continue;
 
                default:
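
Taken together with the previous hunk, the contract is that whoever moves a page off the per-zone LRU also updates the memcg LRU. A condensed caller-side sketch of that pattern; isolate_one() is a hypothetical helper name used only for illustration, not kernel code:

/* Hypothetical helper, illustrating the caller-side contract only. */
static void isolate_one(struct page *page, int mode, int file,
			struct list_head *src, struct list_head *dst)
{
	switch (__isolate_lru_page(page, mode, file)) {
	case 0:
		/* page is ours: take it off the memcg LRU as well */
		list_move(&page->lru, dst);
		mem_cgroup_del_lru(page);
		break;
	case -EBUSY:
		/* raced with a concurrent user: put it back and rotate its memcg list */
		list_move(&page->lru, src);
		mem_cgroup_rotate_lru_list(page, page_lru(page));
		break;
	default:
		break;	/* not on an LRU, or on the wrong list */
	}
}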
@@ -929,18 +930,11 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                        /* Check that we have not crossed a zone boundary. */
                        if (unlikely(page_zone_id(cursor_page) != zone_id))
                                continue;
-                       switch (__isolate_lru_page(cursor_page, mode, file)) {
-                       case 0:
+                       if (__isolate_lru_page(cursor_page, mode, file) == 0) {
                                list_move(&cursor_page->lru, dst);
+                               mem_cgroup_del_lru(cursor_page);
                                nr_taken++;
                                scan++;
-                               break;
-
-                       case -EBUSY:
-                               /* else it is being freed elsewhere */
-                               list_move(&cursor_page->lru, src);
-                       default:
-                               break;  /* ! on LRU or wrong list */
                        }
                }
        }
@@ -1110,7 +1104,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                 */
                if (nr_freed < nr_taken && !current_is_kswapd() &&
                    lumpy_reclaim) {
-                       congestion_wait(WRITE, HZ/10);
+                       congestion_wait(BLK_RW_ASYNC, HZ/10);
 
                        /*
                         * The attempt at page out may have made some
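
The WRITE to BLK_RW_ASYNC substitution here recurs in the hunks that follow: congestion_wait() now takes one of the BLK_RW_* selectors from <linux/backing-dev.h> to say which congestion wait queue to sleep on, instead of reusing the READ/WRITE I/O direction macros. A minimal usage sketch, assuming the interface of this kernel period (the helper name is illustrative only):

#include <linux/backing-dev.h>	/* congestion_wait(), BLK_RW_ASYNC / BLK_RW_SYNC */

/* Illustrative helper: throttle reclaim behind async (writeback) congestion. */
static void reclaim_throttle_sketch(void)
{
	/*
	 * Sleep for up to 100ms, or until a backing device signals that
	 * its async queue is no longer congested.
	 */
	congestion_wait(BLK_RW_ASYNC, HZ / 10);
}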
@@ -1727,7 +1721,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 
                /* Take a nap, wait for some writeback to complete */
                if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
-                       congestion_wait(WRITE, HZ/10);
+                       congestion_wait(BLK_RW_ASYNC, HZ/10);
        }
        /* top priority shrink_zones still had more to do? don't OOM, then */
        if (!sc->all_unreclaimable && scanning_global_lru(sc))
@@ -1966,7 +1960,7 @@ loop_again:
                 * another pass across the zones.
                 */
                if (total_scanned && priority < DEF_PRIORITY - 2)
-                       congestion_wait(WRITE, HZ/10);
+                       congestion_wait(BLK_RW_ASYNC, HZ/10);
 
                /*
                 * We do this so kswapd doesn't build up large priorities for
@@ -2239,7 +2233,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
                                goto out;
 
                        if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
-                               congestion_wait(WRITE, HZ / 10);
+                               congestion_wait(BLK_RW_ASYNC, HZ / 10);
                }
        }
 
@@ -2492,16 +2486,16 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
         */
        if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
            zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
-               return 0;
+               return ZONE_RECLAIM_FULL;
 
        if (zone_is_all_unreclaimable(zone))
-               return 0;
+               return ZONE_RECLAIM_FULL;
 
        /*
         * Do not scan if the allocation should not be delayed.
         */
        if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
-                       return 0;
+               return ZONE_RECLAIM_NOSCAN;
 
        /*
         * Only run zone reclaim on the local zone or on zones that do not
@@ -2511,13 +2505,17 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
         */
        node_id = zone_to_nid(zone);
        if (node_state(node_id, N_CPU) && node_id != numa_node_id())
-               return 0;
+               return ZONE_RECLAIM_NOSCAN;
 
        if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
-               return 0;
+               return ZONE_RECLAIM_NOSCAN;
+
        ret = __zone_reclaim(zone, gfp_mask, order);
        zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
 
+       if (!ret)
+               count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED);
+
        return ret;
 }
 #endif
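
For reference, the ZONE_RECLAIM_* values used above are defined in mm/vmscan.c in this same series; the sketch below gives the values as assumed here (check the tree for the authoritative definitions) and a hypothetical caller that distinguishes "did not scan" from "scanned and failed", simplified from the page-allocator pattern rather than copied from it:

/* Return values of zone_reclaim(); values assumed, see mm/vmscan.c. */
#define ZONE_RECLAIM_NOSCAN	-2	/* bailed out before scanning anything */
#define ZONE_RECLAIM_FULL	-1	/* nothing reclaimable left to scan */
#define ZONE_RECLAIM_SOME	0	/* scanned, but did not free enough */
#define ZONE_RECLAIM_SUCCESS	1	/* freed enough pages for the allocation */

/* Hypothetical caller sketch (not the actual page_alloc.c code). */
static bool zone_worth_retrying(struct zone *zone, gfp_t gfp_mask, int order)
{
	switch (zone_reclaim(zone, gfp_mask, order)) {
	case ZONE_RECLAIM_NOSCAN:
	case ZONE_RECLAIM_FULL:
		return false;	/* nothing was scanned, or nothing is left */
	default:
		return true;	/* some progress was made: recheck the watermark */
	}
}

The new count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED) call pairs with this: each time __zone_reclaim() runs but fails to free enough pages, the failure is counted in the vmstat event counters exported through /proc/vmstat.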