mmap: save some cycles for the shared anonymous mapping
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 172119c..613e89f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -148,8 +148,8 @@ static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
        return &zone->reclaim_stat;
 }
 
-static unsigned long zone_nr_pages(struct zone *zone, struct scan_control *sc,
-                                  enum lru_list lru)
+static unsigned long zone_nr_lru_pages(struct zone *zone,
+                               struct scan_control *sc, enum lru_list lru)
 {
        if (!scanning_global_lru(sc))
                return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru);
@@ -286,7 +286,12 @@ static inline int page_mapping_inuse(struct page *page)
 
 static inline int is_page_cache_freeable(struct page *page)
 {
-       return page_count(page) - !!page_has_private(page) == 2;
+       /*
+        * A freeable page cache page is referenced only by the caller
+        * that isolated the page, the page cache radix tree and
+        * optional buffer heads at page->private.
+        */
+       return page_count(page) - page_has_private(page) == 2;
 }
 
 static int may_write_to_queue(struct backing_dev_info *bdi)
@@ -361,7 +366,6 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
         * block, for some throttling. This happens by accident, because
         * swap_backing_dev_info is bust: it doesn't reflect the
         * congestion state of the swapdevs.  Easy to fix, if needed.
-        * See swapfile.c:page_queue_congested().
         */
        if (!is_page_cache_freeable(page))
                return PAGE_KEEP;
@@ -1484,10 +1488,10 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
        unsigned long ap, fp;
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
-       anon  = zone_nr_pages(zone, sc, LRU_ACTIVE_ANON) +
-               zone_nr_pages(zone, sc, LRU_INACTIVE_ANON);
-       file  = zone_nr_pages(zone, sc, LRU_ACTIVE_FILE) +
-               zone_nr_pages(zone, sc, LRU_INACTIVE_FILE);
+       anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
+               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
+       file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
+               zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
 
        if (scanning_global_lru(sc)) {
                free  = zone_page_state(zone, NR_FREE_PAGES);
@@ -1581,6 +1585,7 @@ static void shrink_zone(int priority, struct zone *zone,
        enum lru_list l;
        unsigned long nr_reclaimed = sc->nr_reclaimed;
        unsigned long swap_cluster_max = sc->swap_cluster_max;
+       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
        int noswap = 0;
 
        /* If we have no swap space, do not bother scanning anon pages. */
@@ -1595,17 +1600,14 @@ static void shrink_zone(int priority, struct zone *zone,
                int file = is_file_lru(l);
                unsigned long scan;
 
-               scan = zone_nr_pages(zone, sc, l);
+               scan = zone_nr_lru_pages(zone, sc, l);
                if (priority || noswap) {
                        scan >>= priority;
                        scan = (scan * percent[file]) / 100;
                }
-               if (scanning_global_lru(sc))
-                       nr[l] = nr_scan_try_batch(scan,
-                                                 &zone->lru[l].nr_saved_scan,
-                                                 swap_cluster_max);
-               else
-                       nr[l] = scan;
+               nr[l] = nr_scan_try_batch(scan,
+                                         &reclaim_stat->nr_saved_scan[l],
+                                         swap_cluster_max);
        }
 
        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
@@ -2215,6 +2217,7 @@ static void shrink_all_zones(unsigned long nr_pages, int prio,
 {
        struct zone *zone;
        unsigned long nr_reclaimed = 0;
+       struct zone_reclaim_stat *reclaim_stat;
 
        for_each_populated_zone(zone) {
                enum lru_list l;
@@ -2231,11 +2234,14 @@ static void shrink_all_zones(unsigned long nr_pages, int prio,
                                                l == LRU_ACTIVE_FILE))
                                continue;
 
-                       zone->lru[l].nr_saved_scan += (lru_pages >> prio) + 1;
-                       if (zone->lru[l].nr_saved_scan >= nr_pages || pass > 3) {
+                       reclaim_stat = get_reclaim_stat(zone, sc);
+                       reclaim_stat->nr_saved_scan[l] +=
+                                               (lru_pages >> prio) + 1;
+                       if (reclaim_stat->nr_saved_scan[l]
+                                               >= nr_pages || pass > 3) {
                                unsigned long nr_to_scan;
 
-                               zone->lru[l].nr_saved_scan = 0;
+                               reclaim_stat->nr_saved_scan[l] = 0;
                                nr_to_scan = min(nr_pages, lru_pages);
                                nr_reclaimed += shrink_list(l, nr_to_scan, zone,
                                                                sc, prio);
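
For context on the last two hunks: both shrink_zone() and shrink_all_zones() now accumulate their per-LRU scan counts in reclaim_stat->nr_saved_scan[l] and feed them to nr_scan_try_batch(), which only releases work in swap_cluster_max-sized batches. Because get_reclaim_stat() resolves to either the zone's own stats or the memcg's, the memcg reclaim path gets the same batching that the removed "else nr[l] = scan" branch used to skip. The helper itself is not part of this diff; the sketch below is a from-memory approximation of the 2.6.31-era version and may differ in detail from the actual source.

static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
                                       unsigned long *nr_saved_scan,
                                       unsigned long swap_cluster_max)
{
        unsigned long nr;

        /* Accumulate the (possibly tiny) scan request... */
        *nr_saved_scan += nr_to_scan;
        nr = *nr_saved_scan;

        /* ...and only hand back work once a full batch has built up. */
        if (nr >= swap_cluster_max)
                *nr_saved_scan = 0;
        else
                nr = 0;

        return nr;
}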