vmscan: cleanup the scan batching code
author     Wu Fengguang <fengguang.wu@intel.com>
           Tue, 16 Jun 2009 22:32:29 +0000 (15:32 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 17 Jun 2009 02:47:39 +0000 (19:47 -0700)
The vmscan batching logic is twisted.  Move it into a standalone function
nr_scan_try_batch() and document it.  No behavior change.
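
For reference, a minimal userspace sketch of the batching semantics (not
part of this patch; SWAP_CLUSTER_MAX is assumed to be 32, its usual
value): small scan requests accumulate in nr_saved_scan and only turn
into a real scan once the batch threshold is crossed:

    /* Standalone demo in plain C, not kernel code. */
    #include <stdio.h>

    static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
                                           unsigned long *nr_saved_scan,
                                           unsigned long swap_cluster_max)
    {
            unsigned long nr;

            *nr_saved_scan += nr_to_scan;
            nr = *nr_saved_scan;

            if (nr >= swap_cluster_max)
                    *nr_saved_scan = 0;   /* batch full: scan it all now */
            else
                    nr = 0;               /* too small: defer to a later call */

            return nr;
    }

    int main(void)
    {
            unsigned long saved = 0;
            int i;

            /* Five requests of 10 pages against a batch of 32: the first
             * three are deferred (return 0), the fourth flushes all 40
             * accumulated pages at once, the fifth starts a new batch. */
            for (i = 0; i < 5; i++) {
                    unsigned long nr = nr_scan_try_batch(10, &saved, 32);
                    printf("request 10 -> scan %lu (saved now %lu)\n",
                           nr, saved);
            }
            return 0;
    }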

Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Christoph Lameter <cl@linux-foundation.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mmzone.h
mm/page_alloc.c
mm/vmscan.c
mm/vmstat.c

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index dd8487f..db976b9 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -334,9 +334,9 @@ struct zone {
 
        /* Fields commonly accessed by the page reclaim scanner */
        spinlock_t              lru_lock;       
-       struct {
+       struct zone_lru {
                struct list_head list;
-               unsigned long nr_scan;
+               unsigned long nr_saved_scan;    /* accumulated for batching */
        } lru[NR_LRU_LISTS];
 
        struct zone_reclaim_stat reclaim_stat;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 131655c..e5b8f62 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3657,7 +3657,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
                zone_pcp_init(zone);
                for_each_lru(l) {
                        INIT_LIST_HEAD(&zone->lru[l].list);
-                       zone->lru[l].nr_scan = 0;
+                       zone->lru[l].nr_saved_scan = 0;
                }
                zone->reclaim_stat.recent_rotated[0] = 0;
                zone->reclaim_stat.recent_rotated[1] = 0;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9673437..d4da097 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1492,6 +1492,26 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
        percent[1] = 100 - percent[0];
 }
 
+/*
+ * Smallish @nr_to_scan's are deposited in @nr_saved_scan,
+ * until we have collected @swap_cluster_max pages to scan.
+ */
+static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
+                                      unsigned long *nr_saved_scan,
+                                      unsigned long swap_cluster_max)
+{
+       unsigned long nr;
+
+       *nr_saved_scan += nr_to_scan;
+       nr = *nr_saved_scan;
+
+       if (nr >= swap_cluster_max)
+               *nr_saved_scan = 0;
+       else
+               nr = 0;
+
+       return nr;
+}
 
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
@@ -1517,14 +1537,11 @@ static void shrink_zone(int priority, struct zone *zone,
                        scan >>= priority;
                        scan = (scan * percent[file]) / 100;
                }
-               if (scanning_global_lru(sc)) {
-                       zone->lru[l].nr_scan += scan;
-                       nr[l] = zone->lru[l].nr_scan;
-                       if (nr[l] >= swap_cluster_max)
-                               zone->lru[l].nr_scan = 0;
-                       else
-                               nr[l] = 0;
-               } else
+               if (scanning_global_lru(sc))
+                       nr[l] = nr_scan_try_batch(scan,
+                                                 &zone->lru[l].nr_saved_scan,
+                                                 swap_cluster_max);
+               else
                        nr[l] = scan;
        }
 
@@ -2124,11 +2141,11 @@ static void shrink_all_zones(unsigned long nr_pages, int prio,
                                                l == LRU_ACTIVE_FILE))
                                continue;
 
-                       zone->lru[l].nr_scan += (lru_pages >> prio) + 1;
-                       if (zone->lru[l].nr_scan >= nr_pages || pass > 3) {
+                       zone->lru[l].nr_saved_scan += (lru_pages >> prio) + 1;
+                       if (zone->lru[l].nr_saved_scan >= nr_pages || pass > 3) {
                                unsigned long nr_to_scan;
 
-                               zone->lru[l].nr_scan = 0;
+                               zone->lru[l].nr_saved_scan = 0;
                                nr_to_scan = min(nr_pages, lru_pages);
                                nr_reclaimed += shrink_list(l, nr_to_scan, zone,
                                                                sc, prio);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 4151107..84c0555 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -718,10 +718,10 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
                   low_wmark_pages(zone),
                   high_wmark_pages(zone),
                   zone->pages_scanned,
-                  zone->lru[LRU_ACTIVE_ANON].nr_scan,
-                  zone->lru[LRU_INACTIVE_ANON].nr_scan,
-                  zone->lru[LRU_ACTIVE_FILE].nr_scan,
-                  zone->lru[LRU_INACTIVE_FILE].nr_scan,
+                  zone->lru[LRU_ACTIVE_ANON].nr_saved_scan,
+                  zone->lru[LRU_INACTIVE_ANON].nr_saved_scan,
+                  zone->lru[LRU_ACTIVE_FILE].nr_saved_scan,
+                  zone->lru[LRU_INACTIVE_FILE].nr_saved_scan,
                   zone->spanned_pages,
                   zone->present_pages);