diff --git a/mm/vmscan.c b/mm/vmscan.c
index d9a0e0d..3ff3311 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -13,7 +13,7 @@
 
 #include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/slab.h>
+#include <linux/gfp.h>
 #include <linux/kernel_stat.h>
 #include <linux/swap.h>
 #include <linux/pagemap.h>
@@ -561,18 +561,18 @@ redo:
 enum page_references {
        PAGEREF_RECLAIM,
        PAGEREF_RECLAIM_CLEAN,
+       PAGEREF_KEEP,
        PAGEREF_ACTIVATE,
 };
 
 static enum page_references page_check_references(struct page *page,
                                                  struct scan_control *sc)
 {
+       int referenced_ptes, referenced_page;
        unsigned long vm_flags;
-       int referenced;
 
-       referenced = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
-       if (!referenced)
-               return PAGEREF_RECLAIM;
+       referenced_ptes = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
+       referenced_page = TestClearPageReferenced(page);
 
        /* Lumpy reclaim - ignore references */
        if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
@@ -585,11 +585,36 @@ static enum page_references page_check_references(struct page *page,
        if (vm_flags & VM_LOCKED)
                return PAGEREF_RECLAIM;
 
-       if (page_mapped(page))
-               return PAGEREF_ACTIVATE;
+       if (referenced_ptes) {
+               if (PageAnon(page))
+                       return PAGEREF_ACTIVATE;
+               /*
+                * All mapped pages start out with page table
+                * references from the instantiating fault, so we need
+                * to look twice if a mapped file page is used more
+                * than once.
+                *
+                * Mark it and spare it for another trip around the
+                * inactive list.  Another page table reference will
+                * lead to its activation.
+                *
+                * Note: the mark is set for activated pages as well
+                * so that recently deactivated but used pages are
+                * quickly recovered.
+                */
+               SetPageReferenced(page);
+
+               if (referenced_page)
+                       return PAGEREF_ACTIVATE;
+
+               return PAGEREF_KEEP;
+       }
 
        /* Reclaim if clean, defer dirty pages to writeback */
-       return PAGEREF_RECLAIM_CLEAN;
+       if (referenced_page)
+               return PAGEREF_RECLAIM_CLEAN;
+
+       return PAGEREF_RECLAIM;
 }
 
 /*
@@ -657,6 +682,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                switch (references) {
                case PAGEREF_ACTIVATE:
                        goto activate_locked;
+               case PAGEREF_KEEP:
+                       goto keep_locked;
                case PAGEREF_RECLAIM:
                case PAGEREF_RECLAIM_CLEAN:
                        ; /* try to reclaim the page below */
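
(Aside: the decision table that page_check_references() gains above can be exercised on its own. Below is a minimal userspace sketch, not kernel code: struct fake_page, check_references() and the stand-ins for PageAnon()/TestClearPageReferenced()/SetPageReferenced() are invented for illustration, and the lumpy-reclaim and VM_LOCKED shortcuts are omitted.)

#include <stdbool.h>
#include <stdio.h>

enum page_references {
	PAGEREF_RECLAIM,
	PAGEREF_RECLAIM_CLEAN,
	PAGEREF_KEEP,
	PAGEREF_ACTIVATE,
};

struct fake_page {
	bool anon;		/* stand-in for PageAnon()            */
	bool referenced_flag;	/* stand-in for the PG_referenced bit */
};

/* Mirrors the decision table added to page_check_references(). */
static enum page_references check_references(struct fake_page *page,
					     int referenced_ptes)
{
	/* TestClearPageReferenced(): read and clear the software bit */
	bool referenced_page = page->referenced_flag;

	page->referenced_flag = false;

	if (referenced_ptes) {
		if (page->anon)
			return PAGEREF_ACTIVATE;
		/*
		 * A mapped file page always carries one PTE reference
		 * from the instantiating fault, so remember this trip
		 * and only activate the page once it has been seen
		 * referenced on two consecutive passes.
		 */
		page->referenced_flag = true;

		if (referenced_page)
			return PAGEREF_ACTIVATE;

		return PAGEREF_KEEP;
	}

	/* Reclaim if clean, defer dirty pages to writeback */
	if (referenced_page)
		return PAGEREF_RECLAIM_CLEAN;

	return PAGEREF_RECLAIM;
}

int main(void)
{
	struct fake_page file_page = { .anon = false };

	/* First pass: only the fault's own reference -> keep (2). */
	printf("first pass:  %d\n", check_references(&file_page, 1));
	/* Second pass with another reference -> activate (3). */
	printf("second pass: %d\n", check_references(&file_page, 1));
	return 0;
}

Run twice over the same mapped file page, the sketch returns PAGEREF_KEEP and then PAGEREF_ACTIVATE, which is the used-more-than-once behaviour the in-code comment describes; shrink_page_list() turns PAGEREF_KEEP into the keep_locked path added above.
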
@@ -1359,9 +1386,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                        continue;
                }
 
-               /* page_referenced clears PageReferenced */
-               if (page_mapped(page) &&
-                   page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
+               if (page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
                        nr_rotated++;
                        /*
                         * Identify referenced, file-backed active pages and
@@ -1510,13 +1535,6 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
        unsigned long ap, fp;
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
-       /* If we have no swap space, do not bother scanning anon pages. */
-       if (!sc->may_swap || (nr_swap_pages <= 0)) {
-               percent[0] = 0;
-               percent[1] = 100;
-               return;
-       }
-
        anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
                zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
        file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
@@ -1614,20 +1632,22 @@ static void shrink_zone(int priority, struct zone *zone,
        unsigned long nr_reclaimed = sc->nr_reclaimed;
        unsigned long nr_to_reclaim = sc->nr_to_reclaim;
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+       int noswap = 0;
 
-       get_scan_ratio(zone, sc, percent);
+       /* If we have no swap space, do not bother scanning anon pages. */
+       if (!sc->may_swap || (nr_swap_pages <= 0)) {
+               noswap = 1;
+               percent[0] = 0;
+               percent[1] = 100;
+       } else
+               get_scan_ratio(zone, sc, percent);
 
        for_each_evictable_lru(l) {
                int file = is_file_lru(l);
                unsigned long scan;
 
-               if (percent[file] == 0) {
-                       nr[l] = 0;
-                       continue;
-               }
-
                scan = zone_nr_lru_pages(zone, sc, l);
-               if (priority) {
+               if (priority || noswap) {
                        scan >>= priority;
                        scan = (scan * percent[file]) / 100;
                }
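
(Aside: the shrink_zone() hunk above moves the no-swap check out of get_scan_ratio() and drops the percent[file] == 0 shortcut; the "priority || noswap" test makes the percentage scaling run even at priority 0, so a zero anon percentage still zeroes the anon scan target. A small standalone sketch with made-up page counts, not taken from the patch, shows the arithmetic:)

#include <stdio.h>

int main(void)
{
	unsigned long lru_pages[2] = { 8192, 16384 };	/* anon, file: made-up counts */
	unsigned long percent[2];			/* anon/file scan percentages */
	int priority = 0;				/* lowest priority: scan everything */
	int noswap = 1;					/* pretend nr_swap_pages == 0 */

	if (noswap) {
		percent[0] = 0;		/* never scan anon without swap space */
		percent[1] = 100;
	} else {
		/* get_scan_ratio() would compute the split here */
		percent[0] = percent[1] = 50;
	}

	for (int file = 0; file < 2; file++) {
		unsigned long scan = lru_pages[file];

		/* With "priority || noswap", the scaling is not skipped at
		 * priority 0, so percent[0] == 0 zeroes the anon target. */
		if (priority || noswap) {
			scan >>= priority;
			scan = (scan * percent[file]) / 100;
		}
		printf("%s: scan %lu pages\n", file ? "file" : "anon", scan);
	}
	return 0;	/* prints: anon 0, file 16384 */
}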