page-allocator: change migratetype for all pageblocks within a high-order page during...
mm/vmscan.c
index e8fa2d9..e219b47 100644
@@ -630,9 +630,14 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
                referenced = page_referenced(page, 1,
                                                sc->mem_cgroup, &vm_flags);
-               /* In active use or really unfreeable?  Activate it. */
+               /*
+                * In active use or really unfreeable?  Activate it.
+                * If a page with PG_mlocked lost the isolation race,
+                * try_to_unmap() will move it to the unevictable list.
+                */
                if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
-                                       referenced && page_mapping_inuse(page))
+                                       referenced && page_mapping_inuse(page)
+                                       && !(vm_flags & VM_LOCKED))
                        goto activate_locked;
 
                /*
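
For reference, a minimal standalone sketch of the activation test above (not kernel code; the helper name is invented and the flag value is only illustrative): a page is put back on the active list only if it is referenced, its mapping is in use, the reclaim is not a costly-order lumpy reclaim, and it does not belong to an mlocked VMA, since try_to_unmap() will park mlocked pages on the unevictable list instead.

#include <stdbool.h>

#define VM_LOCKED               0x00002000UL    /* illustrative value */
#define PAGE_ALLOC_COSTLY_ORDER 3

/* Illustrative stand-in for the test in shrink_page_list() above. */
static bool should_activate(int order, int referenced,
                            bool mapping_inuse, unsigned long vm_flags)
{
        if (order > PAGE_ALLOC_COSTLY_ORDER)
                return false;   /* lumpy reclaim: keep scanning */
        if (!referenced || !mapping_inuse)
                return false;   /* not in active use */
        if (vm_flags & VM_LOCKED)
                return false;   /* mlocked: leave it to try_to_unmap() */
        return true;            /* goto activate_locked */
}
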
@@ -930,9 +935,19 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                        /* Check that we have not crossed a zone boundary. */
                        if (unlikely(page_zone_id(cursor_page) != zone_id))
                                continue;
+
+                       /*
+                        * If we don't have enough swap space, reclaiming
+                        * anon pages which don't already have a swap slot
+                        * is pointless.
+                        */
+                       if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
+                                       !PageSwapCache(cursor_page))
+                               continue;
+
                        if (__isolate_lru_page(cursor_page, mode, file) == 0) {
                                list_move(&cursor_page->lru, dst);
-                               mem_cgroup_del_lru(page);
+                               mem_cgroup_del_lru(cursor_page);
                                nr_taken++;
                                scan++;
                        }
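
A hedged sketch of the new cursor-page filter in isolate_lru_pages(): when no swap is free, an anon page that is not already in the swap cache can never be paged out, so lumpy reclaim skips it instead of isolating it for nothing. The struct and helper below are illustrative stand-ins, not kernel types.

#include <stdbool.h>

struct page_info {              /* stand-in for the relevant page state */
        bool anon;              /* PageAnon() */
        bool swapcache;         /* PageSwapCache() */
};

static bool worth_isolating(long nr_swap_pages, const struct page_info *p)
{
        /* Without free swap, an anon page with no swap slot yet cannot
         * be written out, so taking it off the LRU is pure churn. */
        if (nr_swap_pages <= 0 && p->anon && !p->swapcache)
                return false;
        return true;
}
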
@@ -1029,6 +1044,31 @@ int isolate_lru_page(struct page *page)
 }
 
 /*
+ * Are there way too many processes in the direct reclaim path already?
+ */
+static int too_many_isolated(struct zone *zone, int file,
+               struct scan_control *sc)
+{
+       unsigned long inactive, isolated;
+
+       if (current_is_kswapd())
+               return 0;
+
+       if (!scanning_global_lru(sc))
+               return 0;
+
+       if (file) {
+               inactive = zone_page_state(zone, NR_INACTIVE_FILE);
+               isolated = zone_page_state(zone, NR_ISOLATED_FILE);
+       } else {
+               inactive = zone_page_state(zone, NR_INACTIVE_ANON);
+               isolated = zone_page_state(zone, NR_ISOLATED_ANON);
+       }
+
+       return isolated > inactive;
+}
+
+/*
  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
  * of reclaimed pages
  */
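
The too_many_isolated() check above feeds the throttle loop added to shrink_inactive_list() further down. As a rough standalone illustration (stubbed counters and helpers, not the kernel API), the idea is: a direct reclaimer that finds more pages already isolated than remain on the inactive list waits instead of piling on, but a task with a fatal signal pending is let through immediately so it can exit and free its memory.

#include <stdbool.h>
#include <unistd.h>

static long nr_inactive = 1024;         /* stand-in for NR_INACTIVE_* */
static long nr_isolated;                /* stand-in for NR_ISOLATED_* */

static bool fatal_signal_pending(void)  /* stub for the kernel helper */
{
        return false;
}

/* Returns 0 when it is safe to isolate another batch, -1 if the caller
 * should give up because it is being killed. */
static int wait_for_isolation_room(void)
{
        while (nr_isolated > nr_inactive) {
                usleep(100 * 1000);     /* roughly the HZ/10 backoff above */
                if (fatal_signal_pending())
                        return -1;
        }
        return 0;
}
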
@@ -1043,6 +1083,14 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
        int lumpy_reclaim = 0;
 
+       while (unlikely(too_many_isolated(zone, file, sc))) {
+               congestion_wait(BLK_RW_ASYNC, HZ/10);
+
+               /* We are about to die and free our memory. Return now. */
+               if (fatal_signal_pending(current))
+                       return SWAP_CLUSTER_MAX;
+       }
+
        /*
         * If we need a large contiguous chunk of memory, or have
         * trouble getting a small set of contiguous pages, we
@@ -1067,10 +1115,26 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                unsigned long nr_active;
                unsigned int count[NR_LRU_LISTS] = { 0, };
                int mode = lumpy_reclaim ? ISOLATE_BOTH : ISOLATE_INACTIVE;
+               unsigned long nr_anon;
+               unsigned long nr_file;
 
                nr_taken = sc->isolate_pages(sc->swap_cluster_max,
                             &page_list, &nr_scan, sc->order, mode,
                                zone, sc->mem_cgroup, 0, file);
+
+               if (scanning_global_lru(sc)) {
+                       zone->pages_scanned += nr_scan;
+                       if (current_is_kswapd())
+                               __count_zone_vm_events(PGSCAN_KSWAPD, zone,
+                                                      nr_scan);
+                       else
+                               __count_zone_vm_events(PGSCAN_DIRECT, zone,
+                                                      nr_scan);
+               }
+
+               if (nr_taken == 0)
+                       goto done;
+
                nr_active = clear_active_flags(&page_list, count);
                __count_vm_events(PGDEACTIVATE, nr_active);
 
@@ -1083,8 +1147,10 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                __mod_zone_page_state(zone, NR_INACTIVE_ANON,
                                                -count[LRU_INACTIVE_ANON]);
 
-               if (scanning_global_lru(sc))
-                       zone->pages_scanned += nr_scan;
+               nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
+               nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
+               __mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
+               __mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
 
                reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
                reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
@@ -1104,7 +1170,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                 */
                if (nr_freed < nr_taken && !current_is_kswapd() &&
                    lumpy_reclaim) {
-                       congestion_wait(WRITE, HZ/10);
+                       congestion_wait(BLK_RW_ASYNC, HZ/10);
 
                        /*
                         * The attempt at page out may have made some
@@ -1118,18 +1184,12 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                }
 
                nr_reclaimed += nr_freed;
+
                local_irq_disable();
-               if (current_is_kswapd()) {
-                       __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scan);
+               if (current_is_kswapd())
                        __count_vm_events(KSWAPD_STEAL, nr_freed);
-               } else if (scanning_global_lru(sc))
-                       __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
-
                __count_zone_vm_events(PGSTEAL, zone, nr_freed);
 
-               if (nr_taken == 0)
-                       goto done;
-
                spin_lock(&zone->lru_lock);
                /*
                 * Put back any unfreeable pages.
@@ -1148,8 +1208,8 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                        SetPageLRU(page);
                        lru = page_lru(page);
                        add_page_to_lru_list(zone, page, lru);
-                       if (PageActive(page)) {
-                               int file = !!page_is_file_cache(page);
+                       if (is_active_lru(lru)) {
+                               int file = !!is_file_lru(lru);
                                reclaim_stat->recent_rotated[file]++;
                        }
                        if (!pagevec_add(&pvec, page)) {
@@ -1158,10 +1218,13 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                                spin_lock_irq(&zone->lru_lock);
                        }
                }
+               __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
+               __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
+
        } while (nr_scanned < max_scan);
-       spin_unlock(&zone->lru_lock);
+
 done:
-       local_irq_enable();
+       spin_unlock_irq(&zone->lru_lock);
        pagevec_release(&pvec);
        return nr_reclaimed;
 }
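
The NR_ISOLATED_ANON/NR_ISOLATED_FILE bookkeeping in the hunks above has to stay balanced: whatever is added when pages are taken off the LRU is subtracted once the survivors are freed or put back, otherwise too_many_isolated() would throttle reclaimers forever. A toy illustration of that invariant, with plain counters standing in for __mod_zone_page_state():

static long isolated_anon, isolated_file;       /* per-zone in the real code */

static void account_isolated(long nr_anon, long nr_file)
{
        isolated_anon += nr_anon;               /* NR_ISOLATED_ANON */
        isolated_file += nr_file;               /* NR_ISOLATED_FILE */
}

static void account_putback(long nr_anon, long nr_file)
{
        /* Must mirror account_isolated() exactly, whether the pages
         * were reclaimed or returned to an LRU list. */
        isolated_anon -= nr_anon;
        isolated_file -= nr_file;
}
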
@@ -1210,15 +1273,10 @@ static void move_active_pages_to_lru(struct zone *zone,
 
        while (!list_empty(list)) {
                page = lru_to_page(list);
-               prefetchw_prev_lru_page(page, list, flags);
 
                VM_BUG_ON(PageLRU(page));
                SetPageLRU(page);
 
-               VM_BUG_ON(!PageActive(page));
-               if (!is_active_lru(lru))
-                       ClearPageActive(page);  /* we are de-activating */
-
                list_move(&page->lru, &zone->lru[lru].list);
                mem_cgroup_add_lru_list(page, lru);
                pgmoved++;
@@ -1239,7 +1297,7 @@ static void move_active_pages_to_lru(struct zone *zone,
 static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                        struct scan_control *sc, int priority, int file)
 {
-       unsigned long pgmoved;
+       unsigned long nr_taken;
        unsigned long pgscanned;
        unsigned long vm_flags;
        LIST_HEAD(l_hold);      /* The pages which were snipped off */
@@ -1247,10 +1305,11 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        LIST_HEAD(l_inactive);
        struct page *page;
        struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+       unsigned long nr_rotated = 0;
 
        lru_add_drain();
        spin_lock_irq(&zone->lru_lock);
-       pgmoved = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
+       nr_taken = sc->isolate_pages(nr_pages, &l_hold, &pgscanned, sc->order,
                                        ISOLATE_ACTIVE, zone,
                                        sc->mem_cgroup, 1, file);
        /*
@@ -1260,16 +1319,16 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        if (scanning_global_lru(sc)) {
                zone->pages_scanned += pgscanned;
        }
-       reclaim_stat->recent_scanned[!!file] += pgmoved;
+       reclaim_stat->recent_scanned[!!file] += nr_taken;
 
        __count_zone_vm_events(PGREFILL, zone, pgscanned);
        if (file)
-               __mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
+               __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
        else
-               __mod_zone_page_state(zone, NR_ACTIVE_ANON, -pgmoved);
+               __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
+       __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
        spin_unlock_irq(&zone->lru_lock);
 
-       pgmoved = 0;  /* count referenced (mapping) mapped pages */
        while (!list_empty(&l_hold)) {
                cond_resched();
                page = lru_to_page(&l_hold);
@@ -1283,7 +1342,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                /* page_referenced clears PageReferenced */
                if (page_mapping_inuse(page) &&
                    page_referenced(page, 0, sc->mem_cgroup, &vm_flags)) {
-                       pgmoved++;
+                       nr_rotated++;
                        /*
                         * Identify referenced, file-backed active pages and
                         * give them one more trip around the active list. So
@@ -1299,6 +1358,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                        }
                }
 
+               ClearPageActive(page);  /* we are de-activating */
                list_add(&page->lru, &l_inactive);
        }
 
@@ -1312,13 +1372,13 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
         * helps balance scan pressure between file and anonymous pages in
         * get_scan_ratio.
         */
-       reclaim_stat->recent_rotated[!!file] += pgmoved;
+       reclaim_stat->recent_rotated[!!file] += nr_rotated;
 
        move_active_pages_to_lru(zone, &l_active,
                                                LRU_ACTIVE + file * LRU_FILE);
        move_active_pages_to_lru(zone, &l_inactive,
                                                LRU_BASE   + file * LRU_FILE);
-
+       __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
        spin_unlock_irq(&zone->lru_lock);
 }
 
@@ -1680,7 +1740,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                                continue;
 
-                       lru_pages += zone_lru_pages(zone);
+                       lru_pages += zone_reclaimable_pages(zone);
                }
        }
 
@@ -1715,13 +1775,13 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                 */
                if (total_scanned > sc->swap_cluster_max +
                                        sc->swap_cluster_max / 2) {
-                       wakeup_pdflush(laptop_mode ? 0 : total_scanned);
+                       wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
                        sc->may_writepage = 1;
                }
 
                /* Take a nap, wait for some writeback to complete */
                if (sc->nr_scanned && priority < DEF_PRIORITY - 2)
-                       congestion_wait(WRITE, HZ/10);
+                       congestion_wait(BLK_RW_ASYNC, HZ/10);
        }
        /* top priority shrink_zones still had more to do? don't OOM, then */
        if (!sc->all_unreclaimable && scanning_global_lru(sc))
@@ -1897,7 +1957,7 @@ loop_again:
                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;
 
-                       lru_pages += zone_lru_pages(zone);
+                       lru_pages += zone_reclaimable_pages(zone);
                }
 
                /*
@@ -1941,7 +2001,7 @@ loop_again:
                        if (zone_is_all_unreclaimable(zone))
                                continue;
                        if (nr_slab == 0 && zone->pages_scanned >=
-                                               (zone_lru_pages(zone) * 6))
+                                       (zone_reclaimable_pages(zone) * 6))
                                        zone_set_flag(zone,
                                                      ZONE_ALL_UNRECLAIMABLE);
                        /*
@@ -1960,7 +2020,7 @@ loop_again:
                 * another pass across the zones.
                 */
                if (total_scanned && priority < DEF_PRIORITY - 2)
-                       congestion_wait(WRITE, HZ/10);
+                       congestion_wait(BLK_RW_ASYNC, HZ/10);
 
                /*
                 * We do this so kswapd doesn't build up large priorities for
@@ -2108,12 +2168,39 @@ void wakeup_kswapd(struct zone *zone, int order)
        wake_up_interruptible(&pgdat->kswapd_wait);
 }
 
-unsigned long global_lru_pages(void)
+/*
+ * The reclaimable count would be mostly accurate.
+ * The less reclaimable pages may be
+ * - mlocked pages, which will be moved to the unevictable list when encountered
+ * - mapped pages, which may require several passes to be reclaimed
+ * - dirty pages, which are not "instantly" reclaimable
+ */
+unsigned long global_reclaimable_pages(void)
+{
+       int nr;
+
+       nr = global_page_state(NR_ACTIVE_FILE) +
+            global_page_state(NR_INACTIVE_FILE);
+
+       if (nr_swap_pages > 0)
+               nr += global_page_state(NR_ACTIVE_ANON) +
+                     global_page_state(NR_INACTIVE_ANON);
+
+       return nr;
+}
+
+unsigned long zone_reclaimable_pages(struct zone *zone)
 {
-       return global_page_state(NR_ACTIVE_ANON)
-               + global_page_state(NR_ACTIVE_FILE)
-               + global_page_state(NR_INACTIVE_ANON)
-               + global_page_state(NR_INACTIVE_FILE);
+       int nr;
+
+       nr = zone_page_state(zone, NR_ACTIVE_FILE) +
+            zone_page_state(zone, NR_INACTIVE_FILE);
+
+       if (nr_swap_pages > 0)
+               nr += zone_page_state(zone, NR_ACTIVE_ANON) +
+                     zone_page_state(zone, NR_INACTIVE_ANON);
+
+       return nr;
 }
 
 #ifdef CONFIG_HIBERNATION
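
Why the switch away from global_lru_pages()/zone_lru_pages() matters on swapless systems: anon pages cannot be reclaimed without swap, so counting them inflates both the shrink_slab() ratio and the pages_scanned threshold used to set ZONE_ALL_UNRECLAIMABLE. A small worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
        long active_file = 2000, inactive_file = 1000;
        long active_anon = 50000, inactive_anon = 30000;
        long nr_swap_pages = 0;                 /* swapless system */

        long old_estimate = active_file + inactive_file +
                            active_anon + inactive_anon;

        long new_estimate = active_file + inactive_file;
        if (nr_swap_pages > 0)
                new_estimate += active_anon + inactive_anon;

        /* kswapd declares a zone unreclaimable after scanning six times
         * the reclaimable estimate without progress. */
        printf("old threshold: %ld pages\n", old_estimate * 6); /* 498000 */
        printf("new threshold: %ld pages\n", new_estimate * 6); /*  18000 */
        return 0;
}
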
@@ -2185,7 +2272,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 
        current->reclaim_state = &reclaim_state;
 
-       lru_pages = global_lru_pages();
+       lru_pages = global_reclaimable_pages();
        nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
        /* If slab caches are huge, it's better to hit them first */
        while (nr_slab >= lru_pages) {
@@ -2227,13 +2314,13 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 
                        reclaim_state.reclaimed_slab = 0;
                        shrink_slab(sc.nr_scanned, sc.gfp_mask,
-                                       global_lru_pages());
+                                   global_reclaimable_pages());
                        sc.nr_reclaimed += reclaim_state.reclaimed_slab;
                        if (sc.nr_reclaimed >= nr_pages)
                                goto out;
 
                        if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
-                               congestion_wait(WRITE, HZ / 10);
+                               congestion_wait(BLK_RW_ASYNC, HZ / 10);
                }
        }
 
@@ -2244,7 +2331,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
        if (!sc.nr_reclaimed) {
                do {
                        reclaim_state.reclaimed_slab = 0;
-                       shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages());
+                       shrink_slab(nr_pages, sc.gfp_mask,
+                                   global_reclaimable_pages());
                        sc.nr_reclaimed += reclaim_state.reclaimed_slab;
                } while (sc.nr_reclaimed < nr_pages &&
                                reclaim_state.reclaimed_slab > 0);