diff --git a/mm/vmscan.c b/mm/vmscan.c
index 87779dd..e5a9597 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -19,6 +19,7 @@
 #include <linux/pagemap.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
+#include <linux/vmstat.h>
 #include <linux/file.h>
 #include <linux/writeback.h>
 #include <linux/blkdev.h>
@@ -35,6 +36,7 @@
 #include <linux/rwsem.h>
 #include <linux/delay.h>
 #include <linux/kthread.h>
+#include <linux/freezer.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
@@ -64,17 +66,8 @@ struct scan_control {
        int swappiness;
 
        int all_unreclaimable;
-};
 
-/*
- * The list of shrinker callbacks used by to apply pressure to
- * ageable caches.
- */
-struct shrinker {
-       shrinker_t              shrinker;
-       struct list_head        list;
-       int                     seeks;  /* seeks to recreate an obj */
-       long                    nr;     /* objs pending delete */
+       int order;
 };
 
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
@@ -119,34 +112,25 @@ static DECLARE_RWSEM(shrinker_rwsem);
 /*
  * Add a shrinker callback to be called from the vm
  */
-struct shrinker *set_shrinker(int seeks, shrinker_t theshrinker)
+void register_shrinker(struct shrinker *shrinker)
 {
-        struct shrinker *shrinker;
-
-        shrinker = kmalloc(sizeof(*shrinker), GFP_KERNEL);
-        if (shrinker) {
-               shrinker->shrinker = theshrinker;
-               shrinker->seeks = seeks;
-               shrinker->nr = 0;
-               down_write(&shrinker_rwsem);
-               list_add_tail(&shrinker->list, &shrinker_list);
-               up_write(&shrinker_rwsem);
-       }
-       return shrinker;
+       shrinker->nr = 0;
+       down_write(&shrinker_rwsem);
+       list_add_tail(&shrinker->list, &shrinker_list);
+       up_write(&shrinker_rwsem);
 }
-EXPORT_SYMBOL(set_shrinker);
+EXPORT_SYMBOL(register_shrinker);
 
 /*
  * Remove one
  */
-void remove_shrinker(struct shrinker *shrinker)
+void unregister_shrinker(struct shrinker *shrinker)
 {
        down_write(&shrinker_rwsem);
        list_del(&shrinker->list);
        up_write(&shrinker_rwsem);
-       kfree(shrinker);
 }
-EXPORT_SYMBOL(remove_shrinker);
+EXPORT_SYMBOL(unregister_shrinker);
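
The registration API change above inverts ownership: instead of set_shrinker() kmalloc'ing a struct shrinker on behalf of the caller, the caller now embeds one and passes it in. A minimal sketch, not taken from this patch, of how a cache owner might use the new interface; my_cache_count()/my_cache_trim() are hypothetical stand-ins, and DEFAULT_SEEKS is assumed to be the generic seek cost defined alongside struct shrinker:

#include <linux/init.h>
#include <linux/mm.h>

static int my_cache_count(void) { return 0; }		/* hypothetical */
static void my_cache_trim(int nr, gfp_t gfp_mask) { }	/* hypothetical */

/* Callback signature as used by shrink_slab() in this kernel. */
static int my_cache_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	if (nr_to_scan)
		my_cache_trim(nr_to_scan, gfp_mask);
	return my_cache_count();	/* always report objects remaining */
}

static struct shrinker my_shrinker = {
	.shrink	= my_cache_shrink,
	.seeks	= DEFAULT_SEEKS,
};

static int __init my_cache_init(void)
{
	register_shrinker(&my_shrinker);
	return 0;
}

static void __exit my_cache_exit(void)
{
	unregister_shrinker(&my_shrinker);
}
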
 
 #define SHRINK_BATCH 128
 /*
@@ -157,7 +141,7 @@ EXPORT_SYMBOL(remove_shrinker);
  * percentages of the lru and ageable caches.  This should balance the seeks
  * generated by these structures.
  *
- * If the vm encounted mapped pages on the LRU it increase the pressure on
+ * If the vm encountered mapped pages on the LRU it increases the pressure on
  * slab to avoid swapping.
  *
  * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
@@ -183,7 +167,7 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
        list_for_each_entry(shrinker, &shrinker_list, list) {
                unsigned long long delta;
                unsigned long total_scan;
-               unsigned long max_pass = (*shrinker->shrinker)(0, gfp_mask);
+               unsigned long max_pass = (*shrinker->shrink)(0, gfp_mask);
 
                delta = (4 * scanned) / shrinker->seeks;
                delta *= max_pass;
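
The two statements above are the core of the proportional slab pressure calculation: each shrinker is asked to scan a number of objects proportional to the LRU scanning that just took place, weighted by the cache's seek cost and its reported object count. The rest of shrink_slab() (outside this hunk) divides the product by lru_pages + 1, which is assumed in the standalone sketch of the arithmetic below:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t scanned   = 2048;	/* LRU pages just scanned */
	uint64_t lru_pages = 100000;	/* pages currently on the LRU lists */
	uint64_t max_pass  = 50000;	/* objects the shrinker reports */
	uint64_t seeks     = 2;		/* seeks to recreate one object */

	uint64_t delta = (4 * scanned) / seeks;
	delta *= max_pass;
	delta /= lru_pages + 1;		/* +1 guards against division by zero */

	printf("ask this shrinker to scan ~%llu objects\n",
	       (unsigned long long)delta);
	return 0;
}
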
@@ -211,8 +195,8 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
                        int shrink_ret;
                        int nr_before;
 
-                       nr_before = (*shrinker->shrinker)(0, gfp_mask);
-                       shrink_ret = (*shrinker->shrinker)(this_scan, gfp_mask);
+                       nr_before = (*shrinker->shrink)(0, gfp_mask);
+                       shrink_ret = (*shrinker->shrink)(this_scan, gfp_mask);
                        if (shrink_ret == -1)
                                break;
                        if (shrink_ret < nr_before)
@@ -282,15 +266,17 @@ static void handle_write_error(struct address_space *mapping,
                                struct page *page, int error)
 {
        lock_page(page);
-       if (page_mapping(page) == mapping) {
-               if (error == -ENOSPC)
-                       set_bit(AS_ENOSPC, &mapping->flags);
-               else
-                       set_bit(AS_EIO, &mapping->flags);
-       }
+       if (page_mapping(page) == mapping)
+               mapping_set_error(mapping, error);
        unlock_page(page);
 }
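
The handle_write_error() hunk above replaces the open-coded AS_ENOSPC/AS_EIO bit twiddling with the mapping_set_error() helper. Judging from the removed lines, the helper behaves roughly like the sketch below; the unlikely(error) guard is an assumption about its actual definition rather than something shown in this diff:

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}
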
 
+/* Request for sync pageout. */
+enum pageout_io {
+       PAGEOUT_IO_ASYNC,
+       PAGEOUT_IO_SYNC,
+};
+
 /* possible outcome of pageout() */
 typedef enum {
        /* failed to write page out, page is locked */
@@ -307,7 +293,8 @@ typedef enum {
  * pageout is called by shrink_page_list() for each dirty page.
  * Calls ->writepage().
  */
-static pageout_t pageout(struct page *page, struct address_space *mapping)
+static pageout_t pageout(struct page *page, struct address_space *mapping,
+                                               enum pageout_io sync_writeback)
 {
        /*
         * If the page is dirty, only perform writeback if that write
@@ -366,28 +353,62 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
                        ClearPageReclaim(page);
                        return PAGE_ACTIVATE;
                }
+
+               /*
+                * Wait on writeback if requested to. This happens when
+                * direct reclaiming a large contiguous area and the
+                * first attempt to free a range of pages fails.
+                */
+               if (PageWriteback(page) && sync_writeback == PAGEOUT_IO_SYNC)
+                       wait_on_page_writeback(page);
+
                if (!PageWriteback(page)) {
                        /* synchronous write or broken a_ops? */
                        ClearPageReclaim(page);
                }
-
+               inc_zone_page_state(page, NR_VMSCAN_WRITE);
                return PAGE_SUCCESS;
        }
 
        return PAGE_CLEAN;
 }
 
+/*
+ * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
+ * someone else has a ref on the page, abort and return 0.  If it was
+ * successfully detached, return 1.  Assumes the caller has a single ref on
+ * this page.
+ */
 int remove_mapping(struct address_space *mapping, struct page *page)
 {
        BUG_ON(!PageLocked(page));
        BUG_ON(mapping != page_mapping(page));
 
        write_lock_irq(&mapping->tree_lock);
-
        /*
-        * The non-racy check for busy page.  It is critical to check
-        * PageDirty _after_ making sure that the page is freeable and
-        * not in use by anybody.       (pagecache + us == 2)
+        * The non racy check for a busy page.
+        *
+        * Must be careful with the order of the tests. When someone has
+        * a ref to the page, it may be possible that they dirty it then
+        * drop the reference. So if PageDirty is tested before page_count
+        * here, then the following race may occur:
+        *
+        * get_user_pages(&page);
+        * [user mapping goes away]
+        * write_to(page);
+        *                              !PageDirty(page)    [good]
+        * SetPageDirty(page);
+        * put_page(page);
+        *                              !page_count(page)   [good, discard it]
+        *
+        * [oops, our write_to data is lost]
+        *
+        * Reversing the order of the tests ensures such a situation cannot
+        * escape unnoticed. The smp_rmb is needed to ensure the page->flags
+        * load is not satisfied before that of page->_count.
+        *
+        * Note that if SetPageDirty is always performed via set_page_dirty,
+        * and thus under tree_lock, then this ordering is not required.
         */
        if (unlikely(page_count(page) != 2))
                goto cannot_free;
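
To make the ordering described in the comment concrete: the checks that follow it (the second half lies beyond the end of this hunk) test the reference count first, insert a read barrier, and only then look at the dirty bit, roughly as sketched here:

	/* pagecache + our caller == 2 references; anything more is busy */
	if (unlikely(page_count(page) != 2))
		goto cannot_free;
	smp_rmb();	/* don't let the page->flags load pass page->_count */
	if (unlikely(PageDirty(page)))
		goto cannot_free;
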
@@ -418,7 +439,8 @@ cannot_free:
  * shrink_page_list() returns the number of reclaimed pages
  */
 static unsigned long shrink_page_list(struct list_head *page_list,
-                                       struct scan_control *sc)
+                                       struct scan_control *sc,
+                                       enum pageout_io sync_writeback)
 {
        LIST_HEAD(ret_pages);
        struct pagevec freed_pvec;
@@ -453,12 +475,28 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                if (page_mapped(page) || PageSwapCache(page))
                        sc->nr_scanned++;
 
-               if (PageWriteback(page))
-                       goto keep_locked;
+               may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
+                       (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
+
+               if (PageWriteback(page)) {
+                       /*
+                        * Synchronous reclaim is performed in two passes,
+                        * first an asynchronous pass over the list to
+                        * start parallel writeback, and a second synchronous
+                        * pass to wait for the IO to complete.  Wait here
+                        * for any page for which writeback has already
+                        * started.
+                        */
+                       if (sync_writeback == PAGEOUT_IO_SYNC && may_enter_fs)
+                               wait_on_page_writeback(page);
+                       else
+                               goto keep_locked;
+               }
 
                referenced = page_referenced(page, 1);
                /* In active use or really unfreeable?  Activate it. */
-               if (referenced && page_mapping_inuse(page))
+               if (sc->order <= PAGE_ALLOC_COSTLY_ORDER &&
+                                       referenced && page_mapping_inuse(page))
                        goto activate_locked;
 
 #ifdef CONFIG_SWAP
@@ -472,8 +510,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 #endif /* CONFIG_SWAP */
 
                mapping = page_mapping(page);
-               may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
-                       (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));
 
                /*
                 * The page is mapped into the page tables of one or more
@@ -491,7 +527,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                }
 
                if (PageDirty(page)) {
-                       if (referenced)
+                       if (sc->order <= PAGE_ALLOC_COSTLY_ORDER && referenced)
                                goto keep_locked;
                        if (!may_enter_fs)
                                goto keep_locked;
@@ -499,7 +535,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                                goto keep_locked;
 
                        /* Page is dirty, try to write it out here */
-                       switch(pageout(page, mapping)) {
+                       switch (pageout(page, mapping, sync_writeback)) {
                        case PAGE_KEEP:
                                goto keep_locked;
                        case PAGE_ACTIVATE:
@@ -575,6 +611,51 @@ keep:
        return nr_reclaimed;
 }
 
+/* LRU Isolation modes. */
+#define ISOLATE_INACTIVE 0     /* Isolate inactive pages. */
+#define ISOLATE_ACTIVE 1       /* Isolate active pages. */
+#define ISOLATE_BOTH 2         /* Isolate both active and inactive pages. */
+
+/*
+ * Attempt to remove the specified page from its LRU.  Only take this page
+ * if it is of the appropriate PageActive status.  Pages which are being
+ * freed elsewhere are also ignored.
+ *
+ * page:       page to consider
+ * mode:       one of the LRU isolation modes defined above
+ *
+ * returns 0 on success, -ve errno on failure.
+ */
+static int __isolate_lru_page(struct page *page, int mode)
+{
+       int ret = -EINVAL;
+
+       /* Only take pages on the LRU. */
+       if (!PageLRU(page))
+               return ret;
+
+       /*
+        * When checking the active state, we need to be sure we are
+        * dealing with comparable boolean values.  Take the logical not
+        * of each.
+        */
+       if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
+               return ret;
+
+       ret = -EBUSY;
+       if (likely(get_page_unless_zero(page))) {
+               /*
+                * Be careful not to clear PageLRU until after we're
+                * sure the page is not being freed elsewhere -- the
+                * page release code relies on it.
+                */
+               ClearPageLRU(page);
+               ret = 0;
+       }
+
+       return ret;
+}
+
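
The !PageActive(page) != !mode comparison above works because both sides are first collapsed to 0 or 1: PageActive() may return any non-zero value, while the ISOLATE_* constants are exactly 0 and 1. A tiny standalone program showing why the double negation matters:

#include <stdio.h>

int main(void)
{
	int page_active = 4;	/* test_bit() style: any non-zero value */
	int mode = 1;		/* ISOLATE_ACTIVE */

	/* Comparing the raw values wrongly rejects this active page... */
	printf("raw compare:        %s\n",
	       (page_active != mode) ? "skip page" : "take page");

	/* ...while logical-not on both sides compares them as booleans. */
	printf("normalized compare: %s\n",
	       (!page_active != !mode) ? "skip page" : "take page");
	return 0;
}
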
 /*
  * zone->lru_lock is heavily contended.  Some of the functions that
  * shrink the lists perform better by taking out a batch of pages
@@ -589,38 +670,90 @@ keep:
  * @src:       The LRU list to pull pages off.
  * @dst:       The temp list to put pages on to.
  * @scanned:   The number of pages that were scanned.
+ * @order:     The caller's attempted allocation order
+ * @mode:      One of the LRU isolation modes
  *
  * returns how many pages were moved onto *@dst.
  */
 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                struct list_head *src, struct list_head *dst,
-               unsigned long *scanned)
+               unsigned long *scanned, int order, int mode)
 {
        unsigned long nr_taken = 0;
-       struct page *page;
        unsigned long scan;
 
        for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
-               struct list_head *target;
+               struct page *page;
+               unsigned long pfn;
+               unsigned long end_pfn;
+               unsigned long page_pfn;
+               int zone_id;
+
                page = lru_to_page(src);
                prefetchw_prev_lru_page(page, src, flags);
 
                VM_BUG_ON(!PageLRU(page));
 
-               list_del(&page->lru);
-               target = src;
-               if (likely(get_page_unless_zero(page))) {
-                       /*
-                        * Be careful not to clear PageLRU until after we're
-                        * sure the page is not being freed elsewhere -- the
-                        * page release code relies on it.
-                        */
-                       ClearPageLRU(page);
-                       target = dst;
+               switch (__isolate_lru_page(page, mode)) {
+               case 0:
+                       list_move(&page->lru, dst);
                        nr_taken++;
-               } /* else it is being freed elsewhere */
+                       break;
 
-               list_add(&page->lru, target);
+               case -EBUSY:
+                       /* else it is being freed elsewhere */
+                       list_move(&page->lru, src);
+                       continue;
+
+               default:
+                       BUG();
+               }
+
+               if (!order)
+                       continue;
+
+               /*
+                * Attempt to take all pages in the order aligned region
+                * surrounding the tag page.  Only take those pages of
+                * the same active state as that tag page.  We may safely
+                * round the target page pfn down to the requested order
+                * as the mem_map is guaranteed valid out to MAX_ORDER.
+                * If a page in the block is in a different zone we will
+                * detect it from its zone id and abort this block scan.
+                */
+               zone_id = page_zone_id(page);
+               page_pfn = page_to_pfn(page);
+               pfn = page_pfn & ~((1 << order) - 1);
+               end_pfn = pfn + (1 << order);
+               for (; pfn < end_pfn; pfn++) {
+                       struct page *cursor_page;
+
+                       /* The target page is in the block, ignore it. */
+                       if (unlikely(pfn == page_pfn))
+                               continue;
+
+                       /* Avoid holes within the zone. */
+                       if (unlikely(!pfn_valid_within(pfn)))
+                               break;
+
+                       cursor_page = pfn_to_page(pfn);
+                       /* Check that we have not crossed a zone boundary. */
+                       if (unlikely(page_zone_id(cursor_page) != zone_id))
+                               continue;
+                       switch (__isolate_lru_page(cursor_page, mode)) {
+                       case 0:
+                               list_move(&cursor_page->lru, dst);
+                               nr_taken++;
+                               scan++;
+                               break;
+
+                       case -EBUSY:
+                               /* else it is being freed elsewhere */
+                               list_move(&cursor_page->lru, src);
+                       default:
+                               break;
+                       }
+               }
        }
 
        *scanned = scan;
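
The block scan above rounds the tag page's pfn down to the start of its order-aligned block and then walks every pfn inside that block. A standalone illustration of the masking arithmetic, with a made-up pfn and order:

#include <stdio.h>

int main(void)
{
	unsigned long page_pfn = 262147;	/* pfn of the tag page */
	int order = 4;				/* caller wants 2^4 = 16 pages */

	unsigned long pfn = page_pfn & ~((1UL << order) - 1);
	unsigned long end_pfn = pfn + (1UL << order);

	printf("scan pfns [%lu, %lu) around tag pfn %lu\n",
	       pfn, end_pfn, page_pfn);
	return 0;
}
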
@@ -628,6 +761,24 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 }
 
 /*
+ * clear_active_flags() is a helper for shrink_active_list(), clearing
+ * any active bits from the pages in the list.
+ */
+static unsigned long clear_active_flags(struct list_head *page_list)
+{
+       int nr_active = 0;
+       struct page *page;
+
+       list_for_each_entry(page, page_list, lru)
+               if (PageActive(page)) {
+                       ClearPageActive(page);
+                       nr_active++;
+               }
+
+       return nr_active;
+}
+
+/*
  * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
  * of reclaimed pages
  */
@@ -648,16 +799,46 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                unsigned long nr_taken;
                unsigned long nr_scan;
                unsigned long nr_freed;
+               unsigned long nr_active;
 
                nr_taken = isolate_lru_pages(sc->swap_cluster_max,
-                                            &zone->inactive_list,
-                                            &page_list, &nr_scan);
-               zone->nr_inactive -= nr_taken;
+                            &zone->inactive_list,
+                            &page_list, &nr_scan, sc->order,
+                            (sc->order > PAGE_ALLOC_COSTLY_ORDER)?
+                                            ISOLATE_BOTH : ISOLATE_INACTIVE);
+               nr_active = clear_active_flags(&page_list);
+               __count_vm_events(PGDEACTIVATE, nr_active);
+
+               __mod_zone_page_state(zone, NR_ACTIVE, -nr_active);
+               __mod_zone_page_state(zone, NR_INACTIVE,
+                                               -(nr_taken - nr_active));
                zone->pages_scanned += nr_scan;
                spin_unlock_irq(&zone->lru_lock);
 
                nr_scanned += nr_scan;
-               nr_freed = shrink_page_list(&page_list, sc);
+               nr_freed = shrink_page_list(&page_list, sc, PAGEOUT_IO_ASYNC);
+
+               /*
+                * If we are direct reclaiming for contiguous pages and we do
+                * not reclaim everything in the list, try again and wait
+                * for IO to complete. This will stall high-order allocations
+                * but that should be acceptable to the caller
+                */
+               if (nr_freed < nr_taken && !current_is_kswapd() &&
+                                       sc->order > PAGE_ALLOC_COSTLY_ORDER) {
+                       congestion_wait(WRITE, HZ/10);
+
+                       /*
+                        * The attempt at page out may have made some
+                        * of the pages active, mark them inactive again.
+                        */
+                       nr_active = clear_active_flags(&page_list);
+                       count_vm_events(PGDEACTIVATE, nr_active);
+
+                       nr_freed += shrink_page_list(&page_list, sc,
+                                                       PAGEOUT_IO_SYNC);
+               }
+
                nr_reclaimed += nr_freed;
                local_irq_disable();
                if (current_is_kswapd()) {
@@ -665,7 +846,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                        __count_vm_events(KSWAPD_STEAL, nr_freed);
                } else
                        __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scan);
-               __count_vm_events(PGACTIVATE, nr_freed);
+               __count_zone_vm_events(PGSTEAL, zone, nr_freed);
 
                if (nr_taken == 0)
                        goto done;
@@ -697,9 +878,24 @@ done:
        return nr_reclaimed;
 }
 
+/*
+ * We are about to scan this zone at a certain priority level.  If that priority
+ * level is smaller (ie: more urgent) than the previous priority, then note
+ * that priority level within the zone.  This is done so that when the next
+ * process comes in to scan this zone, it will immediately start out at this
+ * priority level rather than having to build up its own scanning priority.
+ * Here, this priority affects only the reclaim-mapped threshold.
+ */
+static inline void note_zone_scanning_priority(struct zone *zone, int priority)
+{
+       if (priority < zone->prev_priority)
+               zone->prev_priority = priority;
+}
+
 static inline int zone_is_near_oom(struct zone *zone)
 {
-       return zone->pages_scanned >= (zone->nr_active + zone->nr_inactive)*3;
+       return zone->pages_scanned >= (zone_page_state(zone, NR_ACTIVE)
+                               + zone_page_state(zone, NR_INACTIVE))*3;
 }
 
 /*
@@ -720,7 +916,7 @@ static inline int zone_is_near_oom(struct zone *zone)
  * But we had to alter page->flags anyway.
  */
 static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
-                               struct scan_control *sc)
+                               struct scan_control *sc, int priority)
 {
        unsigned long pgmoved;
        int pgdeactivate = 0;
@@ -736,6 +932,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                long mapped_ratio;
                long distress;
                long swap_tendency;
+               long imbalance;
 
                if (zone_is_near_oom(zone))
                        goto force_reclaim_mapped;
@@ -744,7 +941,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                 * `distress' is a measure of how much trouble we're having
                 * reclaiming pages.  0 -> no problems.  100 -> great trouble.
                 */
-               distress = 100 >> zone->prev_priority;
+               distress = 100 >> min(zone->prev_priority, priority);
 
                /*
                 * The point of this algorithm is to decide when to start
@@ -771,6 +968,46 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                swap_tendency = mapped_ratio / 2 + distress + sc->swappiness;
 
                /*
+                * If there's a huge imbalance between active and inactive
+                * (think active 100 times larger than inactive) we should
+                * become more permissive, or the system will take too much
+                * cpu before it starts swapping during memory pressure.
+                * Distress is about avoiding early-oom, this is about
+                * making swappiness graceful despite setting it to low
+                * values.
+                *
+                * Avoid div by zero with nr_inactive+1, and max resulting
+                * value is vm_total_pages.
+                */
+               imbalance  = zone_page_state(zone, NR_ACTIVE);
+               imbalance /= zone_page_state(zone, NR_INACTIVE) + 1;
+
+               /*
+                * Reduce the effect of imbalance if swappiness is low,
+                * this means for a swappiness very low, the imbalance
+                * must be much higher than 100 for this logic to make
+                * the difference.
+                *
+                * Max temporary value is vm_total_pages*100.
+                */
+               imbalance *= (vm_swappiness + 1);
+               imbalance /= 100;
+
+               /*
+                * If not much of the ram is mapped, the imbalance is less
+                * relevant: refilling the inactive list with mapped pages
+                * is only a priority when a high ratio of the ram is
+                * mapped.
+                *
+                * Max temporary value is vm_total_pages*100.
+                */
+               imbalance *= mapped_ratio;
+               imbalance /= 100;
+
+               /* apply imbalance feedback to swap_tendency */
+               swap_tendency += imbalance;
+
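
Putting the pieces together: swap_tendency starts at mapped_ratio / 2 + distress + swappiness, and the imbalance term is scaled down by both swappiness and mapped_ratio before being added. The pre-existing code just past these hunks compares the result against a threshold (100 in this kernel; the threshold is an assumption, not visible in the diff) to decide whether mapped pages get reclaimed too. A standalone walk-through with sample numbers:

#include <stdio.h>

int main(void)
{
	long swappiness   = 60;
	long distress     = 100 >> 3;	/* scanning at priority 3 */
	long mapped_ratio = 40;		/* 40% of ram is mapped */
	long nr_active    = 300000;	/* badly imbalanced LRU lists */
	long nr_inactive  = 2000;

	long swap_tendency = mapped_ratio / 2 + distress + swappiness;

	long imbalance = nr_active / (nr_inactive + 1);
	imbalance *= swappiness + 1;
	imbalance /= 100;
	imbalance *= mapped_ratio;
	imbalance /= 100;

	swap_tendency += imbalance;

	printf("swap_tendency = %ld (>= 100 => also reclaim mapped pages)\n",
	       swap_tendency);
	return 0;
}
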
+               /*
                 * Now use this metric to decide whether to start moving mapped
                 * memory onto the inactive list.
                 */
@@ -782,9 +1019,9 @@ force_reclaim_mapped:
        lru_add_drain();
        spin_lock_irq(&zone->lru_lock);
        pgmoved = isolate_lru_pages(nr_pages, &zone->active_list,
-                                   &l_hold, &pgscanned);
+                           &l_hold, &pgscanned, sc->order, ISOLATE_ACTIVE);
        zone->pages_scanned += pgscanned;
-       zone->nr_active -= pgmoved;
+       __mod_zone_page_state(zone, NR_ACTIVE, -pgmoved);
        spin_unlock_irq(&zone->lru_lock);
 
        while (!list_empty(&l_hold)) {
@@ -816,7 +1053,7 @@ force_reclaim_mapped:
                list_move(&page->lru, &zone->inactive_list);
                pgmoved++;
                if (!pagevec_add(&pvec, page)) {
-                       zone->nr_inactive += pgmoved;
+                       __mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
                        spin_unlock_irq(&zone->lru_lock);
                        pgdeactivate += pgmoved;
                        pgmoved = 0;
@@ -826,7 +1063,7 @@ force_reclaim_mapped:
                        spin_lock_irq(&zone->lru_lock);
                }
        }
-       zone->nr_inactive += pgmoved;
+       __mod_zone_page_state(zone, NR_INACTIVE, pgmoved);
        pgdeactivate += pgmoved;
        if (buffer_heads_over_limit) {
                spin_unlock_irq(&zone->lru_lock);
@@ -844,14 +1081,14 @@ force_reclaim_mapped:
                list_move(&page->lru, &zone->active_list);
                pgmoved++;
                if (!pagevec_add(&pvec, page)) {
-                       zone->nr_active += pgmoved;
+                       __mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
                        pgmoved = 0;
                        spin_unlock_irq(&zone->lru_lock);
                        __pagevec_release(&pvec);
                        spin_lock_irq(&zone->lru_lock);
                }
        }
-       zone->nr_active += pgmoved;
+       __mod_zone_page_state(zone, NR_ACTIVE, pgmoved);
 
        __count_zone_vm_events(PGREFILL, zone, pgscanned);
        __count_vm_events(PGDEACTIVATE, pgdeactivate);
@@ -871,20 +1108,20 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
        unsigned long nr_to_scan;
        unsigned long nr_reclaimed = 0;
 
-       atomic_inc(&zone->reclaim_in_progress);
-
        /*
         * Add one to `nr_to_scan' just to make sure that the kernel will
         * slowly sift through the active list.
         */
-       zone->nr_scan_active += (zone->nr_active >> priority) + 1;
+       zone->nr_scan_active +=
+               (zone_page_state(zone, NR_ACTIVE) >> priority) + 1;
        nr_active = zone->nr_scan_active;
        if (nr_active >= sc->swap_cluster_max)
                zone->nr_scan_active = 0;
        else
                nr_active = 0;
 
-       zone->nr_scan_inactive += (zone->nr_inactive >> priority) + 1;
+       zone->nr_scan_inactive +=
+               (zone_page_state(zone, NR_INACTIVE) >> priority) + 1;
        nr_inactive = zone->nr_scan_inactive;
        if (nr_inactive >= sc->swap_cluster_max)
                zone->nr_scan_inactive = 0;
@@ -896,7 +1133,7 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
                        nr_to_scan = min(nr_active,
                                        (unsigned long)sc->swap_cluster_max);
                        nr_active -= nr_to_scan;
-                       shrink_active_list(nr_to_scan, zone, sc);
+                       shrink_active_list(nr_to_scan, zone, sc, priority);
                }
 
                if (nr_inactive) {
@@ -908,9 +1145,7 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
                }
        }
 
-       throttle_vm_writeout();
-
-       atomic_dec(&zone->reclaim_in_progress);
+       throttle_vm_writeout(sc->gfp_mask);
        return nr_reclaimed;
 }
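
The nr_scan_active/nr_scan_inactive updates above size the scan batch as the LRU length shifted right by the current priority, plus one, so every drop in priority roughly doubles the pressure on the zone. A quick illustration (DEF_PRIORITY is 12 in this kernel):

#include <stdio.h>

int main(void)
{
	unsigned long nr_inactive = 1UL << 20;	/* 1M pages on the list */
	int priority;

	for (priority = 12; priority >= 0; priority--)
		printf("priority %2d: scan target += %lu\n",
		       priority, (nr_inactive >> priority) + 1);
	return 0;
}
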
 
@@ -943,14 +1178,12 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
                if (!populated_zone(zone))
                        continue;
 
-               if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
+               if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                        continue;
 
-               zone->temp_priority = priority;
-               if (zone->prev_priority > priority)
-                       zone->prev_priority = priority;
+               note_zone_scanning_priority(zone, priority);
 
-               if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+               if (zone_is_all_unreclaimable(zone) && priority != DEF_PRIORITY)
                        continue;       /* Let kswapd poll it */
 
                sc->all_unreclaimable = 0;
@@ -973,7 +1206,7 @@ static unsigned long shrink_zones(int priority, struct zone **zones,
  * holds filesystem locks which prevent writeout this might not work, and the
  * allocation attempt will fail.
  */
-unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
+unsigned long try_to_free_pages(struct zone **zones, int order, gfp_t gfp_mask)
 {
        int priority;
        int ret = 0;
@@ -988,6 +1221,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
                .swap_cluster_max = SWAP_CLUSTER_MAX,
                .may_swap = 1,
                .swappiness = vm_swappiness,
+               .order = order,
        };
 
        count_vm_event(ALLOCSTALL);
@@ -995,11 +1229,11 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
        for (i = 0; zones[i] != NULL; i++) {
                struct zone *zone = zones[i];
 
-               if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
+               if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                        continue;
 
-               zone->temp_priority = DEF_PRIORITY;
-               lru_pages += zone->nr_active + zone->nr_inactive;
+               lru_pages += zone_page_state(zone, NR_ACTIVE)
+                               + zone_page_state(zone, NR_INACTIVE);
        }
 
        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
@@ -1033,19 +1267,28 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 
                /* Take a nap, wait for some writeback to complete */
                if (sc.nr_scanned && priority < DEF_PRIORITY - 2)
-                       blk_congestion_wait(WRITE, HZ/10);
+                       congestion_wait(WRITE, HZ/10);
        }
        /* top priority shrink_caches still had more to do? don't OOM, then */
        if (!sc.all_unreclaimable)
                ret = 1;
 out:
-       for (i = 0; zones[i] != 0; i++) {
+       /*
+        * Now that we've scanned all the zones at this priority level, note
+        * that level within the zone so that the next thread which performs
+        * scanning of this zone will immediately start out at this priority
+        * level.  This affects only the decision whether or not to bring
+        * mapped pages onto the inactive list.
+        */
+       if (priority < 0)
+               priority = 0;
+       for (i = 0; zones[i] != NULL; i++) {
                struct zone *zone = zones[i];
 
-               if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
+               if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                        continue;
 
-               zone->prev_priority = zone->temp_priority;
+               zone->prev_priority = priority;
        }
        return ret;
 }
@@ -1084,7 +1327,13 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
                .may_swap = 1,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
                .swappiness = vm_swappiness,
+               .order = order,
        };
+       /*
+        * temp_priority is used to remember the scanning priority at which
+        * this zone was successfully refilled to free_pages == pages_high.
+        */
+       int temp_priority[MAX_NR_ZONES];
 
 loop_again:
        total_scanned = 0;
@@ -1092,11 +1341,8 @@ loop_again:
        sc.may_writepage = !laptop_mode;
        count_vm_event(PAGEOUTRUN);
 
-       for (i = 0; i < pgdat->nr_zones; i++) {
-               struct zone *zone = pgdat->node_zones + i;
-
-               zone->temp_priority = DEF_PRIORITY;
-       }
+       for (i = 0; i < pgdat->nr_zones; i++)
+               temp_priority[i] = DEF_PRIORITY;
 
        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                int end_zone = 0;       /* Inclusive.  0 = ZONE_DMA */
@@ -1118,21 +1364,24 @@ loop_again:
                        if (!populated_zone(zone))
                                continue;
 
-                       if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+                       if (zone_is_all_unreclaimable(zone) &&
+                           priority != DEF_PRIORITY)
                                continue;
 
                        if (!zone_watermark_ok(zone, order, zone->pages_high,
                                               0, 0)) {
                                end_zone = i;
-                               goto scan;
+                               break;
                        }
                }
-               goto out;
-scan:
+               if (i < 0)
+                       goto out;
+
                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;
 
-                       lru_pages += zone->nr_active + zone->nr_inactive;
+                       lru_pages += zone_page_state(zone, NR_ACTIVE)
+                                       + zone_page_state(zone, NR_INACTIVE);
                }
 
                /*
@@ -1151,27 +1400,35 @@ scan:
                        if (!populated_zone(zone))
                                continue;
 
-                       if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+                       if (zone_is_all_unreclaimable(zone) &&
+                                       priority != DEF_PRIORITY)
                                continue;
 
                        if (!zone_watermark_ok(zone, order, zone->pages_high,
                                               end_zone, 0))
                                all_zones_ok = 0;
-                       zone->temp_priority = priority;
-                       if (zone->prev_priority > priority)
-                               zone->prev_priority = priority;
+                       temp_priority[i] = priority;
                        sc.nr_scanned = 0;
-                       nr_reclaimed += shrink_zone(priority, zone, &sc);
+                       note_zone_scanning_priority(zone, priority);
+                       /*
+                        * We put equal pressure on every zone, unless one
+                        * zone has way too many pages free already.
+                        */
+                       if (!zone_watermark_ok(zone, order, 8*zone->pages_high,
+                                               end_zone, 0))
+                               nr_reclaimed += shrink_zone(priority, zone, &sc);
                        reclaim_state->reclaimed_slab = 0;
                        nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
                                                lru_pages);
                        nr_reclaimed += reclaim_state->reclaimed_slab;
                        total_scanned += sc.nr_scanned;
-                       if (zone->all_unreclaimable)
+                       if (zone_is_all_unreclaimable(zone))
                                continue;
                        if (nr_slab == 0 && zone->pages_scanned >=
-                                   (zone->nr_active + zone->nr_inactive) * 6)
-                               zone->all_unreclaimable = 1;
+                               (zone_page_state(zone, NR_ACTIVE)
+                               + zone_page_state(zone, NR_INACTIVE)) * 6)
+                                       zone_set_flag(zone,
+                                                     ZONE_ALL_UNRECLAIMABLE);
                        /*
                         * If we've done a decent amount of scanning and
                         * the reclaim ratio is low, start doing writepage
@@ -1188,7 +1445,7 @@ scan:
                 * another pass across the zones.
                 */
                if (total_scanned && priority < DEF_PRIORITY - 2)
-                       blk_congestion_wait(WRITE, HZ/10);
+                       congestion_wait(WRITE, HZ/10);
 
                /*
                 * We do this so kswapd doesn't build up large priorities for
@@ -1200,13 +1457,21 @@ scan:
                        break;
        }
 out:
+       /*
+        * Note within each zone the priority level at which this zone was
+        * brought into a happy state, so that the next thread which scans this
+        * zone will start out at that priority level.
+        */
        for (i = 0; i < pgdat->nr_zones; i++) {
                struct zone *zone = pgdat->node_zones + i;
 
-               zone->prev_priority = zone->temp_priority;
+               zone->prev_priority = temp_priority[i];
        }
        if (!all_zones_ok) {
                cond_resched();
+
+               try_to_freeze();
+
                goto loop_again;
        }
 
@@ -1255,13 +1520,12 @@ static int kswapd(void *p)
         * trying to free the first piece of memory in the first place).
         */
        tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
+       set_freezable();
 
        order = 0;
        for ( ; ; ) {
                unsigned long new_order;
 
-               try_to_freeze();
-
                prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
                new_order = pgdat->kswapd_max_order;
                pgdat->kswapd_max_order = 0;
@@ -1272,12 +1536,19 @@ static int kswapd(void *p)
                         */
                        order = new_order;
                } else {
-                       schedule();
+                       if (!freezing(current))
+                               schedule();
+
                        order = pgdat->kswapd_max_order;
                }
                finish_wait(&pgdat->kswapd_wait, &wait);
 
-               balance_pgdat(pgdat, order);
+               if (!try_to_freeze()) {
+                       /* We can speed up thawing tasks if we don't call
+                        * balance_pgdat after returning from the refrigerator
+                        */
+                       balance_pgdat(pgdat, order);
+               }
        }
        return 0;
 }
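
The kswapd changes above follow the general shape of a freezer-aware kernel thread: mark the thread freezable once, avoid going to sleep while a freeze is pending, and call try_to_freeze() at a safe point each time around the loop. A stripped-down sketch of that pattern, not part of this patch; my_thread_work() and the timed sleep are placeholders for kswapd's real waitqueue-based sleep:

#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Hypothetical placeholder for the thread's real work. */
static void my_thread_work(void *data)
{
}

static int my_kthread(void *data)
{
	set_freezable();

	while (!kthread_should_stop()) {
		/* Don't go to sleep if the freezer is waiting for us. */
		if (!freezing(current))
			schedule_timeout_interruptible(HZ);

		/* try_to_freeze() returns non-zero if we just thawed out;
		 * in that case skip the work and re-check our state. */
		if (!try_to_freeze())
			my_thread_work(data);
	}
	return 0;
}
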
@@ -1297,7 +1568,7 @@ void wakeup_kswapd(struct zone *zone, int order)
                return;
        if (pgdat->kswapd_max_order < order)
                pgdat->kswapd_max_order = order;
-       if (!cpuset_zone_allowed(zone, __GFP_HARDWALL))
+       if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                return;
        if (!waitqueue_active(&pgdat->kswapd_wait))
                return;
@@ -1312,8 +1583,8 @@ void wakeup_kswapd(struct zone *zone, int order)
  *
  * For pass > 3 we also try to shrink the LRU lists that contain a few pages
  */
-static unsigned long shrink_all_zones(unsigned long nr_pages, int pass,
-                                     int prio, struct scan_control *sc)
+static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
+                                     int pass, struct scan_control *sc)
 {
        struct zone *zone;
        unsigned long nr_to_scan, ret = 0;
@@ -1323,23 +1594,27 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int pass,
                if (!populated_zone(zone))
                        continue;
 
-               if (zone->all_unreclaimable && prio != DEF_PRIORITY)
+               if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
                        continue;
 
                /* For pass = 0 we don't shrink the active list */
                if (pass > 0) {
-                       zone->nr_scan_active += (zone->nr_active >> prio) + 1;
+                       zone->nr_scan_active +=
+                               (zone_page_state(zone, NR_ACTIVE) >> prio) + 1;
                        if (zone->nr_scan_active >= nr_pages || pass > 3) {
                                zone->nr_scan_active = 0;
-                               nr_to_scan = min(nr_pages, zone->nr_active);
-                               shrink_active_list(nr_to_scan, zone, sc);
+                               nr_to_scan = min(nr_pages,
+                                       zone_page_state(zone, NR_ACTIVE));
+                               shrink_active_list(nr_to_scan, zone, sc, prio);
                        }
                }
 
-               zone->nr_scan_inactive += (zone->nr_inactive >> prio) + 1;
+               zone->nr_scan_inactive +=
+                       (zone_page_state(zone, NR_INACTIVE) >> prio) + 1;
                if (zone->nr_scan_inactive >= nr_pages || pass > 3) {
                        zone->nr_scan_inactive = 0;
-                       nr_to_scan = min(nr_pages, zone->nr_inactive);
+                       nr_to_scan = min(nr_pages,
+                               zone_page_state(zone, NR_INACTIVE));
                        ret += shrink_inactive_list(nr_to_scan, zone, sc);
                        if (ret >= nr_pages)
                                return ret;
@@ -1349,6 +1624,11 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int pass,
        return ret;
 }
 
+static unsigned long count_lru_pages(void)
+{
+       return global_page_state(NR_ACTIVE) + global_page_state(NR_INACTIVE);
+}
+
 /*
  * Try to free `nr_pages' of memory, system-wide, and return the number of
  * freed pages.
@@ -1363,7 +1643,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
        unsigned long ret = 0;
        int pass;
        struct reclaim_state reclaim_state;
-       struct zone *zone;
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
                .may_swap = 0,
@@ -1374,10 +1653,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 
        current->reclaim_state = &reclaim_state;
 
-       lru_pages = 0;
-       for_each_zone(zone)
-               lru_pages += zone->nr_active + zone->nr_inactive;
-
+       lru_pages = count_lru_pages();
        nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
        /* If slab caches are huge, it's better to hit them first */
        while (nr_slab >= lru_pages) {
@@ -1404,13 +1680,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
        for (pass = 0; pass < 5; pass++) {
                int prio;
 
-               /* Needed for shrinking slab caches later on */
-               if (!lru_pages)
-                       for_each_zone(zone) {
-                               lru_pages += zone->nr_active;
-                               lru_pages += zone->nr_inactive;
-                       }
-
                /* Force reclaiming mapped pages in the passes #3 and #4 */
                if (pass > 2) {
                        sc.may_swap = 1;
@@ -1426,28 +1695,28 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
                                goto out;
 
                        reclaim_state.reclaimed_slab = 0;
-                       shrink_slab(sc.nr_scanned, sc.gfp_mask, lru_pages);
+                       shrink_slab(sc.nr_scanned, sc.gfp_mask,
+                                       count_lru_pages());
                        ret += reclaim_state.reclaimed_slab;
                        if (ret >= nr_pages)
                                goto out;
 
                        if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
-                               blk_congestion_wait(WRITE, HZ / 10);
+                               congestion_wait(WRITE, HZ / 10);
                }
-
-               lru_pages = 0;
        }
 
        /*
         * If ret = 0, we could not shrink LRUs, but there may be something
         * in slab caches
         */
-       if (!ret)
+       if (!ret) {
                do {
                        reclaim_state.reclaimed_slab = 0;
-                       shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
+                       shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages());
                        ret += reclaim_state.reclaimed_slab;
                } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
+       }
 
 out:
        current->reclaim_state = NULL;
@@ -1456,7 +1725,6 @@ out:
 }
 #endif
 
-#ifdef CONFIG_HOTPLUG_CPU
 /* It's optimal to keep kswapds on the same CPUs as their memory, but
    not required for correctness.  So if the last cpu in a node goes
    away, we get changed to run anywhere: as the first one comes back,
@@ -1466,9 +1734,11 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
 {
        pg_data_t *pgdat;
        cpumask_t mask;
+       int nid;
 
-       if (action == CPU_ONLINE) {
-               for_each_online_pgdat(pgdat) {
+       if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
+               for_each_node_state(nid, N_HIGH_MEMORY) {
+                       pgdat = NODE_DATA(nid);
                        mask = node_to_cpumask(pgdat->node_id);
                        if (any_online_cpu(mask) != NR_CPUS)
                                /* One of our CPUs online: restore mask */
@@ -1477,7 +1747,6 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
        }
        return NOTIFY_OK;
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 /*
  * This kswapd start function will be called by init and node-hot-add.
@@ -1506,7 +1775,7 @@ static int __init kswapd_init(void)
        int nid;
 
        swap_setup();
-       for_each_online_node(nid)
+       for_each_node_state(nid, N_HIGH_MEMORY)
                kswapd_run(nid);
        hotcpu_notifier(cpu_callback, 0);
        return 0;
@@ -1588,6 +1857,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                 */
                priority = ZONE_RECLAIM_PRIORITY;
                do {
+                       note_zone_scanning_priority(zone, priority);
                        nr_reclaimed += shrink_zone(priority, zone, &sc);
                        priority--;
                } while (priority >= 0 && nr_reclaimed < nr_pages);
@@ -1625,8 +1895,8 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 
 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 {
-       cpumask_t mask;
        int node_id;
+       int ret;
 
        /*
         * Zone reclaim reclaims unmapped file backed pages and
@@ -1644,15 +1914,13 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                        <= zone->min_slab_pages)
                return 0;
 
+       if (zone_is_all_unreclaimable(zone))
+               return 0;
+
        /*
-        * Avoid concurrent zone reclaims, do not reclaim in a zone that does
-        * not have reclaimable pages and if we should not delay the allocation
-        * then do not scan.
+        * Do not scan if the allocation should not be delayed.
         */
-       if (!(gfp_mask & __GFP_WAIT) ||
-               zone->all_unreclaimable ||
-               atomic_read(&zone->reclaim_in_progress) > 0 ||
-               (current->flags & PF_MEMALLOC))
+       if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC))
                        return 0;
 
        /*
@@ -1662,9 +1930,14 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
         * as wide as possible.
         */
        node_id = zone_to_nid(zone);
-       mask = node_to_cpumask(node_id);
-       if (!cpus_empty(mask) && node_id != numa_node_id())
+       if (node_state(node_id, N_CPU) && node_id != numa_node_id())
                return 0;
-       return __zone_reclaim(zone, gfp_mask, order);
+
+       if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED))
+               return 0;
+       ret = __zone_reclaim(zone, gfp_mask, order);
+       zone_clear_flag(zone, ZONE_RECLAIM_LOCKED);
+
+       return ret;
 }
 #endif