[PATCH] SPI: infrastructure to initialize spi_device.mode early
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 548e023..eeacb0d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -34,6 +34,7 @@
 #include <linux/notifier.h>
 #include <linux/rwsem.h>
 #include <linux/delay.h>
+#include <linux/kthread.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
 
 #include "internal.h"
 
-/* possible outcome of pageout() */
-typedef enum {
-       /* failed to write page out, page is locked */
-       PAGE_KEEP,
-       /* move page to the active list, page is locked */
-       PAGE_ACTIVATE,
-       /* page has been sent to the disk successfully, page is unlocked */
-       PAGE_SUCCESS,
-       /* page is clean and locked */
-       PAGE_CLEAN,
-} pageout_t;
-
 struct scan_control {
        /* Incremented by the number of inactive pages that were scanned */
        unsigned long nr_scanned;
@@ -73,6 +62,8 @@ struct scan_control {
         * In this context, it doesn't matter that we scan the
         * whole list at once. */
        int swap_cluster_max;
+
+       int swappiness;
 };
 
 /*
@@ -120,7 +111,7 @@ struct shrinker {
  * From 0 .. 100.  Higher means more swappy.
  */
 int vm_swappiness = 60;
-static long total_memory;
+long vm_total_pages;   /* The total number of pages which the VM controls */
 
 static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
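A note on the two symbols touched here: vm_swappiness stays as the system-wide default (0..100, exposed to userspace through the usual /proc/sys/vm/swappiness sysctl, whose wiring is not part of this diff), while the new sc->swappiness field added above lets each reclaim invocation carry its own value, so shrink_all_memory() can override it without disturbing the global. A stand-alone sketch of reading the global knob, illustrative only:

/*
 * Illustration only, not part of the patch: read the global default that
 * /proc/sys/vm/swappiness exposes (it backs vm_swappiness).
 */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/vm/swappiness", "r");
        int swappiness;

        if (!f) {
                perror("fopen");
                return 1;
        }
        if (fscanf(f, "%d", &swappiness) != 1) {
                fclose(f);
                return 1;
        }
        fclose(f);
        printf("vm_swappiness = %d (0..100, higher means more swappy)\n",
               swappiness);
        return 0;
}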
@@ -300,6 +291,18 @@ static void handle_write_error(struct address_space *mapping,
        unlock_page(page);
 }
 
+/* possible outcome of pageout() */
+typedef enum {
+       /* failed to write page out, page is locked */
+       PAGE_KEEP,
+       /* move page to the active list, page is locked */
+       PAGE_ACTIVATE,
+       /* page has been sent to the disk successfully, page is unlocked */
+       PAGE_SUCCESS,
+       /* page is clean and locked */
+       PAGE_CLEAN,
+} pageout_t;
+
 /*
  * pageout is called by shrink_page_list() for each dirty page.
  * Calls ->writepage().
@@ -349,6 +352,8 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
                struct writeback_control wbc = {
                        .sync_mode = WB_SYNC_NONE,
                        .nr_to_write = SWAP_CLUSTER_MAX,
+                       .range_start = 0,
+                       .range_end = LLONG_MAX,
                        .nonblocking = 1,
                        .for_reclaim = 1,
                };
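The two writeback_control fields added above are byte offsets into the mapping; 0 .. LLONG_MAX simply means "write back any part of the file", which is what pageout() wants. For contrast, a hedged sketch of a bounded request; example_write_span is a hypothetical helper, not code from this patch, and error handling is kept minimal:

/*
 * Illustration only: ask the filesystem to write just the bytes
 * [pos, pos + len) of a mapping, in contrast to the 0 .. LLONG_MAX
 * whole-mapping range used by pageout() above.
 */
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/writeback.h>

static int example_write_span(struct address_space *mapping,
                              loff_t pos, loff_t len)
{
        struct writeback_control wbc = {
                .sync_mode   = WB_SYNC_ALL,
                .nr_to_write = LONG_MAX,
                .range_start = pos,
                .range_end   = pos + len - 1,
        };

        return do_writepages(mapping, &wbc);
}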
@@ -372,7 +377,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
        return PAGE_CLEAN;
 }
 
-static int remove_mapping(struct address_space *mapping, struct page *page)
+int remove_mapping(struct address_space *mapping, struct page *page)
 {
        if (!mapping)
                return 0;               /* truncate got there first */
@@ -570,481 +575,6 @@ keep:
        return nr_reclaimed;
 }
 
-#ifdef CONFIG_MIGRATION
-static inline void move_to_lru(struct page *page)
-{
-       list_del(&page->lru);
-       if (PageActive(page)) {
-               /*
-                * lru_cache_add_active checks that
-                * the PG_active bit is off.
-                */
-               ClearPageActive(page);
-               lru_cache_add_active(page);
-       } else {
-               lru_cache_add(page);
-       }
-       put_page(page);
-}
-
-/*
- * Add isolated pages on the list back to the LRU.
- *
- * returns the number of pages put back.
- */
-unsigned long putback_lru_pages(struct list_head *l)
-{
-       struct page *page;
-       struct page *page2;
-       unsigned long count = 0;
-
-       list_for_each_entry_safe(page, page2, l, lru) {
-               move_to_lru(page);
-               count++;
-       }
-       return count;
-}
-
-/*
- * Non migratable page
- */
-int fail_migrate_page(struct page *newpage, struct page *page)
-{
-       return -EIO;
-}
-EXPORT_SYMBOL(fail_migrate_page);
-
-/*
- * swapout a single page
- * page is locked upon entry, unlocked on exit
- */
-static int swap_page(struct page *page)
-{
-       struct address_space *mapping = page_mapping(page);
-
-       if (page_mapped(page) && mapping)
-               if (try_to_unmap(page, 1) != SWAP_SUCCESS)
-                       goto unlock_retry;
-
-       if (PageDirty(page)) {
-               /* Page is dirty, try to write it out here */
-               switch(pageout(page, mapping)) {
-               case PAGE_KEEP:
-               case PAGE_ACTIVATE:
-                       goto unlock_retry;
-
-               case PAGE_SUCCESS:
-                       goto retry;
-
-               case PAGE_CLEAN:
-                       ; /* try to free the page below */
-               }
-       }
-
-       if (PagePrivate(page)) {
-               if (!try_to_release_page(page, GFP_KERNEL) ||
-                   (!mapping && page_count(page) == 1))
-                       goto unlock_retry;
-       }
-
-       if (remove_mapping(mapping, page)) {
-               /* Success */
-               unlock_page(page);
-               return 0;
-       }
-
-unlock_retry:
-       unlock_page(page);
-
-retry:
-       return -EAGAIN;
-}
-EXPORT_SYMBOL(swap_page);
-
-/*
- * Page migration was first developed in the context of the memory hotplug
- * project. The main authors of the migration code are:
- *
- * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
- * Hirokazu Takahashi <taka@valinux.co.jp>
- * Dave Hansen <haveblue@us.ibm.com>
- * Christoph Lameter <clameter@sgi.com>
- */
-
-/*
- * Remove references for a page and establish the new page with the correct
- * basic settings to be able to stop accesses to the page.
- */
-int migrate_page_remove_references(struct page *newpage,
-                               struct page *page, int nr_refs)
-{
-       struct address_space *mapping = page_mapping(page);
-       struct page **radix_pointer;
-
-       /*
-        * Avoid doing any of the following work if the page count
-        * indicates that the page is in use or truncate has removed
-        * the page.
-        */
-       if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
-               return -EAGAIN;
-
-       /*
-        * Establish swap ptes for anonymous pages or destroy pte
-        * maps for files.
-        *
-        * In order to reestablish file backed mappings the fault handlers
-        * will take the radix tree_lock which may then be used to stop
-        * processses from accessing this page until the new page is ready.
-        *
-        * A process accessing via a swap pte (an anonymous page) will take a
-        * page_lock on the old page which will block the process until the
-        * migration attempt is complete. At that time the PageSwapCache bit
-        * will be examined. If the page was migrated then the PageSwapCache
-        * bit will be clear and the operation to retrieve the page will be
-        * retried which will find the new page in the radix tree. Then a new
-        * direct mapping may be generated based on the radix tree contents.
-        *
-        * If the page was not migrated then the PageSwapCache bit
-        * is still set and the operation may continue.
-        */
-       if (try_to_unmap(page, 1) == SWAP_FAIL)
-               /* A vma has VM_LOCKED set -> Permanent failure */
-               return -EPERM;
-
-       /*
-        * Give up if we were unable to remove all mappings.
-        */
-       if (page_mapcount(page))
-               return -EAGAIN;
-
-       write_lock_irq(&mapping->tree_lock);
-
-       radix_pointer = (struct page **)radix_tree_lookup_slot(
-                                               &mapping->page_tree,
-                                               page_index(page));
-
-       if (!page_mapping(page) || page_count(page) != nr_refs ||
-                       *radix_pointer != page) {
-               write_unlock_irq(&mapping->tree_lock);
-               return -EAGAIN;
-       }
-
-       /*
-        * Now we know that no one else is looking at the page.
-        *
-        * Certain minimal information about a page must be available
-        * in order for other subsystems to properly handle the page if they
-        * find it through the radix tree update before we are finished
-        * copying the page.
-        */
-       get_page(newpage);
-       newpage->index = page->index;
-       newpage->mapping = page->mapping;
-       if (PageSwapCache(page)) {
-               SetPageSwapCache(newpage);
-               set_page_private(newpage, page_private(page));
-       }
-
-       *radix_pointer = newpage;
-       __put_page(page);
-       write_unlock_irq(&mapping->tree_lock);
-
-       return 0;
-}
-EXPORT_SYMBOL(migrate_page_remove_references);
-
-/*
- * Copy the page to its new location
- */
-void migrate_page_copy(struct page *newpage, struct page *page)
-{
-       copy_highpage(newpage, page);
-
-       if (PageError(page))
-               SetPageError(newpage);
-       if (PageReferenced(page))
-               SetPageReferenced(newpage);
-       if (PageUptodate(page))
-               SetPageUptodate(newpage);
-       if (PageActive(page))
-               SetPageActive(newpage);
-       if (PageChecked(page))
-               SetPageChecked(newpage);
-       if (PageMappedToDisk(page))
-               SetPageMappedToDisk(newpage);
-
-       if (PageDirty(page)) {
-               clear_page_dirty_for_io(page);
-               set_page_dirty(newpage);
-       }
-
-       ClearPageSwapCache(page);
-       ClearPageActive(page);
-       ClearPagePrivate(page);
-       set_page_private(page, 0);
-       page->mapping = NULL;
-
-       /*
-        * If any waiters have accumulated on the new page then
-        * wake them up.
-        */
-       if (PageWriteback(newpage))
-               end_page_writeback(newpage);
-}
-EXPORT_SYMBOL(migrate_page_copy);
-
-/*
- * Common logic to directly migrate a single page suitable for
- * pages that do not use PagePrivate.
- *
- * Pages are locked upon entry and exit.
- */
-int migrate_page(struct page *newpage, struct page *page)
-{
-       int rc;
-
-       BUG_ON(PageWriteback(page));    /* Writeback must be complete */
-
-       rc = migrate_page_remove_references(newpage, page, 2);
-
-       if (rc)
-               return rc;
-
-       migrate_page_copy(newpage, page);
-
-       /*
-        * Remove auxiliary swap entries and replace
-        * them with real ptes.
-        *
-        * Note that a real pte entry will allow processes that are not
-        * waiting on the page lock to use the new page via the page tables
-        * before the new page is unlocked.
-        */
-       remove_from_swap(newpage);
-       return 0;
-}
-EXPORT_SYMBOL(migrate_page);
-
-/*
- * migrate_pages
- *
- * Two lists are passed to this function. The first list
- * contains the pages isolated from the LRU to be migrated.
- * The second list contains new pages that the pages isolated
- * can be moved to. If the second list is NULL then all
- * pages are swapped out.
- *
- * The function returns after 10 attempts or if no pages
- * are movable anymore because to has become empty
- * or no retryable pages exist anymore.
- *
- * Return: Number of pages not migrated when "to" ran empty.
- */
-unsigned long migrate_pages(struct list_head *from, struct list_head *to,
-                 struct list_head *moved, struct list_head *failed)
-{
-       unsigned long retry;
-       unsigned long nr_failed = 0;
-       int pass = 0;
-       struct page *page;
-       struct page *page2;
-       int swapwrite = current->flags & PF_SWAPWRITE;
-       int rc;
-
-       if (!swapwrite)
-               current->flags |= PF_SWAPWRITE;
-
-redo:
-       retry = 0;
-
-       list_for_each_entry_safe(page, page2, from, lru) {
-               struct page *newpage = NULL;
-               struct address_space *mapping;
-
-               cond_resched();
-
-               rc = 0;
-               if (page_count(page) == 1)
-                       /* page was freed from under us. So we are done. */
-                       goto next;
-
-               if (to && list_empty(to))
-                       break;
-
-               /*
-                * Skip locked pages during the first two passes to give the
-                * functions holding the lock time to release the page. Later we
-                * use lock_page() to have a higher chance of acquiring the
-                * lock.
-                */
-               rc = -EAGAIN;
-               if (pass > 2)
-                       lock_page(page);
-               else
-                       if (TestSetPageLocked(page))
-                               goto next;
-
-               /*
-                * Only wait on writeback if we have already done a pass where
-                * we we may have triggered writeouts for lots of pages.
-                */
-               if (pass > 0) {
-                       wait_on_page_writeback(page);
-               } else {
-                       if (PageWriteback(page))
-                               goto unlock_page;
-               }
-
-               /*
-                * Anonymous pages must have swap cache references otherwise
-                * the information contained in the page maps cannot be
-                * preserved.
-                */
-               if (PageAnon(page) && !PageSwapCache(page)) {
-                       if (!add_to_swap(page, GFP_KERNEL)) {
-                               rc = -ENOMEM;
-                               goto unlock_page;
-                       }
-               }
-
-               if (!to) {
-                       rc = swap_page(page);
-                       goto next;
-               }
-
-               newpage = lru_to_page(to);
-               lock_page(newpage);
-
-               /*
-                * Pages are properly locked and writeback is complete.
-                * Try to migrate the page.
-                */
-               mapping = page_mapping(page);
-               if (!mapping)
-                       goto unlock_both;
-
-               if (mapping->a_ops->migratepage) {
-                       /*
-                        * Most pages have a mapping and most filesystems
-                        * should provide a migration function. Anonymous
-                        * pages are part of swap space which also has its
-                        * own migration function. This is the most common
-                        * path for page migration.
-                        */
-                       rc = mapping->a_ops->migratepage(newpage, page);
-                       goto unlock_both;
-                }
-
-               /*
-                * Default handling if a filesystem does not provide
-                * a migration function. We can only migrate clean
-                * pages so try to write out any dirty pages first.
-                */
-               if (PageDirty(page)) {
-                       switch (pageout(page, mapping)) {
-                       case PAGE_KEEP:
-                       case PAGE_ACTIVATE:
-                               goto unlock_both;
-
-                       case PAGE_SUCCESS:
-                               unlock_page(newpage);
-                               goto next;
-
-                       case PAGE_CLEAN:
-                               ; /* try to migrate the page below */
-                       }
-                }
-
-               /*
-                * Buffers are managed in a filesystem specific way.
-                * We must have no buffers or drop them.
-                */
-               if (!page_has_buffers(page) ||
-                   try_to_release_page(page, GFP_KERNEL)) {
-                       rc = migrate_page(newpage, page);
-                       goto unlock_both;
-               }
-
-               /*
-                * On early passes with mapped pages simply
-                * retry. There may be a lock held for some
-                * buffers that may go away. Later
-                * swap them out.
-                */
-               if (pass > 4) {
-                       /*
-                        * Persistently unable to drop buffers..... As a
-                        * measure of last resort we fall back to
-                        * swap_page().
-                        */
-                       unlock_page(newpage);
-                       newpage = NULL;
-                       rc = swap_page(page);
-                       goto next;
-               }
-
-unlock_both:
-               unlock_page(newpage);
-
-unlock_page:
-               unlock_page(page);
-
-next:
-               if (rc == -EAGAIN) {
-                       retry++;
-               } else if (rc) {
-                       /* Permanent failure */
-                       list_move(&page->lru, failed);
-                       nr_failed++;
-               } else {
-                       if (newpage) {
-                               /* Successful migration. Return page to LRU */
-                               move_to_lru(newpage);
-                       }
-                       list_move(&page->lru, moved);
-               }
-       }
-       if (retry && pass++ < 10)
-               goto redo;
-
-       if (!swapwrite)
-               current->flags &= ~PF_SWAPWRITE;
-
-       return nr_failed + retry;
-}
-
-/*
- * Isolate one page from the LRU lists and put it on the
- * indicated list with elevated refcount.
- *
- * Result:
- *  0 = page not on LRU list
- *  1 = page removed from LRU list and added to the specified list.
- */
-int isolate_lru_page(struct page *page)
-{
-       int ret = 0;
-
-       if (PageLRU(page)) {
-               struct zone *zone = page_zone(page);
-               spin_lock_irq(&zone->lru_lock);
-               if (PageLRU(page)) {
-                       ret = 1;
-                       get_page(page);
-                       ClearPageLRU(page);
-                       if (PageActive(page))
-                               del_page_from_active_list(zone, page);
-                       else
-                               del_page_from_inactive_list(zone, page);
-               }
-               spin_unlock_irq(&zone->lru_lock);
-       }
-
-       return ret;
-}
-#endif
-
 /*
  * zone->lru_lock is heavily contended.  Some of the functions that
  * shrink the lists perform better by taking out a batch of pages
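Everything removed in the hunk above is the old in-place page-migration implementation (move_to_lru(), putback_lru_pages(), swap_page(), migrate_page_*(), migrate_pages(), isolate_lru_page()); it was moved out of vmscan.c into its own file (mm/migrate.c in mainline) rather than dropped. A sketch of how a caller used these interfaces, built only from the signatures and comments shown above; example_drain_isolated_pages is a hypothetical name, and passing to == NULL asks migrate_pages() to swap the isolated pages out:

/*
 * Hypothetical caller, for illustration only: drain a list of pages
 * previously taken off the LRU with isolate_lru_page().  Whatever
 * remains on any of the lists afterwards goes back to the LRU.
 */
#include <linux/list.h>
#include <linux/swap.h>

static unsigned long example_drain_isolated_pages(struct list_head *isolated)
{
        LIST_HEAD(moved);
        LIST_HEAD(failed);
        unsigned long nr_left;

        nr_left = migrate_pages(isolated, NULL, &moved, &failed);

        putback_lru_pages(&moved);              /* successfully processed */
        putback_lru_pages(&failed);             /* permanent failures     */
        putback_lru_pages(isolated);            /* retries that ran out   */

        return nr_left;
}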
@@ -1214,7 +744,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                 * how much memory
                 * is mapped.
                 */
-               mapped_ratio = (sc->nr_mapped * 100) / total_memory;
+               mapped_ratio = (sc->nr_mapped * 100) / vm_total_pages;
 
                /*
                 * Now decide how much we really want to unmap some pages.  The
@@ -1228,7 +758,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                 * A 100% value of vm_swappiness overrides this algorithm
                 * altogether.
                 */
-               swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
+               swap_tendency = mapped_ratio / 2 + distress + sc->swappiness;
 
                /*
                 * Now use this metric to decide whether to start moving mapped
@@ -1444,6 +974,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
                .may_writepage = !laptop_mode,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
                .may_swap = 1,
+               .swappiness = vm_swappiness,
        };
 
        inc_page_state(allocstall);
@@ -1508,10 +1039,6 @@ out:
  * For kswapd, balance_pgdat() will work across all this node's zones until
  * they are all at pages_high.
  *
- * If `nr_pages' is non-zero then it is the number of pages which are to be
- * reclaimed, regardless of the zone occupancies.  This is a software suspend
- * special.
- *
  * Returns the number of pages which were actually freed.
  *
  * There is special handling here for zones which are full of pinned pages.
@@ -1529,10 +1056,8 @@ out:
  * the page allocator fallback scheme to ensure that aging of pages is balanced
  * across the zones.
  */
-static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
-                               int order)
+static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 {
-       unsigned long to_free = nr_pages;
        int all_zones_ok;
        int priority;
        int i;
@@ -1542,13 +1067,14 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
        struct scan_control sc = {
                .gfp_mask = GFP_KERNEL,
                .may_swap = 1,
-               .swap_cluster_max = nr_pages ? nr_pages : SWAP_CLUSTER_MAX,
+               .swap_cluster_max = SWAP_CLUSTER_MAX,
+               .swappiness = vm_swappiness,
        };
 
 loop_again:
        total_scanned = 0;
        nr_reclaimed = 0;
-       sc.may_writepage = !laptop_mode,
+       sc.may_writepage = !laptop_mode;
        sc.nr_mapped = read_page_state(nr_mapped);
 
        inc_page_state(pageoutrun);
@@ -1569,31 +1095,26 @@ loop_again:
 
                all_zones_ok = 1;
 
-               if (nr_pages == 0) {
-                       /*
-                        * Scan in the highmem->dma direction for the highest
-                        * zone which needs scanning
-                        */
-                       for (i = pgdat->nr_zones - 1; i >= 0; i--) {
-                               struct zone *zone = pgdat->node_zones + i;
+               /*
+                * Scan in the highmem->dma direction for the highest
+                * zone which needs scanning
+                */
+               for (i = pgdat->nr_zones - 1; i >= 0; i--) {
+                       struct zone *zone = pgdat->node_zones + i;
 
-                               if (!populated_zone(zone))
-                                       continue;
+                       if (!populated_zone(zone))
+                               continue;
 
-                               if (zone->all_unreclaimable &&
-                                               priority != DEF_PRIORITY)
-                                       continue;
+                       if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+                               continue;
 
-                               if (!zone_watermark_ok(zone, order,
-                                               zone->pages_high, 0, 0)) {
-                                       end_zone = i;
-                                       goto scan;
-                               }
+                       if (!zone_watermark_ok(zone, order, zone->pages_high,
+                                              0, 0)) {
+                               end_zone = i;
+                               goto scan;
                        }
-                       goto out;
-               } else {
-                       end_zone = pgdat->nr_zones - 1;
                }
+               goto out;
 scan:
                for (i = 0; i <= end_zone; i++) {
                        struct zone *zone = pgdat->node_zones + i;
@@ -1620,11 +1141,9 @@ scan:
                        if (zone->all_unreclaimable && priority != DEF_PRIORITY)
                                continue;
 
-                       if (nr_pages == 0) {    /* Not software suspend */
-                               if (!zone_watermark_ok(zone, order,
-                                               zone->pages_high, end_zone, 0))
-                                       all_zones_ok = 0;
-                       }
+                       if (!zone_watermark_ok(zone, order, zone->pages_high,
+                                              end_zone, 0))
+                               all_zones_ok = 0;
                        zone->temp_priority = priority;
                        if (zone->prev_priority > priority)
                                zone->prev_priority = priority;
@@ -1649,8 +1168,6 @@ scan:
                            total_scanned > nr_reclaimed + nr_reclaimed / 2)
                                sc.may_writepage = 1;
                }
-               if (nr_pages && to_free > nr_reclaimed)
-                       continue;       /* swsusp: need to do more work */
                if (all_zones_ok)
                        break;          /* kswapd: all done */
                /*
@@ -1666,7 +1183,7 @@ scan:
                 * matches the direct reclaim path behaviour in terms of impact
                 * on zone->*_priority.
                 */
-               if ((nr_reclaimed >= SWAP_CLUSTER_MAX) && !nr_pages)
+               if (nr_reclaimed >= SWAP_CLUSTER_MAX)
                        break;
        }
 out:
@@ -1707,7 +1224,6 @@ static int kswapd(void *p)
        };
        cpumask_t cpumask;
 
-       daemonize("kswapd%d", pgdat->node_id);
        cpumask = node_to_cpumask(pgdat->node_id);
        if (!cpus_empty(cpumask))
                set_cpus_allowed(tsk, cpumask);
@@ -1748,7 +1264,7 @@ static int kswapd(void *p)
                }
                finish_wait(&pgdat->kswapd_wait, &wait);
 
-               balance_pgdat(pgdat, 0, order);
+               balance_pgdat(pgdat, order);
        }
        return 0;
 }
@@ -1777,35 +1293,154 @@ void wakeup_kswapd(struct zone *zone, int order)
 
 #ifdef CONFIG_PM
 /*
- * Try to free `nr_pages' of memory, system-wide.  Returns the number of freed
- * pages.
+ * Helper function for shrink_all_memory().  Tries to reclaim 'nr_pages' pages
+ * from LRU lists system-wide, for given pass and priority, and returns the
+ * number of reclaimed pages
+ *
+ * For pass > 3 we also try to shrink the LRU lists that contain a few pages
+ */
+static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
+                                     int pass, struct scan_control *sc)
+{
+       struct zone *zone;
+       unsigned long nr_to_scan, ret = 0;
+
+       for_each_zone(zone) {
+
+               if (!populated_zone(zone))
+                       continue;
+
+               if (zone->all_unreclaimable && prio != DEF_PRIORITY)
+                       continue;
+
+               /* For pass = 0 we don't shrink the active list */
+               if (pass > 0) {
+                       zone->nr_scan_active += (zone->nr_active >> prio) + 1;
+                       if (zone->nr_scan_active >= nr_pages || pass > 3) {
+                               zone->nr_scan_active = 0;
+                               nr_to_scan = min(nr_pages, zone->nr_active);
+                               shrink_active_list(nr_to_scan, zone, sc);
+                       }
+               }
+
+               zone->nr_scan_inactive += (zone->nr_inactive >> prio) + 1;
+               if (zone->nr_scan_inactive >= nr_pages || pass > 3) {
+                       zone->nr_scan_inactive = 0;
+                       nr_to_scan = min(nr_pages, zone->nr_inactive);
+                       ret += shrink_inactive_list(nr_to_scan, zone, sc);
+                       if (ret >= nr_pages)
+                               return ret;
+               }
+       }
+
+       return ret;
+}
+
+/*
+ * Try to free `nr_pages' of memory, system-wide, and return the number of
+ * freed pages.
+ *
+ * Rather than trying to age LRUs the aim is to preserve the overall
+ * LRU order by reclaiming preferentially
+ * inactive > active > active referenced > active mapped
  */
 unsigned long shrink_all_memory(unsigned long nr_pages)
 {
-       pg_data_t *pgdat;
-       unsigned long nr_to_free = nr_pages;
+       unsigned long lru_pages, nr_slab;
        unsigned long ret = 0;
-       unsigned retry = 2;
-       struct reclaim_state reclaim_state = {
-               .reclaimed_slab = 0,
+       int pass;
+       struct reclaim_state reclaim_state;
+       struct zone *zone;
+       struct scan_control sc = {
+               .gfp_mask = GFP_KERNEL,
+               .may_swap = 0,
+               .swap_cluster_max = nr_pages,
+               .may_writepage = 1,
+               .swappiness = vm_swappiness,
        };
 
        current->reclaim_state = &reclaim_state;
-repeat:
-       for_each_pgdat(pgdat) {
-               unsigned long freed;
-
-               freed = balance_pgdat(pgdat, nr_to_free, 0);
-               ret += freed;
-               nr_to_free -= freed;
-               if ((long)nr_to_free <= 0)
+
+       lru_pages = 0;
+       for_each_zone(zone)
+               lru_pages += zone->nr_active + zone->nr_inactive;
+
+       nr_slab = read_page_state(nr_slab);
+       /* If slab caches are huge, it's better to hit them first */
+       while (nr_slab >= lru_pages) {
+               reclaim_state.reclaimed_slab = 0;
+               shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
+               if (!reclaim_state.reclaimed_slab)
                        break;
+
+               ret += reclaim_state.reclaimed_slab;
+               if (ret >= nr_pages)
+                       goto out;
+
+               nr_slab -= reclaim_state.reclaimed_slab;
        }
-       if (retry-- && ret < nr_pages) {
-               blk_congestion_wait(WRITE, HZ/5);
-               goto repeat;
+
+       /*
+        * We try to shrink LRUs in 5 passes:
+        * 0 = Reclaim from inactive_list only
+        * 1 = Reclaim from active list but don't reclaim mapped
+        * 2 = 2nd pass of type 1
+        * 3 = Reclaim mapped (normal reclaim)
+        * 4 = 2nd pass of type 3
+        */
+       for (pass = 0; pass < 5; pass++) {
+               int prio;
+
+               /* Needed for shrinking slab caches later on */
+               if (!lru_pages)
+                       for_each_zone(zone) {
+                               lru_pages += zone->nr_active;
+                               lru_pages += zone->nr_inactive;
+                       }
+
+               /* Force reclaiming mapped pages in the passes #3 and #4 */
+               if (pass > 2) {
+                       sc.may_swap = 1;
+                       sc.swappiness = 100;
+               }
+
+               for (prio = DEF_PRIORITY; prio >= 0; prio--) {
+                       unsigned long nr_to_scan = nr_pages - ret;
+
+                       sc.nr_mapped = read_page_state(nr_mapped);
+                       sc.nr_scanned = 0;
+
+                       ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
+                       if (ret >= nr_pages)
+                               goto out;
+
+                       reclaim_state.reclaimed_slab = 0;
+                       shrink_slab(sc.nr_scanned, sc.gfp_mask, lru_pages);
+                       ret += reclaim_state.reclaimed_slab;
+                       if (ret >= nr_pages)
+                               goto out;
+
+                       if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
+                               blk_congestion_wait(WRITE, HZ / 10);
+               }
+
+               lru_pages = 0;
        }
+
+       /*
+        * If ret = 0, we could not shrink LRUs, but there may be something
+        * in slab caches
+        */
+       if (!ret)
+               do {
+                       reclaim_state.reclaimed_slab = 0;
+                       shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
+                       ret += reclaim_state.reclaimed_slab;
+               } while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
+
+out:
        current->reclaim_state = NULL;
+
        return ret;
 }
 #endif
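The rewritten shrink_all_memory() above walks five passes, and within each pass it sweeps priorities from DEF_PRIORITY down to 0; pass 0 touches only the inactive lists (see shrink_all_zones()), while passes 3 and 4 additionally set may_swap and force swappiness to 100. A stand-alone model of that schedule, assuming DEF_PRIORITY is 12 (its usual value, not defined in this file):

/*
 * Stand-alone model of the pass/priority schedule used by the new
 * shrink_all_memory() above; nothing below is kernel code.
 */
#include <stdio.h>

#define DEF_PRIORITY 12

int main(void)
{
        int pass, prio;

        for (pass = 0; pass < 5; pass++) {
                /* pass 0 scans only the inactive lists; passes 3 and 4
                 * also unmap pages and force swappiness to 100 */
                int may_swap = (pass > 2);
                int swappiness = may_swap ? 100 : 60;   /* 60 = default */

                for (prio = DEF_PRIORITY; prio >= 0; prio--)
                        printf("pass %d prio %2d may_swap=%d swappiness=%d\n",
                               pass, prio, may_swap, swappiness);
        }
        return 0;
}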
@@ -1822,7 +1457,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
        cpumask_t mask;
 
        if (action == CPU_ONLINE) {
-               for_each_pgdat(pgdat) {
+               for_each_online_pgdat(pgdat) {
                        mask = node_to_cpumask(pgdat->node_id);
                        if (any_online_cpu(mask) != NR_CPUS)
                                /* One of our CPUs online: restore mask */
@@ -1833,19 +1468,35 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int __init kswapd_init(void)
+/*
+ * This kswapd start function will be called by init and node-hot-add.
+ * On node-hot-add, kswapd will be moved to the proper cpus if cpus are hot-added.
+ */
+int kswapd_run(int nid)
 {
-       pg_data_t *pgdat;
+       pg_data_t *pgdat = NODE_DATA(nid);
+       int ret = 0;
 
-       swap_setup();
-       for_each_pgdat(pgdat) {
-               pid_t pid;
+       if (pgdat->kswapd)
+               return 0;
 
-               pid = kernel_thread(kswapd, pgdat, CLONE_KERNEL);
-               BUG_ON(pid < 0);
-               pgdat->kswapd = find_task_by_pid(pid);
+       pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
+       if (IS_ERR(pgdat->kswapd)) {
+               /* failure at boot is fatal */
+               BUG_ON(system_state == SYSTEM_BOOTING);
+               printk("Failed to start kswapd on node %d\n", nid);
+               ret = -1;
        }
-       total_memory = nr_free_pagecache_pages();
+       return ret;
+}
+
+static int __init kswapd_init(void)
+{
+       int nid;
+
+       swap_setup();
+       for_each_online_node(nid)
+               kswapd_run(nid);
        hotcpu_notifier(cpu_callback, 0);
        return 0;
 }
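With kthread_run() the old kernel_thread()/daemonize()/find_task_by_pid() dance disappears and kswapd_run() gets the task_struct back directly, which is what allows a kswapd to be started later for a hot-added node. A minimal, generic sketch of the kthread_run()/kthread_stop() pattern follows (a hypothetical out-of-tree module, not kswapd itself, which never stops its thread here):

/*
 * Minimal, generic kthread_run()/kthread_stop() sketch; illustrative
 * only, all names are hypothetical.
 */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *worker;

static int worker_fn(void *data)
{
        while (!kthread_should_stop()) {
                /* periodic work would go here */
                msleep(1000);
        }
        return 0;
}

static int __init example_init(void)
{
        worker = kthread_run(worker_fn, NULL, "example_worker");
        if (IS_ERR(worker))
                return PTR_ERR(worker);
        return 0;
}

static void __exit example_exit(void)
{
        kthread_stop(worker);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");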
@@ -1901,6 +1552,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                .swap_cluster_max = max_t(unsigned long, nr_pages,
                                        SWAP_CLUSTER_MAX),
                .gfp_mask = gfp_mask,
+               .swappiness = vm_swappiness,
        };
 
        disable_swap_token();