[PATCH] Add find_get_pages_contig(): contiguous variant of find_get_pages()
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 85e95f4..4649a63 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
 #include <linux/cpuset.h>
 #include <linux/notifier.h>
 #include <linux/rwsem.h>
+#include <linux/delay.h>
 
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
 
 #include <linux/swapops.h>
 
-/* possible outcome of pageout() */
-typedef enum {
-       /* failed to write page out, page is locked */
-       PAGE_KEEP,
-       /* move page to the active list, page is locked */
-       PAGE_ACTIVATE,
-       /* page has been sent to the disk successfully, page is unlocked */
-       PAGE_SUCCESS,
-       /* page is clean and locked */
-       PAGE_CLEAN,
-} pageout_t;
+#include "internal.h"
 
 struct scan_control {
        /* Incremented by the number of inactive pages that were scanned */
@@ -301,7 +292,7 @@ static void handle_write_error(struct address_space *mapping,
  * pageout is called by shrink_page_list() for each dirty page.
  * Calls ->writepage().
  */
-static pageout_t pageout(struct page *page, struct address_space *mapping)
+pageout_t pageout(struct page *page, struct address_space *mapping)
 {
        /*
         * If the page is dirty, only perform writeback if that write
@@ -369,7 +360,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping)
        return PAGE_CLEAN;
 }
 
-static int remove_mapping(struct address_space *mapping, struct page *page)
+int remove_mapping(struct address_space *mapping, struct page *page)
 {
        if (!mapping)
                return 0;               /* truncate got there first */
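
With pageout() and remove_mapping() no longer static, their declarations
presumably live in the newly included "internal.h", together with the
pageout_t enum deleted from the top of this file. A sketch of what that
header would have to carry, reconstructed from the removed lines (not
copied from the actual header):

	/* mm/internal.h (sketch) */

	/* possible outcome of pageout() */
	typedef enum {
		PAGE_KEEP,	/* failed to write page out, page is locked */
		PAGE_ACTIVATE,	/* move page to the active list, page is locked */
		PAGE_SUCCESS,	/* page sent to disk successfully, page is unlocked */
		PAGE_CLEAN,	/* page is clean and locked */
	} pageout_t;

	extern pageout_t pageout(struct page *page, struct address_space *mapping);
	extern int remove_mapping(struct address_space *mapping, struct page *page);
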
@@ -458,12 +449,9 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                 * Anonymous process memory has backing store?
                 * Try to allocate it some swap space here.
                 */
-               if (PageAnon(page) && !PageSwapCache(page)) {
-                       if (!sc->may_swap)
-                               goto keep_locked;
+               if (PageAnon(page) && !PageSwapCache(page))
                        if (!add_to_swap(page, GFP_ATOMIC))
                                goto activate_locked;
-               }
 #endif /* CONFIG_SWAP */
 
                mapping = page_mapping(page);
@@ -475,12 +463,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
                 * processes. Try to unmap it here.
                 */
                if (page_mapped(page) && mapping) {
-                       /*
-                        * No unmapping if we do not swap
-                        */
-                       if (!sc->may_swap)
-                               goto keep_locked;
-
                        switch (try_to_unmap(page, 0)) {
                        case SWAP_FAIL:
                                goto activate_locked;
@@ -576,481 +558,6 @@ keep:
        return nr_reclaimed;
 }
 
-#ifdef CONFIG_MIGRATION
-static inline void move_to_lru(struct page *page)
-{
-       list_del(&page->lru);
-       if (PageActive(page)) {
-               /*
-                * lru_cache_add_active checks that
-                * the PG_active bit is off.
-                */
-               ClearPageActive(page);
-               lru_cache_add_active(page);
-       } else {
-               lru_cache_add(page);
-       }
-       put_page(page);
-}
-
-/*
- * Add isolated pages on the list back to the LRU.
- *
- * returns the number of pages put back.
- */
-unsigned long putback_lru_pages(struct list_head *l)
-{
-       struct page *page;
-       struct page *page2;
-       unsigned long count = 0;
-
-       list_for_each_entry_safe(page, page2, l, lru) {
-               move_to_lru(page);
-               count++;
-       }
-       return count;
-}
-
-/*
- * Non-migratable page
- */
-int fail_migrate_page(struct page *newpage, struct page *page)
-{
-       return -EIO;
-}
-EXPORT_SYMBOL(fail_migrate_page);
-
-/*
- * swapout a single page
- * page is locked upon entry, unlocked on exit
- */
-static int swap_page(struct page *page)
-{
-       struct address_space *mapping = page_mapping(page);
-
-       if (page_mapped(page) && mapping)
-               if (try_to_unmap(page, 1) != SWAP_SUCCESS)
-                       goto unlock_retry;
-
-       if (PageDirty(page)) {
-               /* Page is dirty, try to write it out here */
-               switch(pageout(page, mapping)) {
-               case PAGE_KEEP:
-               case PAGE_ACTIVATE:
-                       goto unlock_retry;
-
-               case PAGE_SUCCESS:
-                       goto retry;
-
-               case PAGE_CLEAN:
-                       ; /* try to free the page below */
-               }
-       }
-
-       if (PagePrivate(page)) {
-               if (!try_to_release_page(page, GFP_KERNEL) ||
-                   (!mapping && page_count(page) == 1))
-                       goto unlock_retry;
-       }
-
-       if (remove_mapping(mapping, page)) {
-               /* Success */
-               unlock_page(page);
-               return 0;
-       }
-
-unlock_retry:
-       unlock_page(page);
-
-retry:
-       return -EAGAIN;
-}
-EXPORT_SYMBOL(swap_page);
-
-/*
- * Page migration was first developed in the context of the memory hotplug
- * project. The main authors of the migration code are:
- *
- * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
- * Hirokazu Takahashi <taka@valinux.co.jp>
- * Dave Hansen <haveblue@us.ibm.com>
- * Christoph Lameter <clameter@sgi.com>
- */
-
-/*
- * Remove references for a page and establish the new page with the correct
- * basic settings to be able to stop accesses to the page.
- */
-int migrate_page_remove_references(struct page *newpage,
-                               struct page *page, int nr_refs)
-{
-       struct address_space *mapping = page_mapping(page);
-       struct page **radix_pointer;
-
-       /*
-        * Avoid doing any of the following work if the page count
-        * indicates that the page is in use or truncate has removed
-        * the page.
-        */
-       if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
-               return -EAGAIN;
-
-       /*
-        * Establish swap ptes for anonymous pages or destroy pte
-        * maps for files.
-        *
-        * In order to reestablish file backed mappings the fault handlers
-        * will take the radix tree_lock which may then be used to stop
-        * processes from accessing this page until the new page is ready.
-        *
-        * A process accessing via a swap pte (an anonymous page) will take a
-        * page_lock on the old page which will block the process until the
-        * migration attempt is complete. At that time the PageSwapCache bit
-        * will be examined. If the page was migrated then the PageSwapCache
-        * bit will be clear and the operation to retrieve the page will be
-        * retried which will find the new page in the radix tree. Then a new
-        * direct mapping may be generated based on the radix tree contents.
-        *
-        * If the page was not migrated then the PageSwapCache bit
-        * is still set and the operation may continue.
-        */
-       if (try_to_unmap(page, 1) == SWAP_FAIL)
-               /* A vma has VM_LOCKED set -> Permanent failure */
-               return -EPERM;
-
-       /*
-        * Give up if we were unable to remove all mappings.
-        */
-       if (page_mapcount(page))
-               return -EAGAIN;
-
-       write_lock_irq(&mapping->tree_lock);
-
-       radix_pointer = (struct page **)radix_tree_lookup_slot(
-                                               &mapping->page_tree,
-                                               page_index(page));
-
-       if (!page_mapping(page) || page_count(page) != nr_refs ||
-                       *radix_pointer != page) {
-               write_unlock_irq(&mapping->tree_lock);
-               return -EAGAIN;
-       }
-
-       /*
-        * Now we know that no one else is looking at the page.
-        *
-        * Certain minimal information about a page must be available
-        * in order for other subsystems to properly handle the page if they
-        * find it through the radix tree update before we are finished
-        * copying the page.
-        */
-       get_page(newpage);
-       newpage->index = page->index;
-       newpage->mapping = page->mapping;
-       if (PageSwapCache(page)) {
-               SetPageSwapCache(newpage);
-               set_page_private(newpage, page_private(page));
-       }
-
-       *radix_pointer = newpage;
-       __put_page(page);
-       write_unlock_irq(&mapping->tree_lock);
-
-       return 0;
-}
-EXPORT_SYMBOL(migrate_page_remove_references);
-
-/*
- * Copy the page to its new location
- */
-void migrate_page_copy(struct page *newpage, struct page *page)
-{
-       copy_highpage(newpage, page);
-
-       if (PageError(page))
-               SetPageError(newpage);
-       if (PageReferenced(page))
-               SetPageReferenced(newpage);
-       if (PageUptodate(page))
-               SetPageUptodate(newpage);
-       if (PageActive(page))
-               SetPageActive(newpage);
-       if (PageChecked(page))
-               SetPageChecked(newpage);
-       if (PageMappedToDisk(page))
-               SetPageMappedToDisk(newpage);
-
-       if (PageDirty(page)) {
-               clear_page_dirty_for_io(page);
-               set_page_dirty(newpage);
-       }
-
-       ClearPageSwapCache(page);
-       ClearPageActive(page);
-       ClearPagePrivate(page);
-       set_page_private(page, 0);
-       page->mapping = NULL;
-
-       /*
-        * If any waiters have accumulated on the new page then
-        * wake them up.
-        */
-       if (PageWriteback(newpage))
-               end_page_writeback(newpage);
-}
-EXPORT_SYMBOL(migrate_page_copy);
-
-/*
- * Common logic to directly migrate a single page suitable for
- * pages that do not use PagePrivate.
- *
- * Pages are locked upon entry and exit.
- */
-int migrate_page(struct page *newpage, struct page *page)
-{
-       int rc;
-
-       BUG_ON(PageWriteback(page));    /* Writeback must be complete */
-
-       rc = migrate_page_remove_references(newpage, page, 2);
-
-       if (rc)
-               return rc;
-
-       migrate_page_copy(newpage, page);
-
-       /*
-        * Remove auxiliary swap entries and replace
-        * them with real ptes.
-        *
-        * Note that a real pte entry will allow processes that are not
-        * waiting on the page lock to use the new page via the page tables
-        * before the new page is unlocked.
-        */
-       remove_from_swap(newpage);
-       return 0;
-}
-EXPORT_SYMBOL(migrate_page);
-
-/*
- * migrate_pages
- *
- * Two lists are passed to this function. The first list
- * contains the pages isolated from the LRU to be migrated.
- * The second list contains new pages that the pages isolated
- * can be moved to. If the second list is NULL then all
- * pages are swapped out.
- *
- * The function returns after 10 attempts or if no pages
- * are movable anymore because "to" has become empty
- * or no retryable pages remain.
- *
- * Return: Number of pages not migrated when "to" ran empty.
- */
-unsigned long migrate_pages(struct list_head *from, struct list_head *to,
-                 struct list_head *moved, struct list_head *failed)
-{
-       unsigned long retry;
-       unsigned long nr_failed = 0;
-       int pass = 0;
-       struct page *page;
-       struct page *page2;
-       int swapwrite = current->flags & PF_SWAPWRITE;
-       int rc;
-
-       if (!swapwrite)
-               current->flags |= PF_SWAPWRITE;
-
-redo:
-       retry = 0;
-
-       list_for_each_entry_safe(page, page2, from, lru) {
-               struct page *newpage = NULL;
-               struct address_space *mapping;
-
-               cond_resched();
-
-               rc = 0;
-               if (page_count(page) == 1)
-                       /* page was freed from under us. So we are done. */
-                       goto next;
-
-               if (to && list_empty(to))
-                       break;
-
-               /*
-                * Skip locked pages during the first two passes to give the
-                * functions holding the lock time to release the page. Later we
-                * use lock_page() to have a higher chance of acquiring the
-                * lock.
-                */
-               rc = -EAGAIN;
-               if (pass > 2)
-                       lock_page(page);
-               else
-                       if (TestSetPageLocked(page))
-                               goto next;
-
-               /*
-                * Only wait on writeback if we have already done a pass where
-                * we may have triggered writeouts for lots of pages.
-                */
-               if (pass > 0) {
-                       wait_on_page_writeback(page);
-               } else {
-                       if (PageWriteback(page))
-                               goto unlock_page;
-               }
-
-               /*
-                * Anonymous pages must have swap cache references otherwise
-                * the information contained in the page maps cannot be
-                * preserved.
-                */
-               if (PageAnon(page) && !PageSwapCache(page)) {
-                       if (!add_to_swap(page, GFP_KERNEL)) {
-                               rc = -ENOMEM;
-                               goto unlock_page;
-                       }
-               }
-
-               if (!to) {
-                       rc = swap_page(page);
-                       goto next;
-               }
-
-               newpage = lru_to_page(to);
-               lock_page(newpage);
-
-               /*
-                * Pages are properly locked and writeback is complete.
-                * Try to migrate the page.
-                */
-               mapping = page_mapping(page);
-               if (!mapping)
-                       goto unlock_both;
-
-               if (mapping->a_ops->migratepage) {
-                       /*
-                        * Most pages have a mapping and most filesystems
-                        * should provide a migration function. Anonymous
-                        * pages are part of swap space which also has its
-                        * own migration function. This is the most common
-                        * path for page migration.
-                        */
-                       rc = mapping->a_ops->migratepage(newpage, page);
-                       goto unlock_both;
-                }
-
-               /*
-                * Default handling if a filesystem does not provide
-                * a migration function. We can only migrate clean
-                * pages so try to write out any dirty pages first.
-                */
-               if (PageDirty(page)) {
-                       switch (pageout(page, mapping)) {
-                       case PAGE_KEEP:
-                       case PAGE_ACTIVATE:
-                               goto unlock_both;
-
-                       case PAGE_SUCCESS:
-                               unlock_page(newpage);
-                               goto next;
-
-                       case PAGE_CLEAN:
-                               ; /* try to migrate the page below */
-                       }
-                }
-
-               /*
-                * Buffers are managed in a filesystem specific way.
-                * We must have no buffers or drop them.
-                */
-               if (!page_has_buffers(page) ||
-                   try_to_release_page(page, GFP_KERNEL)) {
-                       rc = migrate_page(newpage, page);
-                       goto unlock_both;
-               }
-
-               /*
-                * On early passes with mapped pages simply
-                * retry. There may be a lock held for some
-                * buffers that may go away. Later
-                * swap them out.
-                */
-               if (pass > 4) {
-                       /*
-                        * Persistently unable to drop buffers... As a
-                        * measure of last resort we fall back to
-                        * swap_page().
-                        */
-                       unlock_page(newpage);
-                       newpage = NULL;
-                       rc = swap_page(page);
-                       goto next;
-               }
-
-unlock_both:
-               unlock_page(newpage);
-
-unlock_page:
-               unlock_page(page);
-
-next:
-               if (rc == -EAGAIN) {
-                       retry++;
-               } else if (rc) {
-                       /* Permanent failure */
-                       list_move(&page->lru, failed);
-                       nr_failed++;
-               } else {
-                       if (newpage) {
-                               /* Successful migration. Return page to LRU */
-                               move_to_lru(newpage);
-                       }
-                       list_move(&page->lru, moved);
-               }
-       }
-       if (retry && pass++ < 10)
-               goto redo;
-
-       if (!swapwrite)
-               current->flags &= ~PF_SWAPWRITE;
-
-       return nr_failed + retry;
-}
-
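
A caller of migrate_pages() is expected to fill "from" with pages isolated
via isolate_lru_page() (defined below) and "to" with freshly allocated
destination pages. A hypothetical usage sketch, modeled loosely on how the
mempolicy code drives this interface (allocation and error handling
abridged):

	LIST_HEAD(from);	/* isolated source pages */
	LIST_HEAD(to);		/* newly allocated destination pages */
	LIST_HEAD(moved);
	LIST_HEAD(failed);
	unsigned long nr_failed;

	/* ... isolate_lru_page() each source page onto "from" ... */
	nr_failed = migrate_pages(&from, &to, &moved, &failed);

	putback_lru_pages(&moved);	/* successes rejoin the LRU */
	putback_lru_pages(&failed);	/* permanent failures go back too */
	/* unused pages left on "to" are simply freed by the caller */
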
-/*
- * Isolate one page from the LRU lists and put it on the
- * indicated list with elevated refcount.
- *
- * Result:
- *  0 = page not on LRU list
- *  1 = page removed from LRU list and added to the specified list.
- */
-int isolate_lru_page(struct page *page)
-{
-       int ret = 0;
-
-       if (PageLRU(page)) {
-               struct zone *zone = page_zone(page);
-               spin_lock_irq(&zone->lru_lock);
-               if (PageLRU(page)) {
-                       ret = 1;
-                       get_page(page);
-                       ClearPageLRU(page);
-                       if (PageActive(page))
-                               del_page_from_active_list(zone, page);
-                       else
-                               del_page_from_inactive_list(zone, page);
-               }
-               spin_unlock_irq(&zone->lru_lock);
-       }
-
-       return ret;
-}
-#endif
-
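
None of the migration code deleted above leaves the kernel: the whole
CONFIG_MIGRATION block, move_to_lru() through isolate_lru_page(), moves out
of vmscan.c (in mainline it landed in mm/migrate.c). A sketch of the
resulting external interface, with signatures copied from the deleted
definitions (the header location is an assumption):

	/* include/linux/migrate.h (sketch) */
	extern int isolate_lru_page(struct page *page);
	extern unsigned long putback_lru_pages(struct list_head *l);
	extern int migrate_page(struct page *newpage, struct page *page);
	extern void migrate_page_copy(struct page *newpage, struct page *page);
	extern int migrate_page_remove_references(struct page *newpage,
					struct page *page, int nr_refs);
	extern unsigned long migrate_pages(struct list_head *from,
					struct list_head *to,
					struct list_head *moved,
					struct list_head *failed);
	extern int fail_migrate_page(struct page *newpage, struct page *page);
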
 /*
  * zone->lru_lock is heavily contended.  Some of the functions that
  * shrink the lists perform better by taking out a batch of pages
@@ -1132,9 +639,6 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                zone->pages_scanned += nr_scan;
                spin_unlock_irq(&zone->lru_lock);
 
-               if (nr_taken == 0)
-                       goto done;
-
                nr_scanned += nr_scan;
                nr_freed = shrink_page_list(&page_list, sc);
                nr_reclaimed += nr_freed;
@@ -1146,6 +650,9 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                        __mod_page_state_zone(zone, pgscan_direct, nr_scan);
                __mod_page_state_zone(zone, pgsteal, nr_freed);
 
+               if (nr_taken == 0)
+                       goto done;
+
                spin_lock(&zone->lru_lock);
                /*
                 * Put back any unfreeable pages.
@@ -1166,8 +673,9 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                        }
                }
        } while (nr_scanned < max_scan);
-       spin_unlock_irq(&zone->lru_lock);
+       spin_unlock(&zone->lru_lock);
 done:
+       local_irq_enable();
        pagevec_release(&pvec);
        return nr_reclaimed;
 }
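
The unlock rework above is subtle: the "nr_taken == 0" exit is moved after
the per-cpu page-state accounting, which runs with interrupts disabled (a
local_irq_disable() precedes the __mod_page_state_zone() calls in context
not shown here). Both exit paths therefore now reach "done:" with
interrupts off, and re-enabling happens exactly once. The resulting control
flow, abridged:

	spin_lock_irq(&zone->lru_lock);
	do {
		/* ... isolate a batch of pages ... */
		spin_unlock_irq(&zone->lru_lock);	/* irqs on for the real work */
		nr_freed = shrink_page_list(&page_list, sc);
		local_irq_disable();			/* per-cpu stats need irqs off */
		/* ... __mod_page_state_zone() accounting ... */
		if (nr_taken == 0)
			goto done;			/* arrives with irqs disabled */
		spin_lock(&zone->lru_lock);		/* irqs already off: plain lock */
		/* ... put back unfreeable pages ... */
	} while (nr_scanned < max_scan);
	spin_unlock(&zone->lru_lock);			/* drop the lock, keep irqs off */
done:
	local_irq_enable();				/* single re-enable for both paths */
	pagevec_release(&pvec);
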
@@ -1202,7 +710,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
        struct pagevec pvec;
        int reclaim_mapped = 0;
 
-       if (unlikely(sc->may_swap)) {
+       if (sc->may_swap) {
                long mapped_ratio;
                long distress;
                long swap_tendency;
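
Dropping the unlikely() hint fits the other may_swap changes in this patch:
with the keep_locked shortcuts removed from shrink_page_list() above, this
test appears to be the main remaining policy point for !may_swap, and it is
the common case rather than a rare one. For context, a sketch of the gate
it guards, assuming the swappiness heuristic matches the mainline code of
this era:

	if (sc->may_swap) {
		long mapped_ratio, distress, swap_tendency;

		/* ... mapped_ratio and distress derived from zone state ... */
		swap_tendency = mapped_ratio / 2 + distress + vm_swappiness;
		if (swap_tendency >= 100)
			reclaim_mapped = 1;	/* allow reclaim of mapped pages */
	}
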
@@ -1790,12 +1298,14 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
        pg_data_t *pgdat;
        unsigned long nr_to_free = nr_pages;
        unsigned long ret = 0;
+       unsigned retry = 2;
        struct reclaim_state reclaim_state = {
                .reclaimed_slab = 0,
        };
 
        current->reclaim_state = &reclaim_state;
-       for_each_pgdat(pgdat) {
+repeat:
+       for_each_online_pgdat(pgdat) {
                unsigned long freed;
 
                freed = balance_pgdat(pgdat, nr_to_free, 0);
@@ -1804,6 +1314,10 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
                if ((long)nr_to_free <= 0)
                        break;
        }
+       if (retry-- && ret < nr_pages) {
+               blk_congestion_wait(WRITE, HZ/5);
+               goto repeat;
+       }
        current->reclaim_state = NULL;
        return ret;
 }
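
shrink_all_memory() is used by the software-suspend path to free memory for
the suspend image; the bounded retry added above (at most three sweeps, with
a blk_congestion_wait() pause between rounds) lets a single call try harder
before reporting a shortfall. A hypothetical caller, for illustration only
(the function name and error handling are not part of this patch):

	/* Try to free at least "pages" pages before suspending. */
	static int free_pages_for_suspend(unsigned long pages)
	{
		unsigned long freed = shrink_all_memory(pages);

		/*
		 * A shortfall now means repeated pgdat sweeps, with waits
		 * for congested writeback in between, still failed.
		 */
		return freed >= pages ? 0 : -ENOMEM;
	}
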
@@ -1814,14 +1328,14 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
    not required for correctness.  So if the last cpu in a node goes
    away, we get changed to run anywhere: as the first one comes back,
    restore their cpu bindings. */
-static int __devinit cpu_callback(struct notifier_block *nfb,
+static int cpu_callback(struct notifier_block *nfb,
                                  unsigned long action, void *hcpu)
 {
        pg_data_t *pgdat;
        cpumask_t mask;
 
        if (action == CPU_ONLINE) {
-               for_each_pgdat(pgdat) {
+               for_each_online_pgdat(pgdat) {
                        mask = node_to_cpumask(pgdat->node_id);
                        if (any_online_cpu(mask) != NR_CPUS)
                                /* One of our CPUs online: restore mask */
@@ -1837,12 +1351,14 @@ static int __init kswapd_init(void)
        pg_data_t *pgdat;
 
        swap_setup();
-       for_each_pgdat(pgdat) {
+       for_each_online_pgdat(pgdat) {
                pid_t pid;
 
                pid = kernel_thread(kswapd, pgdat, CLONE_KERNEL);
                BUG_ON(pid < 0);
+               read_lock(&tasklist_lock);
                pgdat->kswapd = find_task_by_pid(pid);
+               read_unlock(&tasklist_lock);
        }
        total_memory = nr_free_pagecache_pages();
        hotcpu_notifier(cpu_callback, 0);
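
The read_lock(&tasklist_lock) pair added around find_task_by_pid() closes a
small race: the task list may change between kernel_thread() returning and
the pid lookup, and find_task_by_pid() is only safe while the tasklist is
held stable. The general pattern, sketched (the get_task_struct() pinning
shown here is an extra precaution, not something this patch adds):

	struct task_struct *task;

	read_lock(&tasklist_lock);
	task = find_task_by_pid(pid);	/* pointer valid only under the lock */
	if (task)
		get_task_struct(task);	/* pin it for use after the unlock */
	read_unlock(&tasklist_lock);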