nfsd: support ext4 i_version
diff --git a/mm/readahead.c b/mm/readahead.c
index c5c8981..133b6d5 100644
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 2002, Linus Torvalds
  *
- * 09Apr2002   akpm@zip.com.au
+ * 09Apr2002   Andrew Morton
  *             Initial version.
  */
 
 #include <linux/pagevec.h>
 #include <linux/pagemap.h>
 
-void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
-{
-}
-EXPORT_SYMBOL(default_unplug_io_fn);
-
-struct backing_dev_info default_backing_dev_info = {
-       .ra_pages       = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
-       .state          = 0,
-       .capabilities   = BDI_CAP_MAP_COPY,
-       .unplug_io_fn   = default_unplug_io_fn,
-};
-EXPORT_SYMBOL_GPL(default_backing_dev_info);
-
 /*
  * Initialise a struct file's readahead state.  Assumes that the caller has
  * memset *ra to zero.
@@ -44,6 +31,42 @@ EXPORT_SYMBOL_GPL(file_ra_state_init);
 
 #define list_to_page(head) (list_entry((head)->prev, struct page, lru))
 
+/*
+ * see if a page needs releasing upon read_cache_pages() failure
+ * - the caller of read_cache_pages() may have set PG_private or PG_fscache
+ *   before calling, such as the NFS fs marking pages that are cached locally
+ *   on disk, thus we need to give the fs a chance to clean up in the event of
+ *   an error
+ */
+static void read_cache_pages_invalidate_page(struct address_space *mapping,
+                                            struct page *page)
+{
+       if (page_has_private(page)) {
+               if (!trylock_page(page))
+                       BUG();
+               page->mapping = mapping;
+               do_invalidatepage(page, 0);
+               page->mapping = NULL;
+               unlock_page(page);
+       }
+       page_cache_release(page);
+}
+
+/*
+ * release a list of pages, invalidating them first if need be
+ */
+static void read_cache_pages_invalidate_pages(struct address_space *mapping,
+                                             struct list_head *pages)
+{
+       struct page *victim;
+
+       while (!list_empty(pages)) {
+               victim = list_to_page(pages);
+               list_del(&victim->lru);
+               read_cache_pages_invalidate_page(mapping, victim);
+       }
+}
+
 /**
  * read_cache_pages - populate an address space with some pages & start reads against them
  * @mapping: the address_space
@@ -58,28 +81,25 @@ int read_cache_pages(struct address_space *mapping, struct list_head *pages,
                        int (*filler)(void *, struct page *), void *data)
 {
        struct page *page;
-       struct pagevec lru_pvec;
        int ret = 0;
 
-       pagevec_init(&lru_pvec, 0);
-
        while (!list_empty(pages)) {
                page = list_to_page(pages);
                list_del(&page->lru);
-               if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
-                       page_cache_release(page);
+               if (add_to_page_cache_lru(page, mapping,
+                                       page->index, GFP_KERNEL)) {
+                       read_cache_pages_invalidate_page(mapping, page);
                        continue;
                }
+               page_cache_release(page);
+
                ret = filler(data, page);
-               if (!pagevec_add(&lru_pvec, page))
-                       __pagevec_lru_add(&lru_pvec);
-               if (ret) {
-                       put_pages_list(pages);
+               if (unlikely(ret)) {
+                       read_cache_pages_invalidate_pages(mapping, pages);
                        break;
                }
                task_io_account_read(PAGE_CACHE_SIZE);
        }
-       pagevec_lru_add(&lru_pvec);
        return ret;
 }
 
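With the switch to add_to_page_cache_lru(), read_cache_pages() no longer has
to batch pages into a local pagevec, and failure handling is centralised in
the invalidate helpers added above, so the filesystem gets a chance to clean
up PG_private/PG_fscache state on every page that will not be read. For
context, read_cache_pages() is typically driven from a filesystem's
->readpages() implementation, which supplies a per-page filler callback. A
minimal sketch of such a caller follows; every myfs_* name is hypothetical,
only read_cache_pages() itself is real:

	#include <linux/fs.h>
	#include <linux/pagemap.h>

	/* Hypothetical per-page read routine provided by the filesystem. */
	static int myfs_readpage(struct file *file, struct page *page);

	static int myfs_filler(void *data, struct page *page)
	{
		/* @data is the struct file handed to read_cache_pages(). */
		return myfs_readpage((struct file *)data, page);
	}

	static int myfs_readpages(struct file *file, struct address_space *mapping,
				  struct list_head *pages, unsigned nr_pages)
	{
		/*
		 * read_cache_pages() inserts each page into the page cache
		 * (and, after this patch, onto the LRU), runs the filler on
		 * it, and invalidates/releases the remaining pages itself if
		 * the filler fails.
		 */
		return read_cache_pages(mapping, pages, myfs_filler, file);
	}
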
@@ -89,7 +109,6 @@ static int read_pages(struct address_space *mapping, struct file *filp,
                struct list_head *pages, unsigned nr_pages)
 {
        unsigned page_idx;
-       struct pagevec lru_pvec;
        int ret;
 
        if (mapping->a_ops->readpages) {
@@ -99,19 +118,15 @@ static int read_pages(struct address_space *mapping, struct file *filp,
                goto out;
        }
 
-       pagevec_init(&lru_pvec, 0);
        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                struct page *page = list_to_page(pages);
                list_del(&page->lru);
-               if (!add_to_page_cache(page, mapping,
+               if (!add_to_page_cache_lru(page, mapping,
                                        page->index, GFP_KERNEL)) {
                        mapping->a_ops->readpage(filp, page);
-                       if (!pagevec_add(&lru_pvec, page))
-                               __pagevec_lru_add(&lru_pvec);
-               } else
-                       page_cache_release(page);
+               }
+               page_cache_release(page);
        }
-       pagevec_lru_add(&lru_pvec);
        ret = 0;
 out:
        return ret;
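read_pages() is a dispatcher: when the mapping provides ->readpages() it hands
over the whole list in a single call, otherwise it falls back to inserting
each page with add_to_page_cache_lru() and issuing ->readpage() on it; either
way the local reference taken from the list is dropped with
page_cache_release(). As a hedged sketch, the two hooks a filesystem would
wire up look roughly like this (the myfs_* names are hypothetical, the a_ops
field names are real):

	static const struct address_space_operations myfs_aops = {
		.readpage	= myfs_readpage,	/* per-page fallback path */
		.readpages	= myfs_readpages,	/* batched readahead path */
	};
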
@@ -237,7 +252,7 @@ int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
  */
 unsigned long max_sane_readahead(unsigned long nr)
 {
-       return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE)
+       return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE_FILE)
                + node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
 }
 
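The readahead clamp now counts only inactive file pages, matching the split of
the LRU into file- and anon-backed lists. As a worked example with assumed
numbers: if the local node has 60,000 inactive file pages and 20,000 free
pages, max_sane_readahead() limits any request to (60,000 + 20,000) / 2 =
40,000 pages, however large nr was.
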
@@ -378,9 +393,9 @@ ondemand_readahead(struct address_space *mapping,
        if (hit_readahead_marker) {
                pgoff_t start;
 
-               read_lock_irq(&mapping->tree_lock);
-               start = radix_tree_next_hole(&mapping->page_tree, offset, max+1);
-               read_unlock_irq(&mapping->tree_lock);
+               rcu_read_lock();
+               start = radix_tree_next_hole(&mapping->page_tree, offset, max+1);
+               rcu_read_unlock();
 
                if (!start || start - offset > max)
                        return 0;
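The tree_lock round trip is replaced by an RCU read-side section: radix tree
lookups are safe under rcu_read_lock(), and radix_tree_next_hole() is only
producing a readahead hint here, so a best-effort answer that races with
concurrent page-cache insertions is acceptable; at worst the heuristic reads
ahead a little more or less than ideal.
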
@@ -445,9 +460,10 @@ EXPORT_SYMBOL_GPL(page_cache_sync_readahead);
  *            pagecache pages
  *
  * page_cache_async_readahead() should be called when a page is used which
- * has the PG_readahead flag: this is a marker to suggest that the application
+ * has the PG_readahead flag; this is a marker to suggest that the application
  * has used up enough of the readahead window that we should start pulling in
- * more pages. */
+ * more pages.
+ */
 void
 page_cache_async_readahead(struct address_space *mapping,
                           struct file_ra_state *ra, struct file *filp,