FS-Cache: Release page->private after failed readahead
diff --git a/mm/filemap.c b/mm/filemap.c
index 455119c..126d397 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
 #include <linux/backing-dev.h>
 #include <linux/pagevec.h>
 #include <linux/blkdev.h>
-#include <linux/backing-dev.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
 #include <linux/cpuset.h>
 #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
+#include <linux/memcontrol.h>
+#include <linux/mm_inline.h> /* for page_is_file_cache() */
 #include "internal.h"
 
 /*
@@ -42,9 +43,6 @@
 
 #include <asm/mman.h>
 
-static ssize_t
-generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-       loff_t offset, unsigned long nr_segs);
 
 /*
  * Shared mappings implemented 30.11.1994. It's not fully working yet,
@@ -65,7 +63,6 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
  *    ->private_lock           (__free_pte->__set_page_dirty_buffers)
  *      ->swap_lock            (exclusive_swap_page, others)
  *        ->mapping->tree_lock
- *          ->zone.lock
  *
  *  ->i_mutex
  *    ->i_mmap_lock            (truncate->unmap_mapping_range)
@@ -113,7 +110,7 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
 /*
  * Remove a page from the page cache and free it. Caller has to make
  * sure the page is locked and that nobody else uses it - or that usage
- * is safe.  The caller must hold a write_lock on the mapping's tree_lock.
+ * is safe.  The caller must hold the mapping's tree_lock.
  */
 void __remove_from_page_cache(struct page *page)
 {
@@ -124,6 +121,19 @@ void __remove_from_page_cache(struct page *page)
        mapping->nrpages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
        BUG_ON(page_mapped(page));
+       mem_cgroup_uncharge_cache_page(page);
+
+       /*
+        * Some filesystems seem to re-dirty the page even after
+        * the VM has canceled the dirty bit (eg ext3 journaling).
+        *
+        * Fix it up by doing a final dirty accounting check after
+        * having removed the page entirely.
+        */
+       if (PageDirty(page) && mapping_cap_account_dirty(mapping)) {
+               dec_zone_page_state(page, NR_FILE_DIRTY);
+               dec_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
+       }
 }
 
 void remove_from_page_cache(struct page *page)
@@ -132,9 +142,9 @@ void remove_from_page_cache(struct page *page)
 
        BUG_ON(!PageLocked(page));
 
-       write_lock_irq(&mapping->tree_lock);
+       spin_lock_irq(&mapping->tree_lock);
        __remove_from_page_cache(page);
-       write_unlock_irq(&mapping->tree_lock);
+       spin_unlock_irq(&mapping->tree_lock);
 }
 
 static int sync_page(void *word)
@@ -200,7 +210,7 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
        int ret;
        struct writeback_control wbc = {
                .sync_mode = sync_mode,
-               .nr_to_write = mapping->nrpages * 2,
+               .nr_to_write = LONG_MAX,
                .range_start = start,
                .range_end = end,
        };
@@ -224,11 +234,12 @@ int filemap_fdatawrite(struct address_space *mapping)
 }
 EXPORT_SYMBOL(filemap_fdatawrite);
 
-static int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
+int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
                                loff_t end)
 {
        return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
 }
+EXPORT_SYMBOL(filemap_fdatawrite_range);
 
 /**
  * filemap_flush - mostly a non-blocking flush
@@ -331,7 +342,7 @@ int sync_page_range(struct inode *inode, struct address_space *mapping,
 EXPORT_SYMBOL(sync_page_range);
 
 /**
- * sync_page_range_nolock
+ * sync_page_range_nolock - write & wait on all pages in the passed range without locking
  * @inode:     target inode
  * @mapping:   target address_space
  * @pos:       beginning offset in pages to write
@@ -432,47 +443,74 @@ int filemap_write_and_wait_range(struct address_space *mapping,
 }
 
 /**
- * add_to_page_cache - add newly allocated pagecache pages
+ * add_to_page_cache_locked - add a locked page to the pagecache
  * @page:      page to add
  * @mapping:   the page's address_space
  * @offset:    page index
  * @gfp_mask:  page allocation mode
  *
- * This function is used to add newly allocated pagecache pages;
- * the page is new, so we can just run SetPageLocked() against it.
- * The other page state flags were set by rmqueue().
- *
+ * This function is used to add a page to the pagecache. It must be locked.
  * This function does not add the page to the LRU.  The caller must do that.
  */
-int add_to_page_cache(struct page *page, struct address_space *mapping,
+int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                pgoff_t offset, gfp_t gfp_mask)
 {
-       int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
+       int error;
+
+       VM_BUG_ON(!PageLocked(page));
+
+       error = mem_cgroup_cache_charge(page, current->mm,
+                                       gfp_mask & GFP_RECLAIM_MASK);
+       if (error)
+               goto out;
 
+       error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
        if (error == 0) {
-               write_lock_irq(&mapping->tree_lock);
+               page_cache_get(page);
+               page->mapping = mapping;
+               page->index = offset;
+
+               spin_lock_irq(&mapping->tree_lock);
                error = radix_tree_insert(&mapping->page_tree, offset, page);
-               if (!error) {
-                       page_cache_get(page);
-                       SetPageLocked(page);
-                       page->mapping = mapping;
-                       page->index = offset;
+               if (likely(!error)) {
                        mapping->nrpages++;
                        __inc_zone_page_state(page, NR_FILE_PAGES);
+               } else {
+                       page->mapping = NULL;
+                       mem_cgroup_uncharge_cache_page(page);
+                       page_cache_release(page);
                }
-               write_unlock_irq(&mapping->tree_lock);
+
+               spin_unlock_irq(&mapping->tree_lock);
                radix_tree_preload_end();
-       }
+       } else
+               mem_cgroup_uncharge_cache_page(page);
+out:
        return error;
 }
-EXPORT_SYMBOL(add_to_page_cache);
+EXPORT_SYMBOL(add_to_page_cache_locked);
 
 int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                pgoff_t offset, gfp_t gfp_mask)
 {
-       int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
-       if (ret == 0)
-               lru_cache_add(page);
+       int ret;
+
+       /*
+        * Splice_read and readahead add shmem/tmpfs pages into the page cache
+        * before shmem_readpage has a chance to mark them as SwapBacked: they
+        * need to go on the active_anon lru below, and mem_cgroup_cache_charge
+        * (called in add_to_page_cache) needs to know where they're going too.
+        */
+       if (mapping_cap_swap_backed(mapping))
+               SetPageSwapBacked(page);
+
+       ret = add_to_page_cache(page, mapping, offset, gfp_mask);
+       if (ret == 0) {
+               if (page_is_file_cache(page))
+                       lru_cache_add_file(page);
+               else
+                       lru_cache_add_active_anon(page);
+       }
        return ret;
 }
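
A minimal sketch of the consumer side, modelled on page_cache_read() further
down in this file (error handling trimmed): allocate a fresh page, insert it
into the page cache and the LRU in one step, then ask the filesystem to fill
it. Losing the insertion race (-EEXIST) is not an error.

/* Illustrative only: pull one page at 'offset' into file's page cache. */
static int read_one_page(struct file *file, pgoff_t offset)
{
        struct address_space *mapping = file->f_mapping;
        struct page *page;
        int ret;

        page = page_cache_alloc_cold(mapping);
        if (!page)
                return -ENOMEM;

        ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
        if (ret == 0)
                ret = mapping->a_ops->readpage(file, page);
        else if (ret == -EEXIST)
                ret = 0;        /* someone else cached it first */

        page_cache_release(page);       /* the cache keeps its own reference */
        return ret;
}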
 
@@ -516,7 +554,7 @@ static inline void wake_up_page(struct page *page, int bit)
        __wake_up_bit(page_waitqueue(page), &page->flags, bit);
 }
 
-void fastcall wait_on_page_bit(struct page *page, int bit_nr)
+void wait_on_page_bit(struct page *page, int bit_nr)
 {
        DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
 
@@ -535,17 +573,14 @@ EXPORT_SYMBOL(wait_on_page_bit);
  * mechanism between PageLocked pages and PageWriteback pages is shared.
  * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
  *
- * The first mb is necessary to safely close the critical section opened by the
- * TestSetPageLocked(), the second mb is necessary to enforce ordering between
- * the clear_bit and the read of the waitqueue (to avoid SMP races with a
- * parallel wait_on_page_locked()).
+ * The mb is necessary to enforce ordering between the clear_bit and the read
+ * of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
  */
-void fastcall unlock_page(struct page *page)
+void unlock_page(struct page *page)
 {
-       smp_mb__before_clear_bit();
-       if (!TestClearPageLocked(page))
-               BUG();
-       smp_mb__after_clear_bit(); 
+       VM_BUG_ON(!PageLocked(page));
+       clear_bit_unlock(PG_locked, &page->flags);
+       smp_mb__after_clear_bit();
        wake_up_page(page, PG_locked);
 }
 EXPORT_SYMBOL(unlock_page);
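
For context, a hedged sketch of the two sides this ordering protects (function
names are illustrative): one task holds PG_locked across an operation, another
merely waits for the bit to clear via the same waitqueue that unlock_page()
wakes.

/* Lock holder: modify page state under PG_locked. */
static void touch_page(struct page *page)
{
        lock_page(page);                /* may sleep in __lock_page() */
        /* ... operate on the page ... */
        unlock_page(page);              /* clear_bit_unlock + wake_up_page */
}

/* Waiter: never takes the lock, just sleeps until it is released. */
static void wait_for_page(struct page *page)
{
        wait_on_page_locked(page);      /* wait_on_page_bit(page, PG_locked) */
}
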
@@ -556,10 +591,12 @@ EXPORT_SYMBOL(unlock_page);
  */
 void end_page_writeback(struct page *page)
 {
-       if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
-               if (!test_clear_page_writeback(page))
-                       BUG();
-       }
+       if (TestClearPageReclaim(page))
+               rotate_reclaimable_page(page);
+
+       if (!test_clear_page_writeback(page))
+               BUG();
+
        smp_mb__after_clear_bit();
        wake_up_page(page, PG_writeback);
 }
@@ -574,7 +611,7 @@ EXPORT_SYMBOL(end_page_writeback);
  * chances are that on the second loop, the block layer's plug list is empty,
  * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
  */
-void fastcall __lock_page(struct page *page)
+void __lock_page(struct page *page)
 {
        DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
@@ -583,7 +620,7 @@ void fastcall __lock_page(struct page *page)
 }
 EXPORT_SYMBOL(__lock_page);
 
-int fastcall __lock_page_killable(struct page *page)
+int __lock_page_killable(struct page *page)
 {
        DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
@@ -591,11 +628,14 @@ int fastcall __lock_page_killable(struct page *page)
                                        sync_page_killable, TASK_KILLABLE);
 }
 
-/*
+/**
+ * __lock_page_nosync - get a lock on the page, without calling sync_page()
+ * @page: the page to lock
+ *
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
  */
-void fastcall __lock_page_nosync(struct page *page)
+void __lock_page_nosync(struct page *page)
 {
        DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
        __wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
@@ -610,15 +650,35 @@ void fastcall __lock_page_nosync(struct page *page)
  * Is there a pagecache struct page at the given (mapping, offset) tuple?
  * If yes, increment its refcount and return it; if no, return NULL.
  */
-struct page * find_get_page(struct address_space *mapping, pgoff_t offset)
+struct page *find_get_page(struct address_space *mapping, pgoff_t offset)
 {
+       void **pagep;
        struct page *page;
 
-       read_lock_irq(&mapping->tree_lock);
-       page = radix_tree_lookup(&mapping->page_tree, offset);
-       if (page)
-               page_cache_get(page);
-       read_unlock_irq(&mapping->tree_lock);
+       rcu_read_lock();
+repeat:
+       page = NULL;
+       pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
+       if (pagep) {
+               page = radix_tree_deref_slot(pagep);
+               if (unlikely(!page || page == RADIX_TREE_RETRY))
+                       goto repeat;
+
+               if (!page_cache_get_speculative(page))
+                       goto repeat;
+
+               /*
+                * Has the page moved?
+                * This is part of the lockless pagecache protocol. See
+                * include/linux/pagemap.h for details.
+                */
+               if (unlikely(page != *pagep)) {
+                       page_cache_release(page);
+                       goto repeat;
+               }
+       }
+       rcu_read_unlock();
+
        return page;
 }
 EXPORT_SYMBOL(find_get_page);
@@ -633,32 +693,22 @@ EXPORT_SYMBOL(find_get_page);
  *
  * Returns zero if the page was not present. find_lock_page() may sleep.
  */
-struct page *find_lock_page(struct address_space *mapping,
-                               pgoff_t offset)
+struct page *find_lock_page(struct address_space *mapping, pgoff_t offset)
 {
        struct page *page;
 
 repeat:
-       read_lock_irq(&mapping->tree_lock);
-       page = radix_tree_lookup(&mapping->page_tree, offset);
+       page = find_get_page(mapping, offset);
        if (page) {
-               page_cache_get(page);
-               if (TestSetPageLocked(page)) {
-                       read_unlock_irq(&mapping->tree_lock);
-                       __lock_page(page);
-
-                       /* Has the page been truncated while we slept? */
-                       if (unlikely(page->mapping != mapping)) {
-                               unlock_page(page);
-                               page_cache_release(page);
-                               goto repeat;
-                       }
-                       VM_BUG_ON(page->index != offset);
-                       goto out;
+               lock_page(page);
+               /* Has the page been truncated? */
+               if (unlikely(page->mapping != mapping)) {
+                       unlock_page(page);
+                       page_cache_release(page);
+                       goto repeat;
                }
+               VM_BUG_ON(page->index != offset);
        }
-       read_unlock_irq(&mapping->tree_lock);
-out:
        return page;
 }
 EXPORT_SYMBOL(find_lock_page);
@@ -691,7 +741,14 @@ repeat:
                page = __page_cache_alloc(gfp_mask);
                if (!page)
                        return NULL;
-               err = add_to_page_cache_lru(page, mapping, index, gfp_mask);
+               /*
+                * We want a regular kernel memory (not highmem or DMA etc)
+                * allocation for the radix tree nodes, but we need to honour
+                * the context-specific requirements the caller has asked for.
+                * GFP_RECLAIM_MASK collects those requirements.
+                */
+               err = add_to_page_cache_lru(page, mapping, index,
+                       (gfp_mask & GFP_RECLAIM_MASK));
                if (unlikely(err)) {
                        page_cache_release(page);
                        page = NULL;
@@ -724,13 +781,39 @@ unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
 {
        unsigned int i;
        unsigned int ret;
+       unsigned int nr_found;
+
+       rcu_read_lock();
+restart:
+       nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
+                               (void ***)pages, start, nr_pages);
+       ret = 0;
+       for (i = 0; i < nr_found; i++) {
+               struct page *page;
+repeat:
+               page = radix_tree_deref_slot((void **)pages[i]);
+               if (unlikely(!page))
+                       continue;
+               /*
+                * this can only trigger if nr_found == 1, making livelock
+                * a non-issue.
+                */
+               if (unlikely(page == RADIX_TREE_RETRY))
+                       goto restart;
+
+               if (!page_cache_get_speculative(page))
+                       goto repeat;
+
+               /* Has the page moved? */
+               if (unlikely(page != *((void **)pages[i]))) {
+                       page_cache_release(page);
+                       goto repeat;
+               }
 
-       read_lock_irq(&mapping->tree_lock);
-       ret = radix_tree_gang_lookup(&mapping->page_tree,
-                               (void **)pages, start, nr_pages);
-       for (i = 0; i < ret; i++)
-               page_cache_get(pages[i]);
-       read_unlock_irq(&mapping->tree_lock);
+               pages[ret] = page;
+               ret++;
+       }
+       rcu_read_unlock();
        return ret;
 }
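
Callers normally reach find_get_pages() through the pagevec helpers; a minimal
sketch of that pattern (each returned page carries a reference, which
pagevec_release() drops again):

#include <linux/pagevec.h>

/* Illustrative: visit every cached page of 'mapping' from 'start' onwards. */
static void walk_mapping(struct address_space *mapping, pgoff_t start)
{
        struct pagevec pvec;
        pgoff_t index = start;

        pagevec_init(&pvec, 0);
        while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
                int i;

                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        /* ... inspect or lock the page here ... */
                        index = page->index + 1;
                }
                pagevec_release(&pvec); /* drop the gang-lookup references */
        }
}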
 
@@ -751,19 +834,44 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
 {
        unsigned int i;
        unsigned int ret;
+       unsigned int nr_found;
+
+       rcu_read_lock();
+restart:
+       nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
+                               (void ***)pages, index, nr_pages);
+       ret = 0;
+       for (i = 0; i < nr_found; i++) {
+               struct page *page;
+repeat:
+               page = radix_tree_deref_slot((void **)pages[i]);
+               if (unlikely(!page))
+                       continue;
+               /*
+                * this can only trigger if nr_found == 1, making livelock
+                * a non-issue.
+                */
+               if (unlikely(page == RADIX_TREE_RETRY))
+                       goto restart;
 
-       read_lock_irq(&mapping->tree_lock);
-       ret = radix_tree_gang_lookup(&mapping->page_tree,
-                               (void **)pages, index, nr_pages);
-       for (i = 0; i < ret; i++) {
-               if (pages[i]->mapping == NULL || pages[i]->index != index)
+               if (page->mapping == NULL || page->index != index)
                        break;
 
-               page_cache_get(pages[i]);
+               if (!page_cache_get_speculative(page))
+                       goto repeat;
+
+               /* Has the page moved? */
+               if (unlikely(page != *((void **)pages[i]))) {
+                       page_cache_release(page);
+                       goto repeat;
+               }
+
+               pages[ret] = page;
+               ret++;
                index++;
        }
-       read_unlock_irq(&mapping->tree_lock);
-       return i;
+       rcu_read_unlock();
+       return ret;
 }
 EXPORT_SYMBOL(find_get_pages_contig);
 
@@ -783,15 +891,43 @@ unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
 {
        unsigned int i;
        unsigned int ret;
+       unsigned int nr_found;
+
+       rcu_read_lock();
+restart:
+       nr_found = radix_tree_gang_lookup_tag_slot(&mapping->page_tree,
+                               (void ***)pages, *index, nr_pages, tag);
+       ret = 0;
+       for (i = 0; i < nr_found; i++) {
+               struct page *page;
+repeat:
+               page = radix_tree_deref_slot((void **)pages[i]);
+               if (unlikely(!page))
+                       continue;
+               /*
+                * this can only trigger if nr_found == 1, making livelock
+                * a non-issue.
+                */
+               if (unlikely(page == RADIX_TREE_RETRY))
+                       goto restart;
+
+               if (!page_cache_get_speculative(page))
+                       goto repeat;
+
+               /* Has the page moved? */
+               if (unlikely(page != *((void **)pages[i]))) {
+                       page_cache_release(page);
+                       goto repeat;
+               }
+
+               pages[ret] = page;
+               ret++;
+       }
+       rcu_read_unlock();
 
-       read_lock_irq(&mapping->tree_lock);
-       ret = radix_tree_gang_lookup_tag(&mapping->page_tree,
-                               (void **)pages, *index, nr_pages, tag);
-       for (i = 0; i < ret; i++)
-               page_cache_get(pages[i]);
        if (ret)
                *index = pages[ret - 1]->index + 1;
-       read_unlock_irq(&mapping->tree_lock);
+
        return ret;
 }
 EXPORT_SYMBOL(find_get_pages_tag);
@@ -815,13 +951,13 @@ grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
        struct page *page = find_get_page(mapping, index);
 
        if (page) {
-               if (!TestSetPageLocked(page))
+               if (trylock_page(page))
                        return page;
                page_cache_release(page);
                return NULL;
        }
        page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
-       if (page && add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
+       if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
                page_cache_release(page);
                page = NULL;
        }
@@ -854,9 +990,7 @@ static void shrink_readahead_size_eio(struct file *filp,
 }
 
 /**
- * do_generic_mapping_read - generic file read routine
- * @mapping:   address_space to be read
- * @ra:                file's readahead state
+ * do_generic_file_read - generic file read routine
  * @filp:      the file to read
  * @ppos:      current file position
  * @desc:      read_descriptor
@@ -867,18 +1001,13 @@ static void shrink_readahead_size_eio(struct file *filp,
  *
  * This is really ugly. But the gotos actually try to clarify some
  * of the logic when it comes to error handling etc.
- *
- * Note the struct file* is only passed for the use of readpage.
- * It may be NULL.
  */
-void do_generic_mapping_read(struct address_space *mapping,
-                            struct file_ra_state *ra,
-                            struct file *filp,
-                            loff_t *ppos,
-                            read_descriptor_t *desc,
-                            read_actor_t actor)
+static void do_generic_file_read(struct file *filp, loff_t *ppos,
+               read_descriptor_t *desc, read_actor_t actor)
 {
+       struct address_space *mapping = filp->f_mapping;
        struct inode *inode = mapping->host;
+       struct file_ra_state *ra = &filp->f_ra;
        pgoff_t index;
        pgoff_t last_index;
        pgoff_t prev_index;
@@ -914,8 +1043,17 @@ find_page:
                                        ra, filp, page,
                                        index, last_index - index);
                }
-               if (!PageUptodate(page))
-                       goto page_not_up_to_date;
+               if (!PageUptodate(page)) {
+                       if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
+                                       !mapping->a_ops->is_partially_uptodate)
+                               goto page_not_up_to_date;
+                       if (!trylock_page(page))
+                               goto page_not_up_to_date;
+                       if (!mapping->a_ops->is_partially_uptodate(page,
+                                                               desc, offset))
+                               goto page_not_up_to_date_locked;
+                       unlock_page(page);
+               }
 page_ok:
                /*
                 * i_size must be checked after we know the page is Uptodate.
@@ -982,9 +1120,11 @@ page_ok:
 
 page_not_up_to_date:
                /* Get exclusive access to the page ... */
-               if (lock_page_killable(page))
-                       goto readpage_eio;
+               error = lock_page_killable(page);
+               if (unlikely(error))
+                       goto readpage_error;
 
+page_not_up_to_date_locked:
                /* Did it get truncated before we got the lock? */
                if (!page->mapping) {
                        unlock_page(page);
@@ -1011,8 +1151,9 @@ readpage:
                }
 
                if (!PageUptodate(page)) {
-                       if (lock_page_killable(page))
-                               goto readpage_eio;
+                       error = lock_page_killable(page);
+                       if (unlikely(error))
+                               goto readpage_error;
                        if (!PageUptodate(page)) {
                                if (page->mapping == NULL) {
                                        /*
@@ -1024,15 +1165,14 @@ readpage:
                                }
                                unlock_page(page);
                                shrink_readahead_size_eio(filp, ra);
-                               goto readpage_eio;
+                               error = -EIO;
+                               goto readpage_error;
                        }
                        unlock_page(page);
                }
 
                goto page_ok;
 
-readpage_eio:
-               error = -EIO;
 readpage_error:
                /* UHHUH! A synchronous read error occurred. Report it */
                desc->error = error;
@@ -1067,10 +1207,8 @@ out:
        ra->prev_pos |= prev_offset;
 
        *ppos = ((loff_t)index << PAGE_CACHE_SHIFT) + offset;
-       if (filp)
-               file_accessed(filp);
+       file_accessed(filp);
 }
-EXPORT_SYMBOL(do_generic_mapping_read);
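
The ->is_partially_uptodate() hook consulted in do_generic_file_read() above
lets a blocksize < pagesize filesystem satisfy a read from individual uptodate
buffers without falling back to ->readpage(). A hedged sketch of the wiring a
block-based filesystem might use (ext2/ext3 point this at the buffer-layer
helper; example_readpage is a stand-in for the fs's own readpage):

#include <linux/fs.h>
#include <linux/buffer_head.h>

static int example_readpage(struct file *file, struct page *page);

static const struct address_space_operations example_aops = {
        .readpage               = example_readpage,
        /* answer "is this byte range already uptodate?" per buffer_head */
        .is_partially_uptodate  = block_is_partially_uptodate,
};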
 
 int file_read_actor(read_descriptor_t *desc, struct page *page,
                        unsigned long offset, unsigned long size)
@@ -1182,42 +1320,42 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 
                mapping = filp->f_mapping;
                inode = mapping->host;
-               retval = 0;
                if (!count)
                        goto out; /* skip atime */
                size = i_size_read(inode);
                if (pos < size) {
-                       retval = generic_file_direct_IO(READ, iocb,
-                                               iov, pos, nr_segs);
+                       retval = filemap_write_and_wait_range(mapping, pos,
+                                       pos + iov_length(iov, nr_segs) - 1);
+                       if (!retval) {
+                               retval = mapping->a_ops->direct_IO(READ, iocb,
+                                                       iov, pos, nr_segs);
+                       }
                        if (retval > 0)
                                *ppos = pos + retval;
-               }
-               if (likely(retval != 0)) {
-                       file_accessed(filp);
-                       goto out;
+                       if (retval) {
+                               file_accessed(filp);
+                               goto out;
+                       }
                }
        }
 
-       retval = 0;
-       if (count) {
-               for (seg = 0; seg < nr_segs; seg++) {
-                       read_descriptor_t desc;
+       for (seg = 0; seg < nr_segs; seg++) {
+               read_descriptor_t desc;
 
-                       desc.written = 0;
-                       desc.arg.buf = iov[seg].iov_base;
-                       desc.count = iov[seg].iov_len;
-                       if (desc.count == 0)
-                               continue;
-                       desc.error = 0;
-                       do_generic_file_read(filp,ppos,&desc,file_read_actor);
-                       retval += desc.written;
-                       if (desc.error) {
-                               retval = retval ?: desc.error;
-                               break;
-                       }
-                       if (desc.count > 0)
-                               break;
+               desc.written = 0;
+               desc.arg.buf = iov[seg].iov_base;
+               desc.count = iov[seg].iov_len;
+               if (desc.count == 0)
+                       continue;
+               desc.error = 0;
+               do_generic_file_read(filp, ppos, &desc, file_read_actor);
+               retval += desc.written;
+               if (desc.error) {
+                       retval = retval ?: desc.error;
+                       break;
                }
+               if (desc.count > 0)
+                       break;
        }
 out:
        return retval;
@@ -1236,7 +1374,7 @@ do_readahead(struct address_space *mapping, struct file *filp,
        return 0;
 }
 
-asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
+SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count)
 {
        ssize_t ret;
        struct file *file;
@@ -1255,6 +1393,13 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
        }
        return ret;
 }
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_readahead(long fd, loff_t offset, long count)
+{
+       return SYSC_readahead((int) fd, offset, (size_t) count);
+}
+SYSCALL_ALIAS(sys_readahead, SyS_readahead);
+#endif
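
From userspace this wrapper is reached as readahead(2); a minimal
(illustrative) use is prefetching a whole file before a sequential parse:

/* Userspace example, not kernel code. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

static int open_and_prefetch(const char *path)
{
        struct stat st;
        int fd = open(path, O_RDONLY);

        if (fd < 0)
                return -1;
        if (fstat(fd, &st) == 0)
                readahead(fd, 0, st.st_size);   /* best-effort, may fail */
        return fd;
}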
 
 #ifdef CONFIG_MMU
 /**
@@ -1265,7 +1410,7 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
  * This adds the requested page to the page cache if it isn't already there,
  * and schedules an I/O to read in its contents from disk.
  */
-static int fastcall page_cache_read(struct file * file, pgoff_t offset)
+static int page_cache_read(struct file *file, pgoff_t offset)
 {
        struct address_space *mapping = file->f_mapping;
        struct page *page; 
@@ -1311,7 +1456,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        struct file_ra_state *ra = &file->f_ra;
        struct inode *inode = mapping->host;
        struct page *page;
-       unsigned long size;
+       pgoff_t size;
        int did_readaround = 0;
        int ret = 0;
 
@@ -1400,7 +1545,6 @@ retry_find:
        /*
         * Found the page and have a reference on it.
         */
-       mark_page_accessed(page);
        ra->prev_pos = (loff_t)page->index << PAGE_CACHE_SHIFT;
        vmf->page = page;
        return ret | VM_FAULT_LOCKED;
@@ -1444,6 +1588,11 @@ page_not_uptodate:
         */
        ClearPageError(page);
        error = mapping->a_ops->readpage(file, page);
+       if (!error) {
+               wait_on_page_locked(page);
+               if (!PageUptodate(page))
+                       error = -EIO;
+       }
        page_cache_release(page);
 
        if (!error || error == AOP_TRUNCATED_PAGE)
@@ -1526,9 +1675,20 @@ repeat:
        return page;
 }
 
-/*
+/**
+ * read_cache_page_async - read into page cache, fill it if needed
+ * @mapping:   the page's address_space
+ * @index:     the page index
+ * @filler:    function to perform the read
+ * @data:      destination for read data
+ *
  * Same as read_cache_page, but don't wait for page to become unlocked
  * after submitting it to the filler.
+ *
+ * Read into the page cache. If a page already exists, and PageUptodate() is
+ * not set, try to fill the page but don't wait for it to become unlocked.
+ *
+ * If the page does not get brought uptodate, return -EIO.
  */
 struct page *read_cache_page_async(struct address_space *mapping,
                                pgoff_t index,
@@ -1620,14 +1780,14 @@ int should_remove_suid(struct dentry *dentry)
        if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
                kill |= ATTR_KILL_SGID;
 
-       if (unlikely(kill && !capable(CAP_FSETID)))
+       if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
                return kill;
 
        return 0;
 }
 EXPORT_SYMBOL(should_remove_suid);
 
-int __remove_suid(struct dentry *dentry, int kill)
+static int __remove_suid(struct dentry *dentry, int kill)
 {
        struct iattr newattrs;
 
@@ -1635,8 +1795,9 @@ int __remove_suid(struct dentry *dentry, int kill)
        return notify_change(dentry, &newattrs);
 }
 
-int remove_suid(struct dentry *dentry)
+int file_remove_suid(struct file *file)
 {
+       struct dentry *dentry = file->f_path.dentry;
        int killsuid = should_remove_suid(dentry);
        int killpriv = security_inode_need_killpriv(dentry);
        int error = 0;
@@ -1650,7 +1811,7 @@ int remove_suid(struct dentry *dentry)
 
        return error;
 }
-EXPORT_SYMBOL(remove_suid);
+EXPORT_SYMBOL(file_remove_suid);
 
 static size_t __iovec_copy_from_user_inatomic(char *vaddr,
                        const struct iovec *iov, size_t base, size_t bytes)
@@ -1662,7 +1823,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
                int copy = min(bytes, iov->iov_len - base);
 
                base = 0;
-               left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
+               left = __copy_from_user_inatomic(vaddr, buf, copy);
                copied += copy;
                bytes -= copy;
                vaddr += copy;
@@ -1690,8 +1851,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
        if (likely(i->nr_segs == 1)) {
                int left;
                char __user *buf = i->iov->iov_base + i->iov_offset;
-               left = __copy_from_user_inatomic_nocache(kaddr + offset,
-                                                       buf, bytes);
+               left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
                copied = bytes - left;
        } else {
                copied = __iovec_copy_from_user_inatomic(kaddr + offset,
@@ -1719,7 +1879,7 @@ size_t iov_iter_copy_from_user(struct page *page,
        if (likely(i->nr_segs == 1)) {
                int left;
                char __user *buf = i->iov->iov_base + i->iov_offset;
-               left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
+               left = __copy_from_user(kaddr + offset, buf, bytes);
                copied = bytes - left;
        } else {
                copied = __iovec_copy_from_user_inatomic(kaddr + offset,
@@ -1730,17 +1890,27 @@ size_t iov_iter_copy_from_user(struct page *page,
 }
 EXPORT_SYMBOL(iov_iter_copy_from_user);
 
-static void __iov_iter_advance_iov(struct iov_iter *i, size_t bytes)
+void iov_iter_advance(struct iov_iter *i, size_t bytes)
 {
+       BUG_ON(i->count < bytes);
+
        if (likely(i->nr_segs == 1)) {
                i->iov_offset += bytes;
+               i->count -= bytes;
        } else {
                const struct iovec *iov = i->iov;
                size_t base = i->iov_offset;
 
-               while (bytes) {
-                       int copy = min(bytes, iov->iov_len - base);
+               /*
+                * The !iov->iov_len check ensures we skip over unlikely
+                * zero-length segments (without overrunning the iovec).
+                */
+               while (bytes || unlikely(i->count && !iov->iov_len)) {
+                       int copy;
 
+                       copy = min(bytes, iov->iov_len - base);
+                       BUG_ON(!i->count || i->count < copy);
+                       i->count -= copy;
                        bytes -= copy;
                        base += copy;
                        if (iov->iov_len == base) {
@@ -1752,14 +1922,6 @@ static void __iov_iter_advance_iov(struct iov_iter *i, size_t bytes)
                i->iov_offset = base;
        }
 }
-
-void iov_iter_advance(struct iov_iter *i, size_t bytes)
-{
-       BUG_ON(i->count < bytes);
-
-       __iov_iter_advance_iov(i, bytes);
-       i->count -= bytes;
-}
 EXPORT_SYMBOL(iov_iter_advance);
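
The usual consumer pattern, sketched from generic_perform_write() below: copy
what fits into the current pagecache page with the atomic helper, then advance
the iterator by however much was actually copied (which may be short).

/* Illustrative fragment: 'i' is an iov_iter, 'page' a locked pagecache page. */
static size_t copy_chunk(struct page *page, struct iov_iter *i,
                         unsigned long offset, unsigned long bytes)
{
        size_t copied;

        pagefault_disable();            /* we hold the page lock */
        copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
        pagefault_enable();

        iov_iter_advance(i, copied);    /* may be less than 'bytes' */
        return copied;
}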
 
 /*
@@ -1880,48 +2042,8 @@ int pagecache_write_begin(struct file *file, struct address_space *mapping,
 {
        const struct address_space_operations *aops = mapping->a_ops;
 
-       if (aops->write_begin) {
-               return aops->write_begin(file, mapping, pos, len, flags,
+       return aops->write_begin(file, mapping, pos, len, flags,
                                                        pagep, fsdata);
-       } else {
-               int ret;
-               pgoff_t index = pos >> PAGE_CACHE_SHIFT;
-               unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
-               struct inode *inode = mapping->host;
-               struct page *page;
-again:
-               page = __grab_cache_page(mapping, index);
-               *pagep = page;
-               if (!page)
-                       return -ENOMEM;
-
-               if (flags & AOP_FLAG_UNINTERRUPTIBLE && !PageUptodate(page)) {
-                       /*
-                        * There is no way to resolve a short write situation
-                        * for a !Uptodate page (except by double copying in
-                        * the caller done by generic_perform_write_2copy).
-                        *
-                        * Instead, we have to bring it uptodate here.
-                        */
-                       ret = aops->readpage(file, page);
-                       page_cache_release(page);
-                       if (ret) {
-                               if (ret == AOP_TRUNCATED_PAGE)
-                                       goto again;
-                               return ret;
-                       }
-                       goto again;
-               }
-
-               ret = aops->prepare_write(file, page, offset, offset+len);
-               if (ret) {
-                       unlock_page(page);
-                       page_cache_release(page);
-                       if (pos + len > inode->i_size)
-                               vmtruncate(inode, inode->i_size);
-               }
-               return ret;
-       }
 }
 EXPORT_SYMBOL(pagecache_write_begin);
 
@@ -1930,32 +2052,9 @@ int pagecache_write_end(struct file *file, struct address_space *mapping,
                                struct page *page, void *fsdata)
 {
        const struct address_space_operations *aops = mapping->a_ops;
-       int ret;
-
-       if (aops->write_end) {
-               mark_page_accessed(page);
-               ret = aops->write_end(file, mapping, pos, len, copied,
-                                                       page, fsdata);
-       } else {
-               unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
-               struct inode *inode = mapping->host;
-
-               flush_dcache_page(page);
-               ret = aops->commit_write(file, page, offset, offset+len);
-               unlock_page(page);
-               mark_page_accessed(page);
-               page_cache_release(page);
-
-               if (ret < 0) {
-                       if (pos + len > inode->i_size)
-                               vmtruncate(inode, inode->i_size);
-               } else if (ret > 0)
-                       ret = min_t(size_t, copied, ret);
-               else
-                       ret = copied;
-       }
 
-       return ret;
+       mark_page_accessed(page);
+       return aops->write_end(file, mapping, pos, len, copied, page, fsdata);
 }
 EXPORT_SYMBOL(pagecache_write_end);
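
With the ->prepare_write/->commit_write fallback gone, in-kernel writers use
the begin/copy/end sequence unconditionally. A hedged sketch of that sequence
(roughly how cont_expand_zero() in fs/buffer.c drives it; 'len' is assumed to
stay within a single page):

/* Illustrative: write 'len' zero bytes into 'mapping' at byte offset 'pos'. */
static int write_zeroes(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len)
{
        struct page *page;
        void *fsdata;
        int err;

        err = pagecache_write_begin(file, mapping, pos, len,
                                    AOP_FLAG_UNINTERRUPTIBLE, &page, &fsdata);
        if (err)
                return err;

        zero_user(page, pos & (PAGE_CACHE_SIZE - 1), len);

        err = pagecache_write_end(file, mapping, pos, len, len, page, fsdata);
        return err < 0 ? err : 0;
}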
 
@@ -1968,11 +2067,54 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
        struct address_space *mapping = file->f_mapping;
        struct inode    *inode = mapping->host;
        ssize_t         written;
+       size_t          write_len;
+       pgoff_t         end;
 
        if (count != ocount)
                *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
 
-       written = generic_file_direct_IO(WRITE, iocb, iov, pos, *nr_segs);
+       write_len = iov_length(iov, *nr_segs);
+       end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
+
+       written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
+       if (written)
+               goto out;
+
+       /*
+        * After a write we want buffered reads to be sure to go to disk to get
+        * the new data.  We invalidate clean cached pages from the region we're
+        * about to write.  We do this *before* the write so that we can return
+        * without clobbering -EIOCBQUEUED from ->direct_IO().
+        */
+       if (mapping->nrpages) {
+               written = invalidate_inode_pages2_range(mapping,
+                                       pos >> PAGE_CACHE_SHIFT, end);
+               /*
+                * If a page can not be invalidated, return 0 to fall back
+                * to buffered write.
+                */
+               if (written) {
+                       if (written == -EBUSY)
+                               return 0;
+                       goto out;
+               }
+       }
+
+       written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
+
+       /*
+        * Finally, try again to invalidate clean pages which might have been
+        * cached by non-direct readahead, or faulted in by get_user_pages()
+        * if the source of the write was an mmap'ed region of the file
+        * we're writing.  Either one is a pretty crazy thing to do,
+        * so we don't support it 100%.  If this invalidation
+        * fails, tough, the write still worked...
+        */
+       if (mapping->nrpages) {
+               invalidate_inode_pages2_range(mapping,
+                                             pos >> PAGE_CACHE_SHIFT, end);
+       }
+
        if (written > 0) {
                loff_t end = pos + written;
                if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
@@ -1988,6 +2130,7 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
         * i_mutex is held, which protects generic_osync_inode() from
         * livelocking.  AIO O_DIRECT ops attempt to sync metadata here.
         */
+out:
        if ((written >= 0 || written == -EIOCBQUEUED) &&
            ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
                int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
@@ -2002,19 +2145,24 @@ EXPORT_SYMBOL(generic_file_direct_write);
  * Find or create a page at the given pagecache position. Return the locked
  * page. This function is specifically for buffered writes.
  */
-struct page *__grab_cache_page(struct address_space *mapping, pgoff_t index)
+struct page *grab_cache_page_write_begin(struct address_space *mapping,
+                                       pgoff_t index, unsigned flags)
 {
        int status;
        struct page *page;
+       gfp_t gfp_notmask = 0;
+       if (flags & AOP_FLAG_NOFS)
+               gfp_notmask = __GFP_FS;
 repeat:
        page = find_lock_page(mapping, index);
        if (likely(page))
                return page;
 
-       page = page_cache_alloc(mapping);
+       page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~gfp_notmask);
        if (!page)
                return NULL;
-       status = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
+       status = add_to_page_cache_lru(page, mapping, index,
+                                               GFP_KERNEL & ~gfp_notmask);
        if (unlikely(status)) {
                page_cache_release(page);
                if (status == -EEXIST)
@@ -2023,175 +2171,7 @@ repeat:
        }
        return page;
 }
-EXPORT_SYMBOL(__grab_cache_page);
-
-static ssize_t generic_perform_write_2copy(struct file *file,
-                               struct iov_iter *i, loff_t pos)
-{
-       struct address_space *mapping = file->f_mapping;
-       const struct address_space_operations *a_ops = mapping->a_ops;
-       struct inode *inode = mapping->host;
-       long status = 0;
-       ssize_t written = 0;
-
-       do {
-               struct page *src_page;
-               struct page *page;
-               pgoff_t index;          /* Pagecache index for current page */
-               unsigned long offset;   /* Offset into pagecache page */
-               unsigned long bytes;    /* Bytes to write to page */
-               size_t copied;          /* Bytes copied from user */
-
-               offset = (pos & (PAGE_CACHE_SIZE - 1));
-               index = pos >> PAGE_CACHE_SHIFT;
-               bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset,
-                                               iov_iter_count(i));
-
-               /*
-                * a non-NULL src_page indicates that we're doing the
-                * copy via get_user_pages and kmap.
-                */
-               src_page = NULL;
-
-               /*
-                * Bring in the user page that we will copy from _first_.
-                * Otherwise there's a nasty deadlock on copying from the
-                * same page as we're writing to, without it being marked
-                * up-to-date.
-                *
-                * Not only is this an optimisation, but it is also required
-                * to check that the address is actually valid, when atomic
-                * usercopies are used, below.
-                */
-               if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
-                       status = -EFAULT;
-                       break;
-               }
-
-               page = __grab_cache_page(mapping, index);
-               if (!page) {
-                       status = -ENOMEM;
-                       break;
-               }
-
-               /*
-                * non-uptodate pages cannot cope with short copies, and we
-                * cannot take a pagefault with the destination page locked.
-                * So pin the source page to copy it.
-                */
-               if (!PageUptodate(page) && !segment_eq(get_fs(), KERNEL_DS)) {
-                       unlock_page(page);
-
-                       src_page = alloc_page(GFP_KERNEL);
-                       if (!src_page) {
-                               page_cache_release(page);
-                               status = -ENOMEM;
-                               break;
-                       }
-
-                       /*
-                        * Cannot get_user_pages with a page locked for the
-                        * same reason as we can't take a page fault with a
-                        * page locked (as explained below).
-                        */
-                       copied = iov_iter_copy_from_user(src_page, i,
-                                                               offset, bytes);
-                       if (unlikely(copied == 0)) {
-                               status = -EFAULT;
-                               page_cache_release(page);
-                               page_cache_release(src_page);
-                               break;
-                       }
-                       bytes = copied;
-
-                       lock_page(page);
-                       /*
-                        * Can't handle the page going uptodate here, because
-                        * that means we would use non-atomic usercopies, which
-                        * zero out the tail of the page, which can cause
-                        * zeroes to become transiently visible. We could just
-                        * use a non-zeroing copy, but the APIs aren't too
-                        * consistent.
-                        */
-                       if (unlikely(!page->mapping || PageUptodate(page))) {
-                               unlock_page(page);
-                               page_cache_release(page);
-                               page_cache_release(src_page);
-                               continue;
-                       }
-               }
-
-               status = a_ops->prepare_write(file, page, offset, offset+bytes);
-               if (unlikely(status))
-                       goto fs_write_aop_error;
-
-               if (!src_page) {
-                       /*
-                        * Must not enter the pagefault handler here, because
-                        * we hold the page lock, so we might recursively
-                        * deadlock on the same lock, or get an ABBA deadlock
-                        * against a different lock, or against the mmap_sem
-                        * (which nests outside the page lock).  So increment
-                        * preempt count, and use _atomic usercopies.
-                        *
-                        * The page is uptodate so we are OK to encounter a
-                        * short copy: if unmodified parts of the page are
-                        * marked dirty and written out to disk, it doesn't
-                        * really matter.
-                        */
-                       pagefault_disable();
-                       copied = iov_iter_copy_from_user_atomic(page, i,
-                                                               offset, bytes);
-                       pagefault_enable();
-               } else {
-                       void *src, *dst;
-                       src = kmap_atomic(src_page, KM_USER0);
-                       dst = kmap_atomic(page, KM_USER1);
-                       memcpy(dst + offset, src + offset, bytes);
-                       kunmap_atomic(dst, KM_USER1);
-                       kunmap_atomic(src, KM_USER0);
-                       copied = bytes;
-               }
-               flush_dcache_page(page);
-
-               status = a_ops->commit_write(file, page, offset, offset+bytes);
-               if (unlikely(status < 0))
-                       goto fs_write_aop_error;
-               if (unlikely(status > 0)) /* filesystem did partial write */
-                       copied = min_t(size_t, copied, status);
-
-               unlock_page(page);
-               mark_page_accessed(page);
-               page_cache_release(page);
-               if (src_page)
-                       page_cache_release(src_page);
-
-               iov_iter_advance(i, copied);
-               pos += copied;
-               written += copied;
-
-               balance_dirty_pages_ratelimited(mapping);
-               cond_resched();
-               continue;
-
-fs_write_aop_error:
-               unlock_page(page);
-               page_cache_release(page);
-               if (src_page)
-                       page_cache_release(src_page);
-
-               /*
-                * prepare_write() may have instantiated a few blocks
-                * outside i_size.  Trim these off again. Don't need
-                * i_size_read because we hold i_mutex.
-                */
-               if (pos + bytes > inode->i_size)
-                       vmtruncate(inode, inode->i_size);
-               break;
-       } while (iov_iter_count(i));
-
-       return written ? written : status;
-}
+EXPORT_SYMBOL(grab_cache_page_write_begin);
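
A hedged sketch of the consumer side: a simple filesystem's ->write_begin can
now pass its AOP_FLAG_NOFS decision straight through (this mirrors
simple_write_begin() in fs/libfs.c; the function name is illustrative):

static int example_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        struct page *page;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;

        /* AOP_FLAG_NOFS makes the allocation below drop __GFP_FS. */
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;
        *pagep = page;

        /* ... bring the affected range uptodate here if needed ... */
        return 0;
}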
 
 static ssize_t generic_perform_write(struct file *file,
                                struct iov_iter *i, loff_t pos)
@@ -2256,6 +2236,7 @@ again:
 
                cond_resched();
 
+               iov_iter_advance(i, copied);
                if (unlikely(copied == 0)) {
                        /*
                         * If we were unable to copy any data at all, we must
@@ -2269,7 +2250,6 @@ again:
                                                iov_iter_single_seg_count(i));
                        goto again;
                }
-               iov_iter_advance(i, copied);
                pos += copied;
                written += copied;
 
@@ -2293,10 +2273,7 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
        struct iov_iter i;
 
        iov_iter_init(&i, iov, nr_segs, count, written);
-       if (a_ops->write_begin)
-               status = generic_perform_write(file, &i, pos);
-       else
-               status = generic_perform_write_2copy(file, &i, pos);
+       status = generic_perform_write(file, &i, pos);
 
        if (likely(status >= 0)) {
                written += status;
@@ -2319,7 +2296,8 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
         * the file data here, to try to honour O_DIRECT expectations.
         */
        if (unlikely(file->f_flags & O_DIRECT) && written)
-               status = filemap_write_and_wait(mapping);
+               status = filemap_write_and_wait_range(mapping,
+                                       pos, pos + written - 1);
 
        return written ? written : status;
 }
@@ -2359,7 +2337,7 @@ __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
        if (count == 0)
                goto out;
 
-       err = remove_suid(file->f_path.dentry);
+       err = file_remove_suid(file);
        if (err)
                goto out;
 
@@ -2475,66 +2453,6 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
 }
 EXPORT_SYMBOL(generic_file_aio_write);
 
-/*
- * Called under i_mutex for writes to S_ISREG files.   Returns -EIO if something
- * went wrong during pagecache shootdown.
- */
-static ssize_t
-generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-       loff_t offset, unsigned long nr_segs)
-{
-       struct file *file = iocb->ki_filp;
-       struct address_space *mapping = file->f_mapping;
-       ssize_t retval;
-       size_t write_len;
-       pgoff_t end = 0; /* silence gcc */
-
-       /*
-        * If it's a write, unmap all mmappings of the file up-front.  This
-        * will cause any pte dirty bits to be propagated into the pageframes
-        * for the subsequent filemap_write_and_wait().
-        */
-       if (rw == WRITE) {
-               write_len = iov_length(iov, nr_segs);
-               end = (offset + write_len - 1) >> PAGE_CACHE_SHIFT;
-               if (mapping_mapped(mapping))
-                       unmap_mapping_range(mapping, offset, write_len, 0);
-       }
-
-       retval = filemap_write_and_wait(mapping);
-       if (retval)
-               goto out;
-
-       /*
-        * After a write we want buffered reads to be sure to go to disk to get
-        * the new data.  We invalidate clean cached page from the region we're
-        * about to write.  We do this *before* the write so that we can return
-        * -EIO without clobbering -EIOCBQUEUED from ->direct_IO().
-        */
-       if (rw == WRITE && mapping->nrpages) {
-               retval = invalidate_inode_pages2_range(mapping,
-                                       offset >> PAGE_CACHE_SHIFT, end);
-               if (retval)
-                       goto out;
-       }
-
-       retval = mapping->a_ops->direct_IO(rw, iocb, iov, offset, nr_segs);
-
-       /*
-        * Finally, try again to invalidate clean pages which might have been
-        * cached by non-direct readahead, or faulted in by get_user_pages()
-        * if the source of the write was an mmap'ed region of the file
-        * we're writing.  Either one is a pretty crazy thing to do,
-        * so we don't support it 100%.  If this invalidation
-        * fails, tough, the write still worked...
-        */
-       if (rw == WRITE && mapping->nrpages) {
-               invalidate_inode_pages2_range(mapping, offset >> PAGE_CACHE_SHIFT, end);
-       }
-out:
-       return retval;
-}
-
 /**
  * try_to_release_page() - release old fs-specific metadata on a page
  *
@@ -2546,9 +2464,8 @@ out:
  * Otherwise return zero.
  *
  * The @gfp_mask argument specifies whether I/O may be performed to release
- * this page (__GFP_IO), and whether the call may block (__GFP_WAIT).
+ * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
  *
- * NOTE: @gfp_mask may go away, and this function may become non-blocking.
  */
 int try_to_release_page(struct page *page, gfp_t gfp_mask)
 {