mm: ZERO_PAGE without PTE_SPECIAL
diff --git a/mm/filemap.c b/mm/filemap.c
index f8c6927..bcc7372 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
 /*
  * FIXME: remove all knowledge of the buffer layer from the core VM
  */
-#include <linux/buffer_head.h> /* for generic_osync_inode */
+#include <linux/buffer_head.h> /* for try_to_free_buffers */
 
 #include <asm/mman.h>
 
-
 /*
  * Shared mappings implemented 30.11.1994. It's not fully working yet,
  * though.
@@ -120,8 +119,9 @@ void __remove_from_page_cache(struct page *page)
        page->mapping = NULL;
        mapping->nrpages--;
        __dec_zone_page_state(page, NR_FILE_PAGES);
+       if (PageSwapBacked(page))
+               __dec_zone_page_state(page, NR_SHMEM);
        BUG_ON(page_mapped(page));
-       mem_cgroup_uncharge_cache_page(page);
 
        /*
         * Some filesystems seem to re-dirty the page even after
@@ -145,6 +145,7 @@ void remove_from_page_cache(struct page *page)
        spin_lock_irq(&mapping->tree_lock);
        __remove_from_page_cache(page);
        spin_unlock_irq(&mapping->tree_lock);
+       mem_cgroup_uncharge_cache_page(page);
 }
 
 static int sync_page(void *word)
@@ -210,7 +211,7 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
        int ret;
        struct writeback_control wbc = {
                .sync_mode = sync_mode,
-               .nr_to_write = mapping->nrpages * 2,
+               .nr_to_write = LONG_MAX,
                .range_start = start,
                .range_end = end,
        };
@@ -307,68 +308,24 @@ int wait_on_page_writeback_range(struct address_space *mapping,
 }
 
 /**
- * sync_page_range - write and wait on all pages in the passed range
- * @inode:     target inode
- * @mapping:   target address_space
- * @pos:       beginning offset in pages to write
- * @count:     number of bytes to write
- *
- * Write and wait upon all the pages in the passed range.  This is a "data
- * integrity" operation.  It waits upon in-flight writeout before starting and
- * waiting upon new writeout.  If there was an IO error, return it.
+ * filemap_fdatawait_range - wait for all under-writeback pages to complete in a given range
+ * @mapping: address space structure to wait for
+ * @start:     offset in bytes where the range starts
+ * @end:       offset in bytes where the range ends (inclusive)
  *
- * We need to re-take i_mutex during the generic_osync_inode list walk because
- * it is otherwise livelockable.
- */
-int sync_page_range(struct inode *inode, struct address_space *mapping,
-                       loff_t pos, loff_t count)
-{
-       pgoff_t start = pos >> PAGE_CACHE_SHIFT;
-       pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
-       int ret;
-
-       if (!mapping_cap_writeback_dirty(mapping) || !count)
-               return 0;
-       ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
-       if (ret == 0) {
-               mutex_lock(&inode->i_mutex);
-               ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
-               mutex_unlock(&inode->i_mutex);
-       }
-       if (ret == 0)
-               ret = wait_on_page_writeback_range(mapping, start, end);
-       return ret;
-}
-EXPORT_SYMBOL(sync_page_range);
-
-/**
- * sync_page_range_nolock - write & wait on all pages in the passed range without locking
- * @inode:     target inode
- * @mapping:   target address_space
- * @pos:       beginning offset in pages to write
- * @count:     number of bytes to write
+ * Walk the list of under-writeback pages of the given address space
+ * in the given range and wait for all of them.
  *
- * Note: Holding i_mutex across sync_page_range_nolock() is not a good idea
- * as it forces O_SYNC writers to different parts of the same file
- * to be serialised right until io completion.
+ * This is just a simple wrapper so that callers don't have to convert offsets
+ * to page indexes themselves.
  */
-int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
-                          loff_t pos, loff_t count)
+int filemap_fdatawait_range(struct address_space *mapping, loff_t start,
+                           loff_t end)
 {
-       pgoff_t start = pos >> PAGE_CACHE_SHIFT;
-       pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
-       int ret;
-
-       if (!mapping_cap_writeback_dirty(mapping) || !count)
-               return 0;
-       ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
-       if (ret == 0)
-               ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
-       if (ret == 0)
-               ret = wait_on_page_writeback_range(mapping, start, end);
-       return ret;
+       return wait_on_page_writeback_range(mapping, start >> PAGE_CACHE_SHIFT,
+                                           end >> PAGE_CACHE_SHIFT);
 }
-EXPORT_SYMBOL(sync_page_range_nolock);
+EXPORT_SYMBOL(filemap_fdatawait_range);
 
 /**
  * filemap_fdatawait - wait for all under-writeback pages to complete
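/*
 * A minimal sketch of how a caller might use the helper introduced
 * above: kick off writeback for a byte range, then wait on it, without
 * converting byte offsets to page indexes by hand.  example_sync_range()
 * is hypothetical and only illustrates the intended calling pattern;
 * both helpers are declared in <linux/fs.h>.
 */
static int example_sync_range(struct address_space *mapping,
			      loff_t pos, loff_t count)
{
	int err;

	if (!count)
		return 0;

	/* Start writeback on the range (byte offsets, end inclusive). */
	err = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
	if (err)
		return err;

	/* Wait on the same range; the helper does the page-index math. */
	return filemap_fdatawait_range(mapping, pos, pos + count - 1);
}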
@@ -441,6 +398,7 @@ int filemap_write_and_wait_range(struct address_space *mapping,
        }
        return err;
 }
+EXPORT_SYMBOL(filemap_write_and_wait_range);
 
 /**
  * add_to_page_cache_locked - add a locked page to the pagecache
@@ -460,7 +418,7 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
        VM_BUG_ON(!PageLocked(page));
 
        error = mem_cgroup_cache_charge(page, current->mm,
-                                       gfp_mask & ~__GFP_HIGHMEM);
+                                       gfp_mask & GFP_RECLAIM_MASK);
        if (error)
                goto out;
 
@@ -475,13 +433,15 @@ int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                if (likely(!error)) {
                        mapping->nrpages++;
                        __inc_zone_page_state(page, NR_FILE_PAGES);
+                       if (PageSwapBacked(page))
+                               __inc_zone_page_state(page, NR_SHMEM);
+                       spin_unlock_irq(&mapping->tree_lock);
                } else {
                        page->mapping = NULL;
+                       spin_unlock_irq(&mapping->tree_lock);
                        mem_cgroup_uncharge_cache_page(page);
                        page_cache_release(page);
                }
-
-               spin_unlock_irq(&mapping->tree_lock);
                radix_tree_preload_end();
        } else
                mem_cgroup_uncharge_cache_page(page);
@@ -513,13 +473,14 @@ int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
        }
        return ret;
 }
+EXPORT_SYMBOL_GPL(add_to_page_cache_lru);
 
 #ifdef CONFIG_NUMA
 struct page *__page_cache_alloc(gfp_t gfp)
 {
        if (cpuset_do_page_mem_spread()) {
                int n = cpuset_mem_spread_node();
-               return alloc_pages_node(n, gfp, 0);
+               return alloc_pages_exact_node(n, gfp, 0);
        }
        return alloc_pages(gfp, 0);
 }
@@ -565,6 +526,24 @@ void wait_on_page_bit(struct page *page, int bit_nr)
 EXPORT_SYMBOL(wait_on_page_bit);
 
 /**
+ * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
+ * @page: Page defining the wait queue of interest
+ * @waiter: Waiter to add to the queue
+ *
+ * Add an arbitrary @waiter to the wait queue for the nominated @page.
+ */
+void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
+{
+       wait_queue_head_t *q = page_waitqueue(page);
+       unsigned long flags;
+
+       spin_lock_irqsave(&q->lock, flags);
+       __add_wait_queue(q, waiter);
+       spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL_GPL(add_page_wait_queue);
+
+/**
  * unlock_page - unlock a locked page
  * @page: the page
  *
@@ -627,6 +606,7 @@ int __lock_page_killable(struct page *page)
        return __wait_on_bit_lock(page_waitqueue(page), &wait,
                                        sync_page_killable, TASK_KILLABLE);
 }
+EXPORT_SYMBOL_GPL(__lock_page_killable);
 
 /**
  * __lock_page_nosync - get a lock on the page, without calling sync_page()
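/*
 * Hypothetical sketch of using add_page_wait_queue() above.  Instead of
 * sleeping in wait_on_page_locked(), a caller can queue a waiter with
 * its own wake function and get a callback when the page's wait queue
 * is woken (e.g. from unlock_page()).  This appears aimed at
 * FS-Cache-style page monitoring; example_wake() and
 * example_monitor_page() are made up for illustration.
 */
static int example_wake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	/* Dequeue ourselves; a real user would also record the event. */
	list_del_init(&wait->task_list);
	return 1;
}

static void example_monitor_page(struct page *page, wait_queue_t *waiter)
{
	init_waitqueue_func_entry(waiter, example_wake);
	add_page_wait_queue(page, waiter);
}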
@@ -741,7 +721,14 @@ repeat:
                page = __page_cache_alloc(gfp_mask);
                if (!page)
                        return NULL;
-               err = add_to_page_cache_lru(page, mapping, index, gfp_mask);
+               /*
+                * We want a regular kernel memory (not highmem or DMA etc)
+                * allocation for the radix tree nodes, but we need to honour
+                * the context-specific requirements the caller has asked for.
+                * GFP_RECLAIM_MASK collects those requirements.
+                */
+               err = add_to_page_cache_lru(page, mapping, index,
+                       (gfp_mask & GFP_RECLAIM_MASK));
                if (unlikely(err)) {
                        page_cache_release(page);
                        page = NULL;
@@ -950,7 +937,7 @@ grab_cache_page_nowait(struct address_space *mapping, pgoff_t index)
                return NULL;
        }
        page = __page_cache_alloc(mapping_gfp_mask(mapping) & ~__GFP_FS);
-       if (page && add_to_page_cache_lru(page, mapping, index, GFP_KERNEL)) {
+       if (page && add_to_page_cache_lru(page, mapping, index, GFP_NOFS)) {
                page_cache_release(page);
                page = NULL;
        }
@@ -976,9 +963,6 @@ EXPORT_SYMBOL(grab_cache_page_nowait);
 static void shrink_readahead_size_eio(struct file *filp,
                                        struct file_ra_state *ra)
 {
-       if (!ra->ra_pages)
-               return;
-
        ra->ra_pages /= 4;
 }
 
@@ -1317,7 +1301,8 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                        goto out; /* skip atime */
                size = i_size_read(inode);
                if (pos < size) {
-                       retval = filemap_write_and_wait(mapping);
+                       retval = filemap_write_and_wait_range(mapping, pos,
+                                       pos + iov_length(iov, nr_segs) - 1);
                        if (!retval) {
                                retval = mapping->a_ops->direct_IO(READ, iocb,
                                                        iov, pos, nr_segs);
@@ -1361,12 +1346,11 @@ do_readahead(struct address_space *mapping, struct file *filp,
        if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
                return -EINVAL;
 
-       force_page_cache_readahead(mapping, filp, index,
-                                       max_sane_readahead(nr));
+       force_page_cache_readahead(mapping, filp, index, nr);
        return 0;
 }
 
-asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
+SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count)
 {
        ssize_t ret;
        struct file *file;
@@ -1385,6 +1369,13 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
        }
        return ret;
 }
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_readahead(long fd, loff_t offset, long count)
+{
+       return SYSC_readahead((int) fd, offset, (size_t) count);
+}
+SYSCALL_ALIAS(sys_readahead, SyS_readahead);
+#endif
 
 #ifdef CONFIG_MMU
 /**
@@ -1421,6 +1412,73 @@ static int page_cache_read(struct file *file, pgoff_t offset)
 
 #define MMAP_LOTSAMISS  (100)
 
+/*
+ * Synchronous readahead happens when we don't even find
+ * a page in the page cache at all.
+ */
+static void do_sync_mmap_readahead(struct vm_area_struct *vma,
+                                  struct file_ra_state *ra,
+                                  struct file *file,
+                                  pgoff_t offset)
+{
+       unsigned long ra_pages;
+       struct address_space *mapping = file->f_mapping;
+
+       /* If we don't want any read-ahead, don't bother */
+       if (VM_RandomReadHint(vma))
+               return;
+
+       if (VM_SequentialReadHint(vma) ||
+                       offset - 1 == (ra->prev_pos >> PAGE_CACHE_SHIFT)) {
+               page_cache_sync_readahead(mapping, ra, file, offset,
+                                         ra->ra_pages);
+               return;
+       }
+
+       if (ra->mmap_miss < INT_MAX)
+               ra->mmap_miss++;
+
+       /*
+        * Do we miss much more than hit in this file? If so,
+        * stop bothering with read-ahead. It will only hurt.
+        */
+       if (ra->mmap_miss > MMAP_LOTSAMISS)
+               return;
+
+       /*
+        * mmap read-around
+        */
+       ra_pages = max_sane_readahead(ra->ra_pages);
+       if (ra_pages) {
+               ra->start = max_t(long, 0, offset - ra_pages/2);
+               ra->size = ra_pages;
+               ra->async_size = 0;
+               ra_submit(ra, mapping, file);
+       }
+}
+
+/*
+ * Asynchronous readahead happens when we find the page and PG_readahead,
+ * so we want to possibly extend the readahead further.
+ */
+static void do_async_mmap_readahead(struct vm_area_struct *vma,
+                                   struct file_ra_state *ra,
+                                   struct file *file,
+                                   struct page *page,
+                                   pgoff_t offset)
+{
+       struct address_space *mapping = file->f_mapping;
+
+       /* If we don't want any read-ahead, don't bother */
+       if (VM_RandomReadHint(vma))
+               return;
+       if (ra->mmap_miss > 0)
+               ra->mmap_miss--;
+       if (PageReadahead(page))
+               page_cache_async_readahead(mapping, ra, file,
+                                          page, offset, ra->ra_pages);
+}
+
 /**
  * filemap_fault - read in file data for page fault handling
  * @vma:       vma in which the fault was taken
@@ -1440,78 +1498,44 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        struct address_space *mapping = file->f_mapping;
        struct file_ra_state *ra = &file->f_ra;
        struct inode *inode = mapping->host;
+       pgoff_t offset = vmf->pgoff;
        struct page *page;
        pgoff_t size;
-       int did_readaround = 0;
        int ret = 0;
 
        size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-       if (vmf->pgoff >= size)
+       if (offset >= size)
                return VM_FAULT_SIGBUS;
 
-       /* If we don't want any read-ahead, don't bother */
-       if (VM_RandomReadHint(vma))
-               goto no_cached_page;
-
        /*
         * Do we have something in the page cache already?
         */
-retry_find:
-       page = find_lock_page(mapping, vmf->pgoff);
-       /*
-        * For sequential accesses, we use the generic readahead logic.
-        */
-       if (VM_SequentialReadHint(vma)) {
-               if (!page) {
-                       page_cache_sync_readahead(mapping, ra, file,
-                                                          vmf->pgoff, 1);
-                       page = find_lock_page(mapping, vmf->pgoff);
-                       if (!page)
-                               goto no_cached_page;
-               }
-               if (PageReadahead(page)) {
-                       page_cache_async_readahead(mapping, ra, file, page,
-                                                          vmf->pgoff, 1);
-               }
-       }
-
-       if (!page) {
-               unsigned long ra_pages;
-
-               ra->mmap_miss++;
-
+       page = find_get_page(mapping, offset);
+       if (likely(page)) {
                /*
-                * Do we miss much more than hit in this file? If so,
-                * stop bothering with read-ahead. It will only hurt.
+                * We found the page, so try async readahead before
+                * waiting for the lock.
                 */
-               if (ra->mmap_miss > MMAP_LOTSAMISS)
-                       goto no_cached_page;
+               do_async_mmap_readahead(vma, ra, file, page, offset);
+               lock_page(page);
 
-               /*
-                * To keep the pgmajfault counter straight, we need to
-                * check did_readaround, as this is an inner loop.
-                */
-               if (!did_readaround) {
-                       ret = VM_FAULT_MAJOR;
-                       count_vm_event(PGMAJFAULT);
-               }
-               did_readaround = 1;
-               ra_pages = max_sane_readahead(file->f_ra.ra_pages);
-               if (ra_pages) {
-                       pgoff_t start = 0;
-
-                       if (vmf->pgoff > ra_pages / 2)
-                               start = vmf->pgoff - ra_pages / 2;
-                       do_page_cache_readahead(mapping, file, start, ra_pages);
+               /* Did it get truncated? */
+               if (unlikely(page->mapping != mapping)) {
+                       unlock_page(page);
+                       put_page(page);
+                       goto no_cached_page;
                }
-               page = find_lock_page(mapping, vmf->pgoff);
+       } else {
+               /* No page in the page cache at all */
+               do_sync_mmap_readahead(vma, ra, file, offset);
+               count_vm_event(PGMAJFAULT);
+               ret = VM_FAULT_MAJOR;
+retry_find:
+               page = find_lock_page(mapping, offset);
                if (!page)
                        goto no_cached_page;
        }
 
-       if (!did_readaround)
-               ra->mmap_miss--;
-
        /*
         * We have a locked page in the page cache, now we need to check
         * that it's up-to-date. If not, it is going to be due to an error.
@@ -1519,19 +1543,18 @@ retry_find:
        if (unlikely(!PageUptodate(page)))
                goto page_not_uptodate;
 
-       /* Must recheck i_size under page lock */
+       /*
+        * Found the page and have a reference on it.
+        * We must recheck i_size under page lock.
+        */
        size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-       if (unlikely(vmf->pgoff >= size)) {
+       if (unlikely(offset >= size)) {
                unlock_page(page);
                page_cache_release(page);
                return VM_FAULT_SIGBUS;
        }
 
-       /*
-        * Found the page and have a reference on it.
-        */
-       mark_page_accessed(page);
-       ra->prev_pos = (loff_t)page->index << PAGE_CACHE_SHIFT;
+       ra->prev_pos = (loff_t)offset << PAGE_CACHE_SHIFT;
        vmf->page = page;
        return ret | VM_FAULT_LOCKED;
 
@@ -1540,7 +1563,7 @@ no_cached_page:
         * We're only likely to ever get here if MADV_RANDOM is in
         * effect.
         */
-       error = page_cache_read(file, vmf->pgoff);
+       error = page_cache_read(file, offset);
 
        /*
         * The page we want has now been added to the page cache.
@@ -1560,12 +1583,6 @@ no_cached_page:
        return VM_FAULT_SIGBUS;
 
 page_not_uptodate:
-       /* IO error path */
-       if (!did_readaround) {
-               ret = VM_FAULT_MAJOR;
-               count_vm_event(PGMAJFAULT);
-       }
-
        /*
         * Umm, take care of errors if the page isn't up-to-date.
         * Try to re-read it _once_. We do this synchronously,
@@ -1766,7 +1783,7 @@ int should_remove_suid(struct dentry *dentry)
        if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
                kill |= ATTR_KILL_SGID;
 
-       if (unlikely(kill && !capable(CAP_FSETID)))
+       if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
                return kill;
 
        return 0;
@@ -1809,7 +1826,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
                int copy = min(bytes, iov->iov_len - base);
 
                base = 0;
-               left = __copy_from_user_inatomic_nocache(vaddr, buf, copy);
+               left = __copy_from_user_inatomic(vaddr, buf, copy);
                copied += copy;
                bytes -= copy;
                vaddr += copy;
@@ -1837,8 +1854,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
        if (likely(i->nr_segs == 1)) {
                int left;
                char __user *buf = i->iov->iov_base + i->iov_offset;
-               left = __copy_from_user_inatomic_nocache(kaddr + offset,
-                                                       buf, bytes);
+               left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
                copied = bytes - left;
        } else {
                copied = __iovec_copy_from_user_inatomic(kaddr + offset,
@@ -1866,7 +1882,7 @@ size_t iov_iter_copy_from_user(struct page *page,
        if (likely(i->nr_segs == 1)) {
                int left;
                char __user *buf = i->iov->iov_base + i->iov_offset;
-               left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
+               left = __copy_from_user(kaddr + offset, buf, bytes);
                copied = bytes - left;
        } else {
                copied = __iovec_copy_from_user_inatomic(kaddr + offset,
@@ -2060,18 +2076,10 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
        if (count != ocount)
                *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);
 
-       /*
-        * Unmap all mmappings of the file up-front.
-        *
-        * This will cause any pte dirty bits to be propagated into the
-        * pageframes for the subsequent filemap_write_and_wait().
-        */
        write_len = iov_length(iov, *nr_segs);
        end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
-       if (mapping_mapped(mapping))
-               unmap_mapping_range(mapping, pos, write_len, 0);
 
-       written = filemap_write_and_wait(mapping);
+       written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
        if (written)
                goto out;
 
@@ -2118,20 +2126,7 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
                }
                *ppos = end;
        }
-
-       /*
-        * Sync the fs metadata but not the minor inode changes and
-        * of course not the data as we did direct DMA for the IO.
-        * i_mutex is held, which protects generic_osync_inode() from
-        * livelocking.  AIO O_DIRECT ops attempt to sync metadata here.
-        */
 out:
-       if ((written >= 0 || written == -EIOCBQUEUED) &&
-           ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
-               int err = generic_osync_inode(inode, mapping, OSYNC_METADATA);
-               if (err < 0)
-                       written = err;
-       }
        return written;
 }
 EXPORT_SYMBOL(generic_file_direct_write);
@@ -2223,6 +2218,7 @@ again:
                pagefault_enable();
                flush_dcache_page(page);
 
+               mark_page_accessed(page);
                status = a_ops->write_end(file, mapping, pos, bytes, copied,
                                                page, fsdata);
                if (unlikely(status < 0))
@@ -2262,8 +2258,6 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
-       const struct address_space_operations *a_ops = mapping->a_ops;
-       struct inode *inode = mapping->host;
        ssize_t status;
        struct iov_iter i;
 
@@ -2273,16 +2267,6 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
        if (likely(status >= 0)) {
                written += status;
                *ppos = pos + status;
-
-               /*
-                * For now, when the user asks for O_SYNC, we'll actually give
-                * O_DSYNC
-                */
-               if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
-                       if (!a_ops->writepage || !is_sync_kiocb(iocb))
-                               status = generic_osync_inode(inode, mapping,
-                                               OSYNC_METADATA|OSYNC_DATA);
-               }
        }
        
        /*
@@ -2291,15 +2275,34 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
         * the file data here, to try to honour O_DIRECT expectations.
         */
        if (unlikely(file->f_flags & O_DIRECT) && written)
-               status = filemap_write_and_wait(mapping);
+               status = filemap_write_and_wait_range(mapping,
+                                       pos, pos + written - 1);
 
        return written ? written : status;
 }
 EXPORT_SYMBOL(generic_file_buffered_write);
 
-static ssize_t
-__generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov,
-                               unsigned long nr_segs, loff_t *ppos)
+/**
+ * __generic_file_aio_write - write data to a file
+ * @iocb:      IO state structure (file, offset, etc.)
+ * @iov:       vector with data to write
+ * @nr_segs:   number of segments in the vector
+ * @ppos:      position where to write
+ *
+ * This function does all the work needed for actually writing data to a
+ * file. It does all basic checks, removes SUID from the file, updates
+ * modification times and calls proper subroutines depending on whether we
+ * do direct IO or a standard buffered write.
+ *
+ * It expects i_mutex to be grabbed unless we work on a block device or similar
+ * object which does not need locking at all.
+ *
+ * This function does *not* take care of syncing data in case of O_SYNC write.
+ * A caller has to handle it. This is mainly due to the fact that we want to
+ * avoid syncing under i_mutex.
+ */
+ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+                                unsigned long nr_segs, loff_t *ppos)
 {
        struct file *file = iocb->ki_filp;
        struct address_space * mapping = file->f_mapping;
@@ -2396,51 +2399,37 @@ out:
        current->backing_dev_info = NULL;
        return written ? written : err;
 }
+EXPORT_SYMBOL(__generic_file_aio_write);
 
-ssize_t generic_file_aio_write_nolock(struct kiocb *iocb,
-               const struct iovec *iov, unsigned long nr_segs, loff_t pos)
-{
-       struct file *file = iocb->ki_filp;
-       struct address_space *mapping = file->f_mapping;
-       struct inode *inode = mapping->host;
-       ssize_t ret;
-
-       BUG_ON(iocb->ki_pos != pos);
-
-       ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs,
-                       &iocb->ki_pos);
-
-       if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
-               ssize_t err;
-
-               err = sync_page_range_nolock(inode, mapping, pos, ret);
-               if (err < 0)
-                       ret = err;
-       }
-       return ret;
-}
-EXPORT_SYMBOL(generic_file_aio_write_nolock);
-
+/**
+ * generic_file_aio_write - write data to a file
+ * @iocb:      IO state structure
+ * @iov:       vector with data to write
+ * @nr_segs:   number of segments in the vector
+ * @pos:       position in file where to write
+ *
+ * This is a wrapper around __generic_file_aio_write() to be used by most
+ * filesystems. It takes care of syncing the file in case of O_SYNC file
+ * and acquires i_mutex as needed.
+ */
 ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
                unsigned long nr_segs, loff_t pos)
 {
        struct file *file = iocb->ki_filp;
-       struct address_space *mapping = file->f_mapping;
-       struct inode *inode = mapping->host;
+       struct inode *inode = file->f_mapping->host;
        ssize_t ret;
 
        BUG_ON(iocb->ki_pos != pos);
 
        mutex_lock(&inode->i_mutex);
-       ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs,
-                       &iocb->ki_pos);
+       ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
        mutex_unlock(&inode->i_mutex);
 
-       if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
+       if (ret > 0 || ret == -EIOCBQUEUED) {
                ssize_t err;
 
-               err = sync_page_range(inode, mapping, pos, ret);
-               if (err < 0)
+               err = generic_write_sync(file, pos, ret);
+               if (err < 0 && ret > 0)
                        ret = err;
        }
        return ret;
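/*
 * Sketch of the calling pattern the comment above describes for objects
 * that do not need i_mutex (e.g. block devices): call
 * __generic_file_aio_write() directly, then handle O_SYNC yourself via
 * generic_write_sync() outside any locks.  example_blk_aio_write() is
 * hypothetical and only mirrors what such a caller would look like.
 */
static ssize_t example_blk_aio_write(struct kiocb *iocb, const struct iovec *iov,
				     unsigned long nr_segs, loff_t pos)
{
	ssize_t ret;

	BUG_ON(iocb->ki_pos != pos);

	/* No i_mutex here: the underlying object needs no such locking. */
	ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
	if (ret > 0 || ret == -EIOCBQUEUED) {
		ssize_t err;

		/* A no-op unless O_SYNC is set or the inode is sync. */
		err = generic_write_sync(iocb->ki_filp, pos, ret);
		if (err < 0 && ret > 0)
			ret = err;
	}
	return ret;
}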
@@ -2457,6 +2446,9 @@ EXPORT_SYMBOL(generic_file_aio_write);
  * (presumably at page->private).  If the release was successful, return `1'.
  * Otherwise return zero.
  *
+ * This may also be called if PG_fscache is set on a page, indicating that the
+ * page is known to the local caching routines.
+ *
  * The @gfp_mask argument specifies whether I/O may be performed to release
  * this page (__GFP_IO), and whether the call may block (__GFP_WAIT & __GFP_FS).
  *