drop unused dentry argument to ->fsync
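
The ->fsync() file operation loses its dentry argument; implementations can
reach the inode through the struct file instead.  Assuming the prototypes
before and after this series, the conversion for an individual filesystem
looks roughly like this (a sketch, not the verbatim VFS header):

    /* before: the dentry was unused by practically every implementation */
    int (*fsync)(struct file *file, struct dentry *dentry, int datasync);

    /* after: the inode is reachable via file->f_mapping->host */
    int (*fsync)(struct file *file, int datasync);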
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index c73c886..113ebd9 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -1,7 +1,7 @@
 /*
  * file.c - NTFS kernel file operations.  Part of the Linux-NTFS project.
  *
- * Copyright (c) 2001-2005 Anton Altaparmakov
+ * Copyright (c) 2001-2007 Anton Altaparmakov
  *
  * This program/include file is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as published
@@ -20,6 +20,7 @@
  */
 
 #include <linux/buffer_head.h>
+#include <linux/gfp.h>
 #include <linux/pagemap.h>
 #include <linux/pagevec.h>
 #include <linux/sched.h>
@@ -61,7 +62,7 @@ static int ntfs_file_open(struct inode *vi, struct file *filp)
 {
        if (sizeof(unsigned long) < 8) {
                if (i_size_read(vi) > MAX_LFS_FILESIZE)
-                       return -EFBIG;
+                       return -EOVERFLOW;
        }
        return generic_file_open(vi, filp);
 }
@@ -97,20 +98,16 @@ static int ntfs_file_open(struct inode *vi, struct file *filp)
  * the page at all.  For a more detailed explanation see ntfs_truncate() in
  * fs/ntfs/inode.c.
  *
- * @cached_page and @lru_pvec are just optimizations for dealing with multiple
- * pages.
- *
  * Return 0 on success and -errno on error.  In the case that an error is
  * encountered it is possible that the initialized size will already have been
  * incremented some way towards @new_init_size but it is guaranteed that if
  * this is the case, the necessary zeroing will also have happened and that all
  * metadata is self-consistent.
  *
- * Locking: i_sem on the vfs inode corrseponsind to the ntfs inode @ni must be
+ * Locking: i_mutex on the vfs inode corresponding to the ntfs inode @ni must be
  *         held by the caller.
  */
-static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size,
-               struct page **cached_page, struct pagevec *lru_pvec)
+static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size)
 {
        s64 old_init_size;
        loff_t old_i_size;
@@ -231,14 +228,12 @@ do_non_resident_extend:
                 * Read the page.  If the page is not present, this will zero
                 * the uninitialized regions for us.
                 */
-               page = read_cache_page(mapping, index,
-                               (filler_t*)mapping->a_ops->readpage, NULL);
+               page = read_mapping_page(mapping, index, NULL);
                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
                        goto init_err_out;
                }
-               wait_on_page_locked(page);
-               if (unlikely(!PageUptodate(page) || PageError(page))) {
+               if (unlikely(PageError(page))) {
                        page_cache_release(page);
                        err = -EIO;
                        goto init_err_out;
@@ -363,7 +358,7 @@ static inline void ntfs_fault_in_pages_readable(const char __user *uaddr,
        volatile char c;
 
        /* Set @end to the first byte outside the last page we care about. */
-       end = (const char __user*)PAGE_ALIGN((ptrdiff_t __user)uaddr + bytes);
+       end = (const char __user*)PAGE_ALIGN((unsigned long)uaddr + bytes);
 
        while (!__get_user(c, uaddr) && (uaddr += PAGE_SIZE, uaddr < end))
                ;
@@ -401,21 +396,16 @@ static inline void ntfs_fault_in_pages_readable_iovec(const struct iovec *iov,
  * @cached_page: allocated but as yet unused page
  * @lru_pvec:  lru-buffering pagevec of caller
  *
- * Obtain @nr_pages locked page cache pages from the mapping @maping and
+ * Obtain @nr_pages locked page cache pages from the mapping @mapping and
  * starting at index @index.
  *
- * If a page is newly created, increment its refcount and add it to the
- * caller's lru-buffering pagevec @lru_pvec.
- *
- * This is the same as mm/filemap.c::__grab_cache_page(), except that @nr_pages
- * are obtained at once instead of just one page and that 0 is returned on
- * success and -errno on error.
+ * If a page is newly created, it is added to the LRU list.
  *
  * Note, the page locks are obtained in ascending page index order.
  */
 static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
                pgoff_t index, const unsigned nr_pages, struct page **pages,
-               struct page **cached_page, struct pagevec *lru_pvec)
+               struct page **cached_page)
 {
        int err, nr;
 
@@ -431,7 +421,7 @@ static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
                                        goto err_out;
                                }
                        }
-                       err = add_to_page_cache(*cached_page, mapping, index,
+                       err = add_to_page_cache_lru(*cached_page, mapping, index,
                                        GFP_KERNEL);
                        if (unlikely(err)) {
                                if (err == -EEXIST)
@@ -439,9 +429,6 @@ static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
                                goto err_out;
                        }
                        pages[nr] = *cached_page;
-                       page_cache_get(*cached_page);
-                       if (unlikely(!pagevec_add(lru_pvec, *cached_page)))
-                               __pagevec_lru_add(lru_pvec);
                        *cached_page = NULL;
                }
                index++;
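
add_to_page_cache_lru() both inserts the page into the page cache and puts
it on the LRU, so the caller-supplied pagevec becomes unnecessary.  A rough
sketch of the equivalence, using the names from the hunks above:

    /* old pattern: insert, take an extra reference, batch onto the LRU */
    err = add_to_page_cache(*cached_page, mapping, index, GFP_KERNEL);
    if (!err) {
            page_cache_get(*cached_page);
            if (unlikely(!pagevec_add(lru_pvec, *cached_page)))
                    __pagevec_lru_add(lru_pvec);
    }

    /* new pattern: one call handles the page-cache insert and the LRU add */
    err = add_to_page_cache_lru(*cached_page, mapping, index, GFP_KERNEL);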
@@ -473,7 +460,7 @@ static inline int ntfs_submit_bh_for_read(struct buffer_head *bh)
  * @bytes:     number of bytes to be written
  *
  * This is called for non-resident attributes from ntfs_file_buffered_write()
- * with i_sem held on the inode (@pages[0]->mapping->host).  There are
+ * with i_mutex held on the inode (@pages[0]->mapping->host).  There are
  * @nr_pages pages in @pages which are locked but not kmap()ped.  The source
  * data has not yet been copied into the @pages.
  * 
@@ -510,7 +497,7 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
        u32 attr_rec_len = 0;
        unsigned blocksize, u;
        int err, mp_size;
-       BOOL rl_write_locked, was_hole, is_retry;
+       bool rl_write_locked, was_hole, is_retry;
        unsigned char blocksize_bits;
        struct {
                u8 runlist_merged:1;
@@ -529,11 +516,12 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
                        "index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
                        vi->i_ino, ni->type, pages[0]->index, nr_pages,
                        (long long)pos, bytes);
-       blocksize_bits = vi->i_blkbits;
-       blocksize = 1 << blocksize_bits;
+       blocksize = vol->sb->s_blocksize;
+       blocksize_bits = vol->sb->s_blocksize_bits;
        u = 0;
        do {
-               struct page *page = pages[u];
+               page = pages[u];
+               BUG_ON(!page);
                /*
                 * create_empty_buffers() will create uptodate/dirty buffers if
                 * the page is uptodate/dirty.
@@ -544,13 +532,13 @@ static int ntfs_prepare_pages_for_non_resident_write(struct page **pages,
                                return -ENOMEM;
                }
        } while (++u < nr_pages);
-       rl_write_locked = FALSE;
+       rl_write_locked = false;
        rl = NULL;
        err = 0;
        vcn = lcn = -1;
        vcn_len = 0;
        lcn_block = -1;
-       was_hole = FALSE;
+       was_hole = false;
        cpos = pos >> vol->cluster_size_bits;
        end = pos + bytes;
        cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits;
@@ -608,11 +596,8 @@ do_next_page:
                                        ntfs_submit_bh_for_read(bh);
                                        *wait_bh++ = bh;
                                } else {
-                                       u8 *kaddr = kmap_atomic(page, KM_USER0);
-                                       memset(kaddr + bh_offset(bh), 0,
+                                       zero_user(page, bh_offset(bh),
                                                        blocksize);
-                                       kunmap_atomic(kaddr, KM_USER0);
-                                       flush_dcache_page(page);
                                        set_buffer_uptodate(bh);
                                }
                        }
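
zero_user() from <linux/highmem.h> wraps the kmap/memset/flush sequence that
was open-coded here; an approximate expansion of zero_user(page, offset, len)
at this point in the tree:

    u8 *kaddr = kmap_atomic(page, KM_USER0);
    memset(kaddr + offset, 0, len);             /* zero len bytes at offset */
    kunmap_atomic(kaddr, KM_USER0);
    flush_dcache_page(page);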
@@ -687,12 +672,8 @@ map_buffer_cached:
                                                ntfs_submit_bh_for_read(bh);
                                                *wait_bh++ = bh;
                                        } else {
-                                               u8 *kaddr = kmap_atomic(page,
-                                                               KM_USER0);
-                                               memset(kaddr + bh_offset(bh),
-                                                               0, blocksize);
-                                               kunmap_atomic(kaddr, KM_USER0);
-                                               flush_dcache_page(page);
+                                               zero_user(page, bh_offset(bh),
+                                                               blocksize);
                                                set_buffer_uptodate(bh);
                                        }
                                }
@@ -710,11 +691,8 @@ map_buffer_cached:
                         */
                        if (bh_end <= pos || bh_pos >= end) {
                                if (!buffer_uptodate(bh)) {
-                                       u8 *kaddr = kmap_atomic(page, KM_USER0);
-                                       memset(kaddr + bh_offset(bh), 0,
+                                       zero_user(page, bh_offset(bh),
                                                        blocksize);
-                                       kunmap_atomic(kaddr, KM_USER0);
-                                       flush_dcache_page(page);
                                        set_buffer_uptodate(bh);
                                }
                                mark_buffer_dirty(bh);
@@ -753,15 +731,12 @@ map_buffer_cached:
                                if (!buffer_uptodate(bh))
                                        set_buffer_uptodate(bh);
                        } else if (!buffer_uptodate(bh)) {
-                               u8 *kaddr = kmap_atomic(page, KM_USER0);
-                               memset(kaddr + bh_offset(bh), 0, blocksize);
-                               kunmap_atomic(kaddr, KM_USER0);
-                               flush_dcache_page(page);
+                               zero_user(page, bh_offset(bh), blocksize);
                                set_buffer_uptodate(bh);
                        }
                        continue;
                }
-               is_retry = FALSE;
+               is_retry = false;
                if (!rl) {
                        down_read(&ni->runlist.lock);
 retry_remap:
@@ -777,7 +752,7 @@ retry_remap:
                                 * Successful remap, setup the map cache and
                                 * use that to deal with the buffer.
                                 */
-                               was_hole = FALSE;
+                               was_hole = false;
                                vcn = bh_cpos;
                                vcn_len = rl[1].vcn - vcn;
                                lcn_block = lcn << (vol->cluster_size_bits -
@@ -793,7 +768,7 @@ retry_remap:
                                if (likely(vcn + vcn_len >= cend)) {
                                        if (rl_write_locked) {
                                                up_write(&ni->runlist.lock);
-                                               rl_write_locked = FALSE;
+                                               rl_write_locked = false;
                                        } else
                                                up_read(&ni->runlist.lock);
                                        rl = NULL;
@@ -819,13 +794,13 @@ retry_remap:
                                         */
                                        up_read(&ni->runlist.lock);
                                        down_write(&ni->runlist.lock);
-                                       rl_write_locked = TRUE;
+                                       rl_write_locked = true;
                                        goto retry_remap;
                                }
                                err = ntfs_map_runlist_nolock(ni, bh_cpos,
                                                NULL);
                                if (likely(!err)) {
-                                       is_retry = TRUE;
+                                       is_retry = true;
                                        goto retry_remap;
                                }
                                /*
@@ -880,11 +855,8 @@ rl_not_mapped_enoent:
                                        if (!buffer_uptodate(bh))
                                                set_buffer_uptodate(bh);
                                } else if (!buffer_uptodate(bh)) {
-                                       u8 *kaddr = kmap_atomic(page, KM_USER0);
-                                       memset(kaddr + bh_offset(bh), 0,
-                                                       blocksize);
-                                       kunmap_atomic(kaddr, KM_USER0);
-                                       flush_dcache_page(page);
+                                       zero_user(page, bh_offset(bh),
+                                               blocksize);
                                        set_buffer_uptodate(bh);
                                }
                                continue;
@@ -904,7 +876,7 @@ rl_not_mapped_enoent:
                if (!rl_write_locked) {
                        up_read(&ni->runlist.lock);
                        down_write(&ni->runlist.lock);
-                       rl_write_locked = TRUE;
+                       rl_write_locked = true;
                        goto retry_remap;
                }
                /* Find the previous last allocated cluster. */
@@ -918,7 +890,7 @@ rl_not_mapped_enoent:
                        }
                }
                rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE,
-                               FALSE);
+                               false);
                if (IS_ERR(rl2)) {
                        err = PTR_ERR(rl2);
                        ntfs_debug("Failed to allocate cluster, error code %i.",
@@ -943,7 +915,8 @@ rl_not_mapped_enoent:
                }
                ni->runlist.rl = rl;
                status.runlist_merged = 1;
-               ntfs_debug("Allocated cluster, lcn 0x%llx.", lcn);
+               ntfs_debug("Allocated cluster, lcn 0x%llx.",
+                               (unsigned long long)lcn);
                /* Map and lock the mft record and get the attribute record. */
                if (!NInoAttr(ni))
                        base_ni = ni;
@@ -1093,7 +1066,7 @@ rl_not_mapped_enoent:
                status.mft_attr_mapped = 0;
                status.mp_rebuilt = 0;
                /* Setup the map cache and use that to deal with the buffer. */
-               was_hole = TRUE;
+               was_hole = true;
                vcn = bh_cpos;
                vcn_len = 1;
                lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits);
@@ -1105,7 +1078,7 @@ rl_not_mapped_enoent:
                 */
                if (likely(vcn + vcn_len >= cend)) {
                        up_write(&ni->runlist.lock);
-                       rl_write_locked = FALSE;
+                       rl_write_locked = false;
                        rl = NULL;
                }
                goto map_buffer_cached;
@@ -1117,7 +1090,7 @@ rl_not_mapped_enoent:
        if (likely(!err)) {
                if (unlikely(rl_write_locked)) {
                        up_write(&ni->runlist.lock);
-                       rl_write_locked = FALSE;
+                       rl_write_locked = false;
                } else if (unlikely(rl))
                        up_read(&ni->runlist.lock);
                rl = NULL;
@@ -1138,16 +1111,12 @@ rl_not_mapped_enoent:
                         * to zero the overflowing region.
                         */
                        if (unlikely(bh_pos + blocksize > initialized_size)) {
-                               u8 *kaddr;
                                int ofs = 0;
 
                                if (likely(bh_pos < initialized_size))
                                        ofs = initialized_size - bh_pos;
-                               kaddr = kmap_atomic(page, KM_USER0);
-                               memset(kaddr + bh_offset(bh) + ofs, 0,
-                                               blocksize - ofs);
-                               kunmap_atomic(kaddr, KM_USER0);
-                               flush_dcache_page(page);
+                               zero_user_segment(page, bh_offset(bh) + ofs,
+                                               blocksize);
                        }
                } else /* if (unlikely(!buffer_uptodate(bh))) */
                        err = -EIO;
@@ -1206,8 +1175,6 @@ rl_not_mapped_enoent:
                                        "attribute runlist in error code "
                                        "path.  Run chkdsk to recover the "
                                        "lost cluster.");
-                       make_bad_inode(vi);
-                       make_bad_inode(VFS_I(base_ni));
                        NVolSetErrors(vol);
                } else /* if (success) */ {
                        status.runlist_merged = 0;
@@ -1238,8 +1205,6 @@ rl_not_mapped_enoent:
                        ntfs_error(vol->sb, "Failed to restore attribute "
                                        "record in error code path.  Run "
                                        "chkdsk to recover.");
-                       make_bad_inode(vi);
-                       make_bad_inode(VFS_I(base_ni));
                        NVolSetErrors(vol);
                } else /* if (success) */ {
                        if (ntfs_mapping_pairs_build(vol, (u8*)a +
@@ -1252,8 +1217,6 @@ rl_not_mapped_enoent:
                                                "mapping pairs array in error "
                                                "code path.  Run chkdsk to "
                                                "recover.");
-                               make_bad_inode(vi);
-                               make_bad_inode(VFS_I(base_ni));
                                NVolSetErrors(vol);
                        }
                        flush_dcache_mft_record_page(ctx->ntfs_ino);
@@ -1293,11 +1256,8 @@ rl_not_mapped_enoent:
                                if (PageUptodate(page))
                                        set_buffer_uptodate(bh);
                                else {
-                                       u8 *kaddr = kmap_atomic(page, KM_USER0);
-                                       memset(kaddr + bh_offset(bh), 0,
+                                       zero_user(page, bh_offset(bh),
                                                        blocksize);
-                                       kunmap_atomic(kaddr, KM_USER0);
-                                       flush_dcache_page(page);
                                        set_buffer_uptodate(bh);
                                }
                        }
@@ -1310,7 +1270,7 @@ rl_not_mapped_enoent:
 
 /*
  * Copy as much as we can into the pages and return the number of bytes which
- * were sucessfully copied.  If a fault is encountered then clear the pages
+ * were successfully copied.  If a fault is encountered then clear the pages
  * out to (ofs + bytes) and return the number of bytes which were copied.
  */
 static inline size_t ntfs_copy_from_user(struct page **pages,
@@ -1318,7 +1278,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
                size_t bytes)
 {
        struct page **last_page = pages + nr_pages;
-       char *kaddr;
+       char *addr;
        size_t total = 0;
        unsigned len;
        int left;
@@ -1327,13 +1287,13 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
                len = PAGE_CACHE_SIZE - ofs;
                if (len > bytes)
                        len = bytes;
-               kaddr = kmap_atomic(*pages, KM_USER0);
-               left = __copy_from_user_inatomic(kaddr + ofs, buf, len);
-               kunmap_atomic(kaddr, KM_USER0);
+               addr = kmap_atomic(*pages, KM_USER0);
+               left = __copy_from_user_inatomic(addr + ofs, buf, len);
+               kunmap_atomic(addr, KM_USER0);
                if (unlikely(left)) {
                        /* Do it the slow way. */
-                       kaddr = kmap(*pages);
-                       left = __copy_from_user(kaddr + ofs, buf, len);
+                       addr = kmap(*pages);
+                       left = __copy_from_user(addr + ofs, buf, len);
                        kunmap(*pages);
                        if (unlikely(left))
                                goto err_out;
@@ -1357,14 +1317,12 @@ err_out:
                len = PAGE_CACHE_SIZE;
                if (len > bytes)
                        len = bytes;
-               kaddr = kmap_atomic(*pages, KM_USER0);
-               memset(kaddr, 0, len);
-               kunmap_atomic(kaddr, KM_USER0);
+               zero_user(*pages, 0, len);
        }
        goto out;
 }
 
-static size_t __ntfs_copy_from_user_iovec(char *vaddr,
+static size_t __ntfs_copy_from_user_iovec_inatomic(char *vaddr,
                const struct iovec *iov, size_t iov_ofs, size_t bytes)
 {
        size_t total = 0;
@@ -1382,10 +1340,6 @@ static size_t __ntfs_copy_from_user_iovec(char *vaddr,
                bytes -= len;
                vaddr += len;
                if (unlikely(left)) {
-                       /*
-                        * Zero the rest of the target like __copy_from_user().
-                        */
-                       memset(vaddr, 0, bytes);
                        total -= left;
                        break;
                }
@@ -1426,11 +1380,13 @@ static inline void ntfs_set_next_iovec(const struct iovec **iovp,
  * pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s
  * single-segment behaviour.
  *
- * We call the same helper (__ntfs_copy_from_user_iovec()) both when atomic and
- * when not atomic.  This is ok because __ntfs_copy_from_user_iovec() calls
- * __copy_from_user_inatomic() and it is ok to call this when non-atomic.  In
- * fact, the only difference between __copy_from_user_inatomic() and
- * __copy_from_user() is that the latter calls might_sleep().  And on many
+ * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both
+ * when atomic and when not atomic.  This is ok because
+ * __ntfs_copy_from_user_iovec_inatomic() calls __copy_from_user_inatomic()
+ * and it is ok to call this when non-atomic.
+ * In fact, the only difference between __copy_from_user_inatomic() and
+ * __copy_from_user() is that the latter calls might_sleep() and the former
+ * should not zero the tail of the buffer on error.  And on many
  * architectures __copy_from_user_inatomic() is just defined to
  * __copy_from_user() so it makes no difference at all on those architectures.
  */
@@ -1439,22 +1395,26 @@ static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
                size_t *iov_ofs, size_t bytes)
 {
        struct page **last_page = pages + nr_pages;
-       char *kaddr;
+       char *addr;
        size_t copied, len, total = 0;
 
        do {
                len = PAGE_CACHE_SIZE - ofs;
                if (len > bytes)
                        len = bytes;
-               kaddr = kmap_atomic(*pages, KM_USER0);
-               copied = __ntfs_copy_from_user_iovec(kaddr + ofs,
+               addr = kmap_atomic(*pages, KM_USER0);
+               copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
                                *iov, *iov_ofs, len);
-               kunmap_atomic(kaddr, KM_USER0);
+               kunmap_atomic(addr, KM_USER0);
                if (unlikely(copied != len)) {
                        /* Do it the slow way. */
-                       kaddr = kmap(*pages);
-                       copied = __ntfs_copy_from_user_iovec(kaddr + ofs,
+                       addr = kmap(*pages);
+                       copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
                                        *iov, *iov_ofs, len);
+                       /*
+                        * Zero the rest of the target like __copy_from_user().
+                        */
+                       memset(addr + ofs + copied, 0, len - copied);
                        kunmap(*pages);
                        if (unlikely(copied != len))
                                goto err_out;
@@ -1478,9 +1438,7 @@ err_out:
                len = PAGE_CACHE_SIZE;
                if (len > bytes)
                        len = bytes;
-               kaddr = kmap_atomic(*pages, KM_USER0);
-               memset(kaddr, 0, len);
-               kunmap_atomic(kaddr, KM_USER0);
+               zero_user(*pages, 0, len);
        }
        goto out;
 }
@@ -1489,14 +1447,15 @@ static inline void ntfs_flush_dcache_pages(struct page **pages,
                unsigned nr_pages)
 {
        BUG_ON(!nr_pages);
+       /*
+        * Warning: Do not do the decrement at the same time as the call to
+        * flush_dcache_page() because it is a NULL macro on i386 and hence the
+        * decrement never happens so the loop never terminates.
+        */
        do {
-               /*
-                * Warning: Do not do the decrement at the same time as the
-                * call because flush_dcache_page() is a NULL macro on i386
-                * and hence the decrement never happens.
-                */
+               --nr_pages;
                flush_dcache_page(pages[nr_pages]);
-       } while (--nr_pages > 0);
+       } while (nr_pages > 0);
 }
 
 /**
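
The decrement is kept out of the flush_dcache_page() argument because on
architectures where the call is a no-op macro the argument expression is
never evaluated.  A minimal illustration of the pitfall, assuming an
i386-style definition:

    #define flush_dcache_page(page) do { } while (0)   /* argument unused */

    /* broken: --nr_pages vanishes with the macro, so the loop never ends */
    do {
            flush_dcache_page(pages[--nr_pages]);
    } while (nr_pages > 0);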
@@ -1525,25 +1484,25 @@ static inline int ntfs_commit_pages_after_non_resident_write(
 
        vi = pages[0]->mapping->host;
        ni = NTFS_I(vi);
-       blocksize = 1 << vi->i_blkbits;
+       blocksize = vi->i_sb->s_blocksize;
        end = pos + bytes;
        u = 0;
        do {
                s64 bh_pos;
                struct page *page;
-               BOOL partial;
+               bool partial;
 
                page = pages[u];
                bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
                bh = head = page_buffers(page);
-               partial = FALSE;
+               partial = false;
                do {
                        s64 bh_end;
 
                        bh_end = bh_pos + blocksize;
                        if (bh_end <= pos || bh_pos >= end) {
                                if (!buffer_uptodate(bh))
-                                       partial = TRUE;
+                                       partial = true;
                        } else {
                                set_buffer_uptodate(bh);
                                mark_buffer_dirty(bh);
@@ -1622,11 +1581,8 @@ err_out:
                unmap_mft_record(base_ni);
        ntfs_error(vi->i_sb, "Failed to update initialized_size/i_size (error "
                        "code %i).", err);
-       if (err != -ENOMEM) {
+       if (err != -ENOMEM)
                NVolSetErrors(ni->vol);
-               make_bad_inode(VFS_I(base_ni));
-               make_bad_inode(vi);
-       }
        return err;
 }
 
@@ -1637,7 +1593,7 @@ err_out:
  * @pos:       byte position in file at which the write begins
  * @bytes:     number of bytes to be written
  *
- * This is called from ntfs_file_buffered_write() with i_sem held on the inode
+ * This is called from ntfs_file_buffered_write() with i_mutex held on the inode
  * (@pages[0]->mapping->host).  There are @nr_pages pages in @pages which are
  * locked but not kmap()ped.  The source data has already been copied into the
  * @page.  ntfs_prepare_pages_for_non_resident_write() has been called before
@@ -1766,8 +1722,6 @@ static int ntfs_commit_pages_after_write(struct page **pages,
        read_unlock_irqrestore(&ni->size_lock, flags);
        BUG_ON(initialized_size != i_size);
        if (end > initialized_size) {
-               unsigned long flags;
-
                write_lock_irqsave(&ni->size_lock, flags);
                ni->initialized_size = end;
                i_size_write(vi, end);
@@ -1801,8 +1755,6 @@ err_out:
                ntfs_error(vi->i_sb, "Resident attribute commit write failed "
                                "with error %i.", err);
                NVolSetErrors(ni->vol);
-               make_bad_inode(VFS_I(base_ni));
-               make_bad_inode(vi);
        }
        if (ctx)
                ntfs_attr_put_search_ctx(ctx);
@@ -1814,7 +1766,7 @@ err_out:
 /**
  * ntfs_file_buffered_write -
  *
- * Locking: The vfs is holding ->i_sem on the inode.
+ * Locking: The vfs is holding ->i_mutex on the inode.
  */
 static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
                const struct iovec *iov, unsigned long nr_segs,
@@ -1836,7 +1788,6 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
        ssize_t status, written;
        unsigned nr_pages;
        int err;
-       struct pagevec lru_pvec;
 
        ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
                        "pos 0x%llx, count 0x%lx.",
@@ -1948,7 +1899,6 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
                        }
                }
        }
-       pagevec_init(&lru_pvec, 0);
        written = 0;
        /*
         * If the write starts beyond the initialized size, extend it up to the
@@ -1961,8 +1911,7 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
        ll = ni->initialized_size;
        read_unlock_irqrestore(&ni->size_lock, flags);
        if (pos > ll) {
-               err = ntfs_attr_extend_initialized(ni, pos, &cached_page,
-                               &lru_pvec);
+               err = ntfs_attr_extend_initialized(ni, pos);
                if (err < 0) {
                        ntfs_error(vol->sb, "Cannot perform write to inode "
                                        "0x%lx, attribute type 0x%x, because "
@@ -2005,7 +1954,7 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
                                 */
                                down_read(&ni->runlist.lock);
                                lcn = ntfs_attr_vcn_to_lcn_nolock(ni, pos >>
-                                               vol->cluster_size_bits, FALSE);
+                                               vol->cluster_size_bits, false);
                                up_read(&ni->runlist.lock);
                                if (unlikely(lcn < LCN_HOLE)) {
                                        status = -EIO;
@@ -2048,7 +1997,7 @@ static ssize_t ntfs_file_buffered_write(struct kiocb *iocb,
                        ntfs_fault_in_pages_readable_iovec(iov, iov_ofs, bytes);
                /* Get and lock @do_pages starting at index @start_idx. */
                status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages,
-                               pages, &cached_page, &lru_pvec);
+                               pages, &cached_page);
                if (unlikely(status))
                        break;
                /*
@@ -2113,15 +2062,6 @@ err_out:
        *ppos = pos;
        if (cached_page)
                page_cache_release(cached_page);
-       /* For now, when the user asks for O_SYNC, we actually give O_DSYNC. */
-       if (likely(!status)) {
-               if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(vi))) {
-                       if (!mapping->a_ops->writepage || !is_sync_kiocb(iocb))
-                               status = generic_osync_inode(vi, mapping,
-                                               OSYNC_METADATA|OSYNC_DATA);
-               }
-       }
-       pagevec_lru_add(&lru_pvec);
        ntfs_debug("Done.  Returning %s (written 0x%lx, status %li).",
                        written ? "written" : "status", (unsigned long)written,
                        (long)status);
@@ -2138,28 +2078,13 @@ static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        loff_t pos;
-       unsigned long seg;
        size_t count;           /* after file limit checks */
        ssize_t written, err;
 
        count = 0;
-       for (seg = 0; seg < nr_segs; seg++) {
-               const struct iovec *iv = &iov[seg];
-               /*
-                * If any segment has a negative length, or the cumulative
-                * length ever wraps negative then return -EINVAL.
-                */
-               count += iv->iov_len;
-               if (unlikely((ssize_t)(count|iv->iov_len) < 0))
-                       return -EINVAL;
-               if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
-                       continue;
-               if (!seg)
-                       return -EFAULT;
-               nr_segs = seg;
-               count -= iv->iov_len;   /* This segment is no good */
-               break;
-       }
+       err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
+       if (err)
+               return err;
        pos = *ppos;
        vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
        /* We can write back this queue in page reclaim. */
@@ -2170,10 +2095,10 @@ static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
                goto out;
        if (!count)
                goto out;
-       err = remove_suid(file->f_dentry);
+       err = file_remove_suid(file);
        if (err)
                goto out;
-       inode_update_time(inode, 1);
+       file_update_time(file);
        written = ntfs_file_buffered_write(iocb, iov, nr_segs, pos, ppos,
                        count);
 out:
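
generic_segment_checks() performs essentially the validation the removed
loop did by hand.  A condensed sketch of what the call is expected to do
here (see the real helper in mm/filemap.c of this era):

    /*
     * Walk the iovec array: accumulate iov_len into count, return -EINVAL
     * if any length is negative or the total wraps, return -EFAULT if the
     * very first segment fails access_ok(), and merely truncate nr_segs if
     * a later segment is unreadable.  On success, count holds the total.
     */
    err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);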
@@ -2184,23 +2109,21 @@ out:
 /**
  * ntfs_file_aio_write -
  */
-static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const char __user *buf,
-               size_t count, loff_t pos)
+static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+               unsigned long nr_segs, loff_t pos)
 {
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        ssize_t ret;
-       struct iovec local_iov = { .iov_base = (void __user *)buf,
-                                  .iov_len = count };
 
        BUG_ON(iocb->ki_pos != pos);
 
-       down(&inode->i_sem);
-       ret = ntfs_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
-       up(&inode->i_sem);
-       if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
-               int err = sync_page_range(inode, mapping, pos, ret);
+       mutex_lock(&inode->i_mutex);
+       ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos);
+       mutex_unlock(&inode->i_mutex);
+       if (ret > 0) {
+               int err = generic_write_sync(file, pos, ret);
                if (err < 0)
                        ret = err;
        }
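
With generic_write_sync() doing the O_SYNC/IS_SYNC checks itself, the caller
no longer tests the flags before syncing.  At this point in the tree the
helper behaves roughly as follows (a sketch of its behaviour, not the
verbatim fs/sync.c source):

    /* sync the written range only when the file or inode requires it */
    if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
            return 0;
    return vfs_fsync_range(file, pos, pos + count - 1,
                           (file->f_flags & __O_SYNC) ? 0 : 1);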
@@ -2208,49 +2131,8 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const char __user *buf,
 }
 
 /**
- * ntfs_file_writev -
- *
- * Basically the same as generic_file_writev() except that it ends up calling
- * ntfs_file_aio_write_nolock() instead of __generic_file_aio_write_nolock().
- */
-static ssize_t ntfs_file_writev(struct file *file, const struct iovec *iov,
-               unsigned long nr_segs, loff_t *ppos)
-{
-       struct address_space *mapping = file->f_mapping;
-       struct inode *inode = mapping->host;
-       struct kiocb kiocb;
-       ssize_t ret;
-
-       down(&inode->i_sem);
-       init_sync_kiocb(&kiocb, file);
-       ret = ntfs_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
-       if (ret == -EIOCBQUEUED)
-               ret = wait_on_sync_kiocb(&kiocb);
-       up(&inode->i_sem);
-       if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
-               int err = sync_page_range(inode, mapping, *ppos - ret, ret);
-               if (err < 0)
-                       ret = err;
-       }
-       return ret;
-}
-
-/**
- * ntfs_file_write - simple wrapper for ntfs_file_writev()
- */
-static ssize_t ntfs_file_write(struct file *file, const char __user *buf,
-               size_t count, loff_t *ppos)
-{
-       struct iovec local_iov = { .iov_base = (void __user *)buf,
-                                  .iov_len = count };
-
-       return ntfs_file_writev(file, &local_iov, 1, ppos);
-}
-
-/**
  * ntfs_file_fsync - sync a file to disk
  * @filp:      file to be synced
- * @dentry:    dentry describing the file to sync
  * @datasync:  if non-zero only flush user data and not metadata
  *
  * Data integrity sync of a file to disk.  Used for fsync, fdatasync, and msync
@@ -2266,25 +2148,21 @@ static ssize_t ntfs_file_write(struct file *file, const char __user *buf,
  * Also, if @datasync is true, we do not wait on the inode to be written out
  * but we always wait on the page cache pages to be written out.
  *
- * Note: In the past @filp could be NULL so we ignore it as we don't need it
- * anyway.
- *
- * Locking: Caller must hold i_sem on the inode.
+ * Locking: Caller must hold i_mutex on the inode.
  *
  * TODO: We should probably also write all attribute/index inodes associated
  * with this inode but since we have no simple way of getting to them we ignore
  * this problem for now.
  */
-static int ntfs_file_fsync(struct file *filp, struct dentry *dentry,
-               int datasync)
+static int ntfs_file_fsync(struct file *filp, int datasync)
 {
-       struct inode *vi = dentry->d_inode;
+       struct inode *vi = filp->f_mapping->host;
        int err, ret = 0;
 
        ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);
        BUG_ON(S_ISDIR(vi->i_mode));
        if (!datasync || !NInoNonResident(NTFS_I(vi)))
-               ret = ntfs_write_inode(vi, 1);
+               ret = __ntfs_write_inode(vi, 1);
        write_inode_now(vi, !datasync);
        /*
         * NOTE: If we were to use mapping->private_list (see ext2 and
@@ -2304,15 +2182,13 @@ static int ntfs_file_fsync(struct file *filp, struct dentry *dentry,
 
 #endif /* NTFS_RW */
 
-struct file_operations ntfs_file_ops = {
+const struct file_operations ntfs_file_ops = {
        .llseek         = generic_file_llseek,   /* Seek inside file. */
-       .read           = generic_file_read,     /* Read from file. */
+       .read           = do_sync_read,          /* Read from file. */
        .aio_read       = generic_file_aio_read, /* Async read from file. */
-       .readv          = generic_file_readv,    /* Read from file. */
 #ifdef NTFS_RW
-       .write          = ntfs_file_write,       /* Write to file. */
+       .write          = do_sync_write,         /* Write to file. */
        .aio_write      = ntfs_file_aio_write,   /* Async write to file. */
-       .writev         = ntfs_file_writev,      /* Write to file. */
        /*.release      = ,*/                    /* Last file is closed.  See
                                                    fs/ext2/file.c::
                                                    ext2_release_file() for
@@ -2328,7 +2204,7 @@ struct file_operations ntfs_file_ops = {
                                                    mounted filesystem. */
        .mmap           = generic_file_mmap,     /* Mmap file. */
        .open           = ntfs_file_open,        /* Open file. */
-       .sendfile       = generic_file_sendfile, /* Zero-copy data send with
+       .splice_read    = generic_file_splice_read /* Zero-copy data send with
                                                    the data source being on
                                                    the ntfs partition.  We do
                                                    not need to care about the
@@ -2340,13 +2216,13 @@ struct file_operations ntfs_file_ops = {
                                                    the data source. */
 };
 
-struct inode_operations ntfs_file_inode_ops = {
+const struct inode_operations ntfs_file_inode_ops = {
 #ifdef NTFS_RW
        .truncate       = ntfs_truncate_vfs,
        .setattr        = ntfs_setattr,
 #endif /* NTFS_RW */
 };
 
-struct file_operations ntfs_empty_file_ops = {};
+const struct file_operations ntfs_empty_file_ops = {};
 
-struct inode_operations ntfs_empty_inode_ops = {};
+const struct inode_operations ntfs_empty_inode_ops = {};