/*
* file.c - NTFS kernel file operations. Part of the Linux-NTFS project.
*
- * Copyright (c) 2001-2005 Anton Altaparmakov
+ * Copyright (c) 2001-2007 Anton Altaparmakov
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
*/
#include <linux/buffer_head.h>
+#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
{
if (sizeof(unsigned long) < 8) {
if (i_size_read(vi) > MAX_LFS_FILESIZE)
- return -EFBIG;
+ return -EOVERFLOW;
}
return generic_file_open(vi, filp);
}
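/*
 * Background for the check above (a sketch, not part of the change itself):
 * on 32-bit architectures the page cache index is an unsigned long, so the
 * largest file size the kernel can address is bounded by MAX_LFS_FILESIZE,
 * historically defined in <linux/fs.h> roughly as:
 *
 *	#if BITS_PER_LONG == 32
 *	#define MAX_LFS_FILESIZE \
 *		(((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG - 1)) - 1)
 *	#else
 *	#define MAX_LFS_FILESIZE	0x7fffffffffffffffUL
 *	#endif
 *
 * Returning -EOVERFLOW instead of -EFBIG matches POSIX, which specifies
 * EOVERFLOW for open() of a regular file whose size cannot be represented
 * in the process's off_t.
 */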
* the page at all. For a more detailed explanation see ntfs_truncate() in
* fs/ntfs/inode.c.
*
- * @cached_page and @lru_pvec are just optimizations for dealing with multiple
- * pages.
- *
* Return 0 on success and -errno on error. In the case that an error is
* encountered it is possible that the initialized size will already have been
* incremented some way towards @new_init_size but it is guaranteed that if
* Locking: i_mutex on the vfs inode corresponding to the ntfs inode @ni must be
* held by the caller.
*/
-static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size,
- struct page **cached_page, struct pagevec *lru_pvec)
+static int ntfs_attr_extend_initialized(ntfs_inode *ni, const s64 new_init_size)
{
s64 old_init_size;
loff_t old_i_size;
* Read the page. If the page is not present, this will zero
* the uninitialized regions for us.
*/
- page = read_cache_page(mapping, index,
- (filler_t*)mapping->a_ops->readpage, NULL);
+ page = read_mapping_page(mapping, index, NULL);
if (IS_ERR(page)) {
err = PTR_ERR(page);
goto init_err_out;
}
- wait_on_page_locked(page);
- if (unlikely(!PageUptodate(page) || PageError(page))) {
+ if (unlikely(PageError(page))) {
page_cache_release(page);
err = -EIO;
goto init_err_out;
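/*
 * read_mapping_page() is, at this point, roughly the following wrapper from
 * <linux/pagemap.h>.  Since read_cache_page() was made synchronous it
 * returns either an uptodate page or an ERR_PTR(), which is why the
 * explicit wait_on_page_locked()/PageUptodate() sequence above could go:
 *
 *	static inline struct page *read_mapping_page(
 *			struct address_space *mapping, pgoff_t index,
 *			void *data)
 *	{
 *		filler_t *filler = (filler_t *)mapping->a_ops->readpage;
 *		return read_cache_page(mapping, index, filler, data);
 *	}
 */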
volatile char c;
/* Set @end to the first byte outside the last page we care about. */
- end = (const char __user*)PAGE_ALIGN((ptrdiff_t __user)uaddr + bytes);
+ end = (const char __user*)PAGE_ALIGN((unsigned long)uaddr + bytes);
while (!__get_user(c, uaddr) && (uaddr += PAGE_SIZE, uaddr < end))
;
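/*
 * The loop above touches one byte per page with __get_user() to pre-fault
 * the user buffer into the page tables.  The actual copy later runs under
 * kmap_atomic(), where page faults are disabled, so without this
 * pre-faulting __copy_from_user_inatomic() could return a short copy and
 * force the slow path on every iteration.
 */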
* @cached_page: allocated but as yet unused page
- * @lru_pvec: lru-buffering pagevec of caller
*
- * Obtain @nr_pages locked page cache pages from the mapping @maping and
+ * Obtain @nr_pages locked page cache pages from the mapping @mapping and
* starting at index @index.
*
- * If a page is newly created, increment its refcount and add it to the
- * caller's lru-buffering pagevec @lru_pvec.
- *
- * This is the same as mm/filemap.c::__grab_cache_page(), except that @nr_pages
- * are obtained at once instead of just one page and that 0 is returned on
- * success and -errno on error.
+ * If a page is newly created, it is added to the LRU list.
*
* Note, the page locks are obtained in ascending page index order.
*/
static inline int __ntfs_grab_cache_pages(struct address_space *mapping,
pgoff_t index, const unsigned nr_pages, struct page **pages,
- struct page **cached_page, struct pagevec *lru_pvec)
+ struct page **cached_page)
{
int err, nr;
goto err_out;
}
}
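		/*
		 * add_to_page_cache_lru() inserts the page into the page
		 * cache and also places it on the LRU list (taking its own
		 * reference), so the explicit page_cache_get()/pagevec_add()
		 * LRU bookkeeping that used to follow is no longer needed.
		 */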
- err = add_to_page_cache(*cached_page, mapping, index,
+ err = add_to_page_cache_lru(*cached_page, mapping, index,
GFP_KERNEL);
if (unlikely(err)) {
if (err == -EEXIST)
goto err_out;
}
pages[nr] = *cached_page;
- page_cache_get(*cached_page);
- if (unlikely(!pagevec_add(lru_pvec, *cached_page)))
- __pagevec_lru_add(lru_pvec);
*cached_page = NULL;
}
index++;
u32 attr_rec_len = 0;
unsigned blocksize, u;
int err, mp_size;
- BOOL rl_write_locked, was_hole, is_retry;
+ bool rl_write_locked, was_hole, is_retry;
unsigned char blocksize_bits;
struct {
u8 runlist_merged:1;
"index 0x%lx, nr_pages 0x%x, pos 0x%llx, bytes 0x%zx.",
vi->i_ino, ni->type, pages[0]->index, nr_pages,
(long long)pos, bytes);
- blocksize_bits = vi->i_blkbits;
- blocksize = 1 << blocksize_bits;
+ blocksize = vol->sb->s_blocksize;
+ blocksize_bits = vol->sb->s_blocksize_bits;
u = 0;
do {
- struct page *page = pages[u];
+ page = pages[u];
+ BUG_ON(!page);
/*
* create_empty_buffers() will create uptodate/dirty buffers if
* the page is uptodate/dirty.
return -ENOMEM;
}
} while (++u < nr_pages);
- rl_write_locked = FALSE;
+ rl_write_locked = false;
rl = NULL;
err = 0;
vcn = lcn = -1;
vcn_len = 0;
lcn_block = -1;
- was_hole = FALSE;
+ was_hole = false;
cpos = pos >> vol->cluster_size_bits;
end = pos + bytes;
cend = (end + vol->cluster_size - 1) >> vol->cluster_size_bits;
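	/*
	 * Worked example of the cluster arithmetic, assuming 4096-byte
	 * clusters (cluster_size_bits == 12): for pos == 5000 and
	 * bytes == 10000, cpos = 5000 >> 12 = 1, end = 15000, and
	 * cend = (15000 + 4095) >> 12 = 4, so the write covers the
	 * half-open cluster range [1, 4), i.e. clusters 1-3.
	 */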
ntfs_submit_bh_for_read(bh);
*wait_bh++ = bh;
} else {
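				/*
				 * zero_user(page, offset, size) is the
				 * highmem-safe helper that performs the
				 * kmap_atomic() + memset() +
				 * flush_dcache_page() sequence the open-coded
				 * versions below used to spell out.
				 */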
- u8 *kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + bh_offset(bh), 0,
+ zero_user(page, bh_offset(bh),
blocksize);
- kunmap_atomic(kaddr, KM_USER0);
- flush_dcache_page(page);
set_buffer_uptodate(bh);
}
}
ntfs_submit_bh_for_read(bh);
*wait_bh++ = bh;
} else {
- u8 *kaddr = kmap_atomic(page,
- KM_USER0);
- memset(kaddr + bh_offset(bh),
- 0, blocksize);
- kunmap_atomic(kaddr, KM_USER0);
- flush_dcache_page(page);
+ zero_user(page, bh_offset(bh),
+ blocksize);
set_buffer_uptodate(bh);
}
}
*/
if (bh_end <= pos || bh_pos >= end) {
if (!buffer_uptodate(bh)) {
- u8 *kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + bh_offset(bh), 0,
+ zero_user(page, bh_offset(bh),
blocksize);
- kunmap_atomic(kaddr, KM_USER0);
- flush_dcache_page(page);
set_buffer_uptodate(bh);
}
mark_buffer_dirty(bh);
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
} else if (!buffer_uptodate(bh)) {
- u8 *kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + bh_offset(bh), 0, blocksize);
- kunmap_atomic(kaddr, KM_USER0);
- flush_dcache_page(page);
+ zero_user(page, bh_offset(bh), blocksize);
set_buffer_uptodate(bh);
}
continue;
}
- is_retry = FALSE;
+ is_retry = false;
if (!rl) {
down_read(&ni->runlist.lock);
retry_remap:
* Successful remap, setup the map cache and
* use that to deal with the buffer.
*/
- was_hole = FALSE;
+ was_hole = false;
vcn = bh_cpos;
vcn_len = rl[1].vcn - vcn;
lcn_block = lcn << (vol->cluster_size_bits -
if (likely(vcn + vcn_len >= cend)) {
if (rl_write_locked) {
up_write(&ni->runlist.lock);
- rl_write_locked = FALSE;
+ rl_write_locked = false;
} else
up_read(&ni->runlist.lock);
rl = NULL;
*/
up_read(&ni->runlist.lock);
down_write(&ni->runlist.lock);
- rl_write_locked = TRUE;
+ rl_write_locked = true;
goto retry_remap;
}
err = ntfs_map_runlist_nolock(ni, bh_cpos,
NULL);
if (likely(!err)) {
- is_retry = TRUE;
+ is_retry = true;
goto retry_remap;
}
/*
if (!buffer_uptodate(bh))
set_buffer_uptodate(bh);
} else if (!buffer_uptodate(bh)) {
- u8 *kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + bh_offset(bh), 0,
- blocksize);
- kunmap_atomic(kaddr, KM_USER0);
- flush_dcache_page(page);
+ zero_user(page, bh_offset(bh),
+ blocksize);
set_buffer_uptodate(bh);
}
continue;
if (!rl_write_locked) {
up_read(&ni->runlist.lock);
down_write(&ni->runlist.lock);
- rl_write_locked = TRUE;
+ rl_write_locked = true;
goto retry_remap;
}
/* Find the previous last allocated cluster. */
}
}
rl2 = ntfs_cluster_alloc(vol, bh_cpos, 1, lcn, DATA_ZONE,
- FALSE);
+ false);
if (IS_ERR(rl2)) {
err = PTR_ERR(rl2);
ntfs_debug("Failed to allocate cluster, error code %i.",
}
ni->runlist.rl = rl;
status.runlist_merged = 1;
- ntfs_debug("Allocated cluster, lcn 0x%llx.", lcn);
+ ntfs_debug("Allocated cluster, lcn 0x%llx.",
+ (unsigned long long)lcn);
/* Map and lock the mft record and get the attribute record. */
if (!NInoAttr(ni))
base_ni = ni;
status.mft_attr_mapped = 0;
status.mp_rebuilt = 0;
/* Setup the map cache and use that to deal with the buffer. */
- was_hole = TRUE;
+ was_hole = true;
vcn = bh_cpos;
vcn_len = 1;
lcn_block = lcn << (vol->cluster_size_bits - blocksize_bits);
*/
if (likely(vcn + vcn_len >= cend)) {
up_write(&ni->runlist.lock);
- rl_write_locked = FALSE;
+ rl_write_locked = false;
rl = NULL;
}
goto map_buffer_cached;
if (likely(!err)) {
if (unlikely(rl_write_locked)) {
up_write(&ni->runlist.lock);
- rl_write_locked = FALSE;
+ rl_write_locked = false;
} else if (unlikely(rl))
up_read(&ni->runlist.lock);
rl = NULL;
* to zero the overflowing region.
*/
if (unlikely(bh_pos + blocksize > initialized_size)) {
- u8 *kaddr;
int ofs = 0;
if (likely(bh_pos < initialized_size))
ofs = initialized_size - bh_pos;
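			/*
			 * Note that, unlike zero_user() which takes
			 * (offset, size), zero_user_segment() takes
			 * (start, end); to match the old memset() of
			 * blocksize - ofs bytes at bh_offset(bh) + ofs, the
			 * end of the zeroed range must be
			 * bh_offset(bh) + blocksize.
			 */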
- kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + bh_offset(bh) + ofs, 0,
- blocksize - ofs);
- kunmap_atomic(kaddr, KM_USER0);
- flush_dcache_page(page);
+			zero_user_segment(page, bh_offset(bh) + ofs,
+					bh_offset(bh) + blocksize);
}
} else /* if (unlikely(!buffer_uptodate(bh))) */
err = -EIO;
"attribute runlist in error code "
"path. Run chkdsk to recover the "
"lost cluster.");
- make_bad_inode(vi);
- make_bad_inode(VFS_I(base_ni));
NVolSetErrors(vol);
} else /* if (success) */ {
status.runlist_merged = 0;
ntfs_error(vol->sb, "Failed to restore attribute "
"record in error code path. Run "
"chkdsk to recover.");
- make_bad_inode(vi);
- make_bad_inode(VFS_I(base_ni));
NVolSetErrors(vol);
} else /* if (success) */ {
if (ntfs_mapping_pairs_build(vol, (u8*)a +
"mapping pairs array in error "
"code path. Run chkdsk to "
"recover.");
- make_bad_inode(vi);
- make_bad_inode(VFS_I(base_ni));
NVolSetErrors(vol);
}
flush_dcache_mft_record_page(ctx->ntfs_ino);
if (PageUptodate(page))
set_buffer_uptodate(bh);
else {
- u8 *kaddr = kmap_atomic(page, KM_USER0);
- memset(kaddr + bh_offset(bh), 0,
+ zero_user(page, bh_offset(bh),
blocksize);
- kunmap_atomic(kaddr, KM_USER0);
- flush_dcache_page(page);
set_buffer_uptodate(bh);
}
}
/*
* Copy as much as we can into the pages and return the number of bytes which
- * were sucessfully copied. If a fault is encountered then clear the pages
+ * were successfully copied. If a fault is encountered then clear the pages
* out to (ofs + bytes) and return the number of bytes which were copied.
*/
static inline size_t ntfs_copy_from_user(struct page **pages,
size_t bytes)
{
struct page **last_page = pages + nr_pages;
- char *kaddr;
+ char *addr;
size_t total = 0;
unsigned len;
int left;
len = PAGE_CACHE_SIZE - ofs;
if (len > bytes)
len = bytes;
- kaddr = kmap_atomic(*pages, KM_USER0);
- left = __copy_from_user_inatomic(kaddr + ofs, buf, len);
- kunmap_atomic(kaddr, KM_USER0);
+ addr = kmap_atomic(*pages, KM_USER0);
+ left = __copy_from_user_inatomic(addr + ofs, buf, len);
+ kunmap_atomic(addr, KM_USER0);
if (unlikely(left)) {
/* Do it the slow way. */
- kaddr = kmap(*pages);
- left = __copy_from_user(kaddr + ofs, buf, len);
+ addr = kmap(*pages);
+ left = __copy_from_user(addr + ofs, buf, len);
kunmap(*pages);
if (unlikely(left))
goto err_out;
len = PAGE_CACHE_SIZE;
if (len > bytes)
len = bytes;
- kaddr = kmap_atomic(*pages, KM_USER0);
- memset(kaddr, 0, len);
- kunmap_atomic(kaddr, KM_USER0);
+ zero_user(*pages, 0, len);
}
goto out;
}
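/*
 * The reason for the two-stage copy above: the fast path uses
 * __copy_from_user_inatomic() under kmap_atomic(), where page faults are
 * disabled, so it can legitimately return a short count if the user buffer
 * is not resident.  The kmap() + __copy_from_user() fallback may sleep,
 * fault the source pages in, and complete the copy.
 */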
-static size_t __ntfs_copy_from_user_iovec(char *vaddr,
+static size_t __ntfs_copy_from_user_iovec_inatomic(char *vaddr,
const struct iovec *iov, size_t iov_ofs, size_t bytes)
{
size_t total = 0;
bytes -= len;
vaddr += len;
if (unlikely(left)) {
- /*
- * Zero the rest of the target like __copy_from_user().
- */
- memset(vaddr, 0, bytes);
total -= left;
break;
}
* pages (out to offset + bytes), to emulate ntfs_copy_from_user()'s
* single-segment behaviour.
*
- * We call the same helper (__ntfs_copy_from_user_iovec()) both when atomic and
- * when not atomic. This is ok because __ntfs_copy_from_user_iovec() calls
- * __copy_from_user_inatomic() and it is ok to call this when non-atomic. In
- * fact, the only difference between __copy_from_user_inatomic() and
- * __copy_from_user() is that the latter calls might_sleep(). And on many
+ * We call the same helper (__ntfs_copy_from_user_iovec_inatomic()) both
+ * when atomic and when not atomic. This is ok because
+ * __ntfs_copy_from_user_iovec_inatomic() calls __copy_from_user_inatomic()
+ * and it is ok to call this when non-atomic.
+ * In fact, the only difference between __copy_from_user_inatomic() and
+ * __copy_from_user() is that the latter calls might_sleep() and the former
+ * should not zero the tail of the buffer on error. And on many
* architectures __copy_from_user_inatomic() is just defined to
* __copy_from_user() so it makes no difference at all on those architectures.
*/
size_t *iov_ofs, size_t bytes)
{
struct page **last_page = pages + nr_pages;
- char *kaddr;
+ char *addr;
size_t copied, len, total = 0;
do {
len = PAGE_CACHE_SIZE - ofs;
if (len > bytes)
len = bytes;
- kaddr = kmap_atomic(*pages, KM_USER0);
- copied = __ntfs_copy_from_user_iovec(kaddr + ofs,
+ addr = kmap_atomic(*pages, KM_USER0);
+ copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
*iov, *iov_ofs, len);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(addr, KM_USER0);
if (unlikely(copied != len)) {
/* Do it the slow way. */
- kaddr = kmap(*pages);
- copied = __ntfs_copy_from_user_iovec(kaddr + ofs,
+ addr = kmap(*pages);
+ copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
*iov, *iov_ofs, len);
+ /*
+ * Zero the rest of the target like __copy_from_user().
+ */
+ memset(addr + ofs + copied, 0, len - copied);
kunmap(*pages);
if (unlikely(copied != len))
goto err_out;
len = PAGE_CACHE_SIZE;
if (len > bytes)
len = bytes;
- kaddr = kmap_atomic(*pages, KM_USER0);
- memset(kaddr, 0, len);
- kunmap_atomic(kaddr, KM_USER0);
+ zero_user(*pages, 0, len);
}
goto out;
}
unsigned nr_pages)
{
BUG_ON(!nr_pages);
+	/*
+	 * Warning: Do not do the decrement at the same time as the call to
+	 * flush_dcache_page() because flush_dcache_page() is a no-op macro
+	 * on i386, so its argument would never be evaluated, the decrement
+	 * would never happen, and the loop would never terminate.
+	 */
do {
- /*
- * Warning: Do not do the decrement at the same time as the
- * call because flush_dcache_page() is a NULL macro on i386
- * and hence the decrement never happens.
- */
+ --nr_pages;
flush_dcache_page(pages[nr_pages]);
- } while (--nr_pages > 0);
+ } while (nr_pages > 0);
}
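/*
 * To see the pitfall the warning above describes, consider an architecture
 * where flush_dcache_page() is a no-op macro, e.g.:
 *
 *	#define flush_dcache_page(page)	do { } while (0)
 *
 * The macro body never uses its argument, so writing the loop as
 *
 *	do {
 *		flush_dcache_page(pages[--nr_pages]);
 *	} while (nr_pages > 0);
 *
 * would expand to an empty statement: --nr_pages is never evaluated,
 * nr_pages never reaches zero, and the loop spins forever.
 */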
/**
vi = pages[0]->mapping->host;
ni = NTFS_I(vi);
- blocksize = 1 << vi->i_blkbits;
+ blocksize = vi->i_sb->s_blocksize;
end = pos + bytes;
u = 0;
do {
s64 bh_pos;
struct page *page;
- BOOL partial;
+ bool partial;
page = pages[u];
bh_pos = (s64)page->index << PAGE_CACHE_SHIFT;
bh = head = page_buffers(page);
- partial = FALSE;
+ partial = false;
do {
s64 bh_end;
bh_end = bh_pos + blocksize;
if (bh_end <= pos || bh_pos >= end) {
if (!buffer_uptodate(bh))
- partial = TRUE;
+ partial = true;
} else {
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
unmap_mft_record(base_ni);
ntfs_error(vi->i_sb, "Failed to update initialized_size/i_size (error "
"code %i).", err);
- if (err != -ENOMEM) {
+ if (err != -ENOMEM)
NVolSetErrors(ni->vol);
- make_bad_inode(VFS_I(base_ni));
- make_bad_inode(vi);
- }
return err;
}
read_unlock_irqrestore(&ni->size_lock, flags);
BUG_ON(initialized_size != i_size);
if (end > initialized_size) {
- unsigned long flags;
-
write_lock_irqsave(&ni->size_lock, flags);
ni->initialized_size = end;
i_size_write(vi, end);
ntfs_error(vi->i_sb, "Resident attribute commit write failed "
"with error %i.", err);
NVolSetErrors(ni->vol);
- make_bad_inode(VFS_I(base_ni));
- make_bad_inode(vi);
}
if (ctx)
ntfs_attr_put_search_ctx(ctx);
ssize_t status, written;
unsigned nr_pages;
int err;
- struct pagevec lru_pvec;
ntfs_debug("Entering for i_ino 0x%lx, attribute type 0x%x, "
"pos 0x%llx, count 0x%lx.",
}
}
}
- pagevec_init(&lru_pvec, 0);
written = 0;
/*
* If the write starts beyond the initialized size, extend it up to the
ll = ni->initialized_size;
read_unlock_irqrestore(&ni->size_lock, flags);
if (pos > ll) {
- err = ntfs_attr_extend_initialized(ni, pos, &cached_page,
- &lru_pvec);
+ err = ntfs_attr_extend_initialized(ni, pos);
if (err < 0) {
ntfs_error(vol->sb, "Cannot perform write to inode "
"0x%lx, attribute type 0x%x, because "
*/
down_read(&ni->runlist.lock);
lcn = ntfs_attr_vcn_to_lcn_nolock(ni, pos >>
- vol->cluster_size_bits, FALSE);
+ vol->cluster_size_bits, false);
up_read(&ni->runlist.lock);
if (unlikely(lcn < LCN_HOLE)) {
status = -EIO;
ntfs_fault_in_pages_readable_iovec(iov, iov_ofs, bytes);
/* Get and lock @do_pages starting at index @start_idx. */
status = __ntfs_grab_cache_pages(mapping, start_idx, do_pages,
- pages, &cached_page, &lru_pvec);
+ pages, &cached_page);
if (unlikely(status))
break;
/*
*ppos = pos;
if (cached_page)
page_cache_release(cached_page);
- /* For now, when the user asks for O_SYNC, we actually give O_DSYNC. */
- if (likely(!status)) {
- if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(vi))) {
- if (!mapping->a_ops->writepage || !is_sync_kiocb(iocb))
- status = generic_osync_inode(vi, mapping,
- OSYNC_METADATA|OSYNC_DATA);
- }
- }
- pagevec_lru_add(&lru_pvec);
ntfs_debug("Done. Returning %s (written 0x%lx, status %li).",
written ? "written" : "status", (unsigned long)written,
(long)status);
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
loff_t pos;
- unsigned long seg;
size_t count; /* after file limit checks */
ssize_t written, err;
count = 0;
- for (seg = 0; seg < nr_segs; seg++) {
- const struct iovec *iv = &iov[seg];
- /*
- * If any segment has a negative length, or the cumulative
- * length ever wraps negative then return -EINVAL.
- */
- count += iv->iov_len;
- if (unlikely((ssize_t)(count|iv->iov_len) < 0))
- return -EINVAL;
- if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len))
- continue;
- if (!seg)
- return -EFAULT;
- nr_segs = seg;
- count -= iv->iov_len; /* This segment is no good */
- break;
- }
+ err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
+ if (err)
+ return err;
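	/*
	 * generic_segment_checks() performs the same validation as the
	 * open-coded loop it replaces: it sums the segment lengths into
	 * *count, returns -EINVAL if any iov_len is negative or the running
	 * total wraps, and truncates *nr_segs at the first segment that
	 * fails access_ok() (returning -EFAULT only if the very first
	 * segment fails).
	 */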
pos = *ppos;
vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
/* We can write back this queue in page reclaim. */
goto out;
if (!count)
goto out;
- err = remove_suid(file->f_dentry);
+ err = file_remove_suid(file);
if (err)
goto out;
file_update_time(file);
/**
* ntfs_file_aio_write -
*/
-static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const char __user *buf,
- size_t count, loff_t pos)
+static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
ssize_t ret;
- struct iovec local_iov = { .iov_base = (void __user *)buf,
- .iov_len = count };
BUG_ON(iocb->ki_pos != pos);
mutex_lock(&inode->i_mutex);
- ret = ntfs_file_aio_write_nolock(iocb, &local_iov, 1, &iocb->ki_pos);
- mutex_unlock(&inode->i_mutex);
- if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
- int err = sync_page_range(inode, mapping, pos, ret);
- if (err < 0)
- ret = err;
- }
- return ret;
-}
-
-/**
- * ntfs_file_writev -
- *
- * Basically the same as generic_file_writev() except that it ends up calling
- * ntfs_file_aio_write_nolock() instead of __generic_file_aio_write_nolock().
- */
-static ssize_t ntfs_file_writev(struct file *file, const struct iovec *iov,
- unsigned long nr_segs, loff_t *ppos)
-{
- struct address_space *mapping = file->f_mapping;
- struct inode *inode = mapping->host;
- struct kiocb kiocb;
- ssize_t ret;
-
- mutex_lock(&inode->i_mutex);
- init_sync_kiocb(&kiocb, file);
- ret = ntfs_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos);
- if (ret == -EIOCBQUEUED)
- ret = wait_on_sync_kiocb(&kiocb);
+ ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos);
mutex_unlock(&inode->i_mutex);
- if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) {
- int err = sync_page_range(inode, mapping, *ppos - ret, ret);
+ if (ret > 0) {
+ int err = generic_write_sync(file, pos, ret);
if (err < 0)
ret = err;
}
}
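/*
 * generic_write_sync() is a no-op unless the file was opened for
 * synchronous I/O (O_SYNC/O_DSYNC) or the inode is marked IS_SYNC(), in
 * which case it writes back and waits on the dirtied range; its signature
 * at this point is roughly:
 *
 *	int generic_write_sync(struct file *file, loff_t pos, loff_t count);
 *
 * It thus subsumes both the sync_page_range() call removed above and the
 * generic_osync_inode() call removed from ntfs_file_buffered_write().
 */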
/**
- * ntfs_file_write - simple wrapper for ntfs_file_writev()
- */
-static ssize_t ntfs_file_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct iovec local_iov = { .iov_base = (void __user *)buf,
- .iov_len = count };
-
- return ntfs_file_writev(file, &local_iov, 1, ppos);
-}
-
-/**
* ntfs_file_fsync - sync a file to disk
* @filp: file to be synced
- * @dentry: dentry describing the file to sync
* @datasync: if non-zero only flush user data and not metadata
*
* Data integrity sync of a file to disk. Used for fsync, fdatasync, and msync
* Also, if @datasync is true, we do not wait on the inode to be written out
* but we always wait on the page cache pages to be written out.
*
- * Note: In the past @filp could be NULL so we ignore it as we don't need it
- * anyway.
- *
* Locking: Caller must hold i_mutex on the inode.
*
* TODO: We should probably also write all attribute/index inodes associated
* with this inode but since we have no simple way of getting to them we ignore
* this problem for now.
*/
-static int ntfs_file_fsync(struct file *filp, struct dentry *dentry,
- int datasync)
+static int ntfs_file_fsync(struct file *filp, int datasync)
{
- struct inode *vi = dentry->d_inode;
+ struct inode *vi = filp->f_mapping->host;
int err, ret = 0;
ntfs_debug("Entering for inode 0x%lx.", vi->i_ino);
BUG_ON(S_ISDIR(vi->i_mode));
if (!datasync || !NInoNonResident(NTFS_I(vi)))
- ret = ntfs_write_inode(vi, 1);
+ ret = __ntfs_write_inode(vi, 1);
write_inode_now(vi, !datasync);
/*
* NOTE: If we were to use mapping->private_list (see ext2 and
#endif /* NTFS_RW */
-struct file_operations ntfs_file_ops = {
+const struct file_operations ntfs_file_ops = {
.llseek = generic_file_llseek, /* Seek inside file. */
- .read = generic_file_read, /* Read from file. */
+ .read = do_sync_read, /* Read from file. */
.aio_read = generic_file_aio_read, /* Async read from file. */
- .readv = generic_file_readv, /* Read from file. */
#ifdef NTFS_RW
- .write = ntfs_file_write, /* Write to file. */
+ .write = do_sync_write, /* Write to file. */
.aio_write = ntfs_file_aio_write, /* Async write to file. */
- .writev = ntfs_file_writev, /* Write to file. */
/*.release = ,*/ /* Last file is closed. See
fs/ext2/file.c::
ext2_release_file() for
mounted filesystem. */
.mmap = generic_file_mmap, /* Mmap file. */
.open = ntfs_file_open, /* Open file. */
- .sendfile = generic_file_sendfile, /* Zero-copy data send with
+	.splice_read	= generic_file_splice_read, /* Zero-copy data send with
the data source being on
the ntfs partition. We do
not need to care about the
data destination. */
};
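/*
 * do_sync_read()/do_sync_write() are the generic shims that implement the
 * synchronous ->read/->write entry points on top of ->aio_read/->aio_write,
 * much as the removed ntfs_file_write() wrapper did by hand.  A simplified
 * sketch of the write side (the real helper lives in fs/read_write.c):
 *
 *	static ssize_t sync_write_sketch(struct file *filp,
 *			const char __user *buf, size_t len, loff_t *ppos)
 *	{
 *		struct iovec iov = { .iov_base = (void __user *)buf,
 *				.iov_len = len };
 *		struct kiocb kiocb;
 *		ssize_t ret;
 *
 *		init_sync_kiocb(&kiocb, filp);
 *		kiocb.ki_pos = *ppos;
 *		ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos);
 *		if (ret == -EIOCBQUEUED)
 *			ret = wait_on_sync_kiocb(&kiocb);
 *		*ppos = kiocb.ki_pos;
 *		return ret;
 *	}
 */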
-struct inode_operations ntfs_file_inode_ops = {
+const struct inode_operations ntfs_file_inode_ops = {
#ifdef NTFS_RW
.truncate = ntfs_truncate_vfs,
.setattr = ntfs_setattr,
#endif /* NTFS_RW */
};
-struct file_operations ntfs_empty_file_ops = {};
+const struct file_operations ntfs_empty_file_ops = {};
-struct inode_operations ntfs_empty_inode_ops = {};
+const struct inode_operations ntfs_empty_inode_ops = {};