X-Git-Url: http://ftp.safe.ca/?a=blobdiff_plain;f=fs%2Fsync.c;h=c9f83f480ec55ec751d41021c73c9b3aa8c0b99a;hb=75de46b98dda624397ccb17c106e51f478a79c15;hp=aab5ffe77e9fd67e3ac9173a4cb9488fd393f074;hpb=5246d0503130fa58904c8beb987fcf93b96d8ab6;p=safe%2Fjmp%2Flinux-2.6

diff --git a/fs/sync.c b/fs/sync.c
index aab5ffe..c9f83f4 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -5,16 +5,247 @@
 #include <linux/kernel.h>
 #include <linux/file.h>
 #include <linux/fs.h>
+#include <linux/slab.h>
 #include <linux/module.h>
+#include <linux/sched.h>
 #include <linux/writeback.h>
 #include <linux/syscalls.h>
 #include <linux/linkage.h>
 #include <linux/pagemap.h>
+#include <linux/quotaops.h>
+#include <linux/buffer_head.h>
+#include <linux/backing-dev.h>
+#include "internal.h"
 
 #define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \
 			SYNC_FILE_RANGE_WAIT_AFTER)
 
 /*
+ * Do the filesystem syncing work. For simple filesystems
+ * writeback_inodes_sb(sb) just dirties buffers with inodes so we have to
+ * submit IO for these buffers via __sync_blockdev(). This also speeds up the
+ * wait == 1 case since in that case write_inode() functions do
+ * sync_dirty_buffer() and thus effectively write one block at a time.
+ */
+static int __sync_filesystem(struct super_block *sb, int wait)
+{
+	/*
+	 * This should be safe, as we require bdi backing to actually
+	 * write out data in the first place
+	 */
+	if (!sb->s_bdi || sb->s_bdi == &noop_backing_dev_info)
+		return 0;
+
+	if (sb->s_qcop && sb->s_qcop->quota_sync)
+		sb->s_qcop->quota_sync(sb, -1, wait);
+
+	if (wait)
+		sync_inodes_sb(sb);
+	else
+		writeback_inodes_sb_locked(sb);
+
+	if (sb->s_op->sync_fs)
+		sb->s_op->sync_fs(sb, wait);
+	return __sync_blockdev(sb->s_bdev, wait);
+}
+
+/*
+ * Write out and wait upon all dirty data associated with this
+ * superblock. Filesystem data as well as the underlying block
+ * device. Takes the superblock lock.
+ */
+int sync_filesystem(struct super_block *sb)
+{
+	int ret;
+
+	/*
+	 * We need to be protected against the filesystem going from
+	 * r/o to r/w or vice versa.
+	 */
+	WARN_ON(!rwsem_is_locked(&sb->s_umount));
+
+	/*
+	 * No point in syncing out anything if the filesystem is read-only.
+	 */
+	if (sb->s_flags & MS_RDONLY)
+		return 0;
+
+	ret = __sync_filesystem(sb, 0);
+	if (ret < 0)
+		return ret;
+	return __sync_filesystem(sb, 1);
+}
+EXPORT_SYMBOL_GPL(sync_filesystem);
+
+static void sync_one_sb(struct super_block *sb, void *arg)
+{
+	if (!(sb->s_flags & MS_RDONLY) && sb->s_bdi)
+		__sync_filesystem(sb, *(int *)arg);
+}
+/*
+ * Sync all the data for all the filesystems (called by sys_sync() and
+ * emergency sync)
+ */
+static void sync_filesystems(int wait)
+{
+	iterate_supers(sync_one_sb, &wait);
+}
+
+/*
+ * sync everything. Start out by waking pdflush, because that writes back
+ * all queues in parallel.
+ */
+SYSCALL_DEFINE0(sync)
+{
+	wakeup_flusher_threads(0);
+	sync_filesystems(0);
+	sync_filesystems(1);
+	if (unlikely(laptop_mode))
+		laptop_sync_completion();
+	return 0;
+}
+
+static void do_sync_work(struct work_struct *work)
+{
+	/*
+	 * Sync twice to reduce the possibility we skipped some inodes / pages
+	 * because they were temporarily locked
+	 */
+	sync_filesystems(0);
+	sync_filesystems(0);
+	printk("Emergency Sync complete\n");
+	kfree(work);
+}
+
+void emergency_sync(void)
+{
+	struct work_struct *work;
+
+	work = kmalloc(sizeof(*work), GFP_ATOMIC);
+	if (work) {
+		INIT_WORK(work, do_sync_work);
+		schedule_work(work);
+	}
+}
+
+/*
+ * Generic function to fsync a file.
+ */
+int file_fsync(struct file *filp, int datasync)
+{
+	struct inode *inode = filp->f_mapping->host;
+	struct super_block * sb;
+	int ret, err;
+
+	/* sync the inode to buffers */
+	ret = write_inode_now(inode, 0);
+
+	/* sync the superblock to buffers */
+	sb = inode->i_sb;
+	if (sb->s_dirt && sb->s_op->write_super)
+		sb->s_op->write_super(sb);
+
+	/* .. finally sync the buffers to disk */
+	err = sync_blockdev(sb->s_bdev);
+	if (!ret)
+		ret = err;
+	return ret;
+}
+EXPORT_SYMBOL(file_fsync);
+
+/**
+ * vfs_fsync_range - helper to sync a range of data & metadata to disk
+ * @file:	file to sync
+ * @start:	offset in bytes of the beginning of data range to sync
+ * @end:	offset in bytes of the end of data range (inclusive)
+ * @datasync:	perform only datasync
+ *
+ * Write back data in range @start..@end and metadata for @file to disk. If
+ * @datasync is set only metadata needed to access modified file data is
+ * written.
+ */
+int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync)
+{
+	struct address_space *mapping = file->f_mapping;
+	int err, ret;
+
+	if (!file->f_op || !file->f_op->fsync) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = filemap_write_and_wait_range(mapping, start, end);
+
+	/*
+	 * We need to protect against concurrent writers, which could cause
+	 * livelocks in fsync_buffers_list().
+	 */
+	mutex_lock(&mapping->host->i_mutex);
+	err = file->f_op->fsync(file, datasync);
+	if (!ret)
+		ret = err;
+	mutex_unlock(&mapping->host->i_mutex);
+
+out:
+	return ret;
+}
+EXPORT_SYMBOL(vfs_fsync_range);
+
+/**
+ * vfs_fsync - perform a fsync or fdatasync on a file
+ * @file:	file to sync
+ * @datasync:	only perform a fdatasync operation
+ *
+ * Write back data and metadata for @file to disk. If @datasync is
+ * set only metadata needed to access modified file data is written.
+ */
+int vfs_fsync(struct file *file, int datasync)
+{
+	return vfs_fsync_range(file, 0, LLONG_MAX, datasync);
+}
+EXPORT_SYMBOL(vfs_fsync);
+
+static int do_fsync(unsigned int fd, int datasync)
+{
+	struct file *file;
+	int ret = -EBADF;
+
+	file = fget(fd);
+	if (file) {
+		ret = vfs_fsync(file, datasync);
+		fput(file);
+	}
+	return ret;
+}
+
+SYSCALL_DEFINE1(fsync, unsigned int, fd)
+{
+	return do_fsync(fd, 0);
+}
+
+SYSCALL_DEFINE1(fdatasync, unsigned int, fd)
+{
+	return do_fsync(fd, 1);
+}
+
+/**
+ * generic_write_sync - perform syncing after a write if file / inode is sync
+ * @file:	file to which the write happened
+ * @pos:	offset where the write started
+ * @count:	length of the write
+ *
+ * This is just a simple wrapper about our general syncing function.
+ */
+int generic_write_sync(struct file *file, loff_t pos, loff_t count)
+{
+	if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
+		return 0;
+	return vfs_fsync_range(file, pos, pos + count - 1,
+			       (file->f_flags & __O_SYNC) ? 0 : 1);
+}
+EXPORT_SYMBOL(generic_write_sync);
+
+/*
  * sys_sync_file_range() permits finely controlled syncing over a segment of
  * a file in the range offset .. (offset+nbytes-1) inclusive. If nbytes is
  * zero then sys_sync_file_range() will operate from offset out to EOF.
@@ -25,7 +256,8 @@
  * before performing the write.
  *
  * SYNC_FILE_RANGE_WRITE: initiate writeout of all those dirty pages in the
- * range which are not presently under writeback.
+ * range which are not presently under writeback. Note that this may block for
+ * significant periods due to exhaustion of disk request structures.
  *
  * SYNC_FILE_RANGE_WAIT_AFTER: wait upon writeout of all pages in the range
  * after performing the write.
@@ -60,11 +292,12 @@
  * already-instantiated disk blocks, there are no guarantees here that the data
  * will be available after a crash.
  */
-asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
-					unsigned int flags)
+SYSCALL_DEFINE(sync_file_range)(int fd, loff_t offset, loff_t nbytes,
+				unsigned int flags)
 {
 	int ret;
 	struct file *file;
+	struct address_space *mapping;
 	loff_t endbyte;			/* inclusive */
 	int fput_needed;
 	umode_t i_mode;
@@ -100,7 +333,7 @@ asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
 	}
 
 	if (nbytes == 0)
-		endbyte = -1;
+		endbyte = LLONG_MAX;
 	else
 		endbyte--;		/* inclusive */
 
@@ -109,56 +342,62 @@ asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
 	if (!file)
 		goto out;
 
-	i_mode = file->f_dentry->d_inode->i_mode;
+	i_mode = file->f_path.dentry->d_inode->i_mode;
 	ret = -ESPIPE;
 	if (!S_ISREG(i_mode) && !S_ISBLK(i_mode) && !S_ISDIR(i_mode) &&
 			!S_ISLNK(i_mode))
 		goto out_put;
 
-	ret = do_sync_file_range(file, offset, endbyte, flags);
-out_put:
-	fput_light(file, fput_needed);
-out:
-	return ret;
-}
-
-/*
- * `endbyte' is inclusive
- */
-int do_sync_file_range(struct file *file, loff_t offset, loff_t endbyte,
-			unsigned int flags)
-{
-	int ret;
-	struct address_space *mapping;
-
 	mapping = file->f_mapping;
 	if (!mapping) {
 		ret = -EINVAL;
-		goto out;
+		goto out_put;
 	}
 
 	ret = 0;
 	if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) {
-		ret = wait_on_page_writeback_range(mapping,
-					offset >> PAGE_CACHE_SHIFT,
-					endbyte >> PAGE_CACHE_SHIFT);
+		ret = filemap_fdatawait_range(mapping, offset, endbyte);
 		if (ret < 0)
-			goto out;
+			goto out_put;
 	}
 
 	if (flags & SYNC_FILE_RANGE_WRITE) {
-		ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
-						WB_SYNC_NONE);
+		ret = filemap_fdatawrite_range(mapping, offset, endbyte);
 		if (ret < 0)
-			goto out;
+			goto out_put;
 	}
 
-	if (flags & SYNC_FILE_RANGE_WAIT_AFTER) {
-		ret = wait_on_page_writeback_range(mapping,
-					offset >> PAGE_CACHE_SHIFT,
-					endbyte >> PAGE_CACHE_SHIFT);
-	}
+	if (flags & SYNC_FILE_RANGE_WAIT_AFTER)
+		ret = filemap_fdatawait_range(mapping, offset, endbyte);
+
+out_put:
+	fput_light(file, fput_needed);
 out:
 	return ret;
 }
-EXPORT_SYMBOL_GPL(do_sync_file_range);
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_sync_file_range(long fd, loff_t offset, loff_t nbytes,
+				    long flags)
+{
+	return SYSC_sync_file_range((int) fd, offset, nbytes,
+				    (unsigned int) flags);
+}
+SYSCALL_ALIAS(sys_sync_file_range, SyS_sync_file_range);
+#endif
+
+/* It would be nice if people remember that not all the world's an i386
+   when they introduce new system calls */
+SYSCALL_DEFINE(sync_file_range2)(int fd, unsigned int flags,
+				 loff_t offset, loff_t nbytes)
+{
+	return sys_sync_file_range(fd, offset, nbytes, flags);
+}
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_sync_file_range2(long fd, long flags,
+				     loff_t offset, loff_t nbytes)
+{
+	return SYSC_sync_file_range2((int) fd, (unsigned int) flags,
+				     offset, nbytes);
+}
+SYSCALL_ALIAS(sys_sync_file_range2, SyS_sync_file_range2);
+#endif
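
For illustration only (not part of the patch): a minimal userspace sketch of the fsync()/fdatasync() calls that this file now hosts via do_fsync(). It uses the standard libc wrappers; the file name "journal.log" is just an example. Explicitly calling fdatasync() after write() gives the same durability that generic_write_sync() provides in-kernel for files opened with O_DSYNC.

/* Minimal userspace sketch: make a write durable with fdatasync(). */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char buf[] = "important record\n";
	int fd = open("journal.log", O_WRONLY | O_CREAT | O_APPEND, 0644);

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}
	if (write(fd, buf, strlen(buf)) != (ssize_t)strlen(buf)) {
		perror("write");
		close(fd);
		return EXIT_FAILURE;
	}
	/* fdatasync(): flush the file data plus only the metadata needed
	 * to read it back (e.g. the new size); fsync() would also flush
	 * timestamps and other inode metadata. */
	if (fdatasync(fd) < 0) {
		perror("fdatasync");
		close(fd);
		return EXIT_FAILURE;
	}
	close(fd);
	return EXIT_SUCCESS;
}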
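Similarly, the SYNC_FILE_RANGE_* flag semantics documented in the comment block above can be exercised from userspace through the glibc sync_file_range() wrapper (which requires _GNU_SOURCE). The sketch below is hypothetical: "data.bin" and the 1 MiB range are arbitrary example values, and, as the comment notes for writes into not-yet-instantiated blocks, this call on its own is not a data-integrity operation.

/* Hypothetical sketch of the sync_file_range() flags documented above. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	int fd = open("data.bin", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}
	/* WAIT_BEFORE: wait for writeback already in flight for the range;
	 * WRITE: start writeback of dirty pages in the range;
	 * WAIT_AFTER: wait for that writeback to complete.  Metadata and
	 * block allocation are not forced out, so this does not replace
	 * fsync()/fdatasync(). */
	if (sync_file_range(fd, 0, 1024 * 1024,
			    SYNC_FILE_RANGE_WAIT_BEFORE |
			    SYNC_FILE_RANGE_WRITE |
			    SYNC_FILE_RANGE_WAIT_AFTER) < 0)
		perror("sync_file_range");
	close(fd);
	return EXIT_SUCCESS;
}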