vfs: rename __fsync_super()/fsync_super() to __sync_filesystem()/sync_filesystem(), split quota writeout from quota sync, and drop lock_super() around write_super  [NOTE(review): original headline referenced an unrelated nfsd commit; title reconstructed from the diff body — confirm against upstream git log for fs/sync.c]
[safe/jmp/linux-2.6] / fs / sync.c
index d5fa7b7..3422ba6 100644 (file)
--- a/fs/sync.c
+++ b/fs/sync.c
  * case write_inode() functions do sync_dirty_buffer() and thus effectively
  * write one block at a time.
  */
-static int __fsync_super(struct super_block *sb, int wait)
+static int __sync_filesystem(struct super_block *sb, int wait)
 {
-       vfs_dq_sync(sb);
+       /* Avoid doing twice syncing and cache pruning for quota sync */
+       if (!wait)
+               writeout_quota_sb(sb, -1);
+       else
+               sync_quota_sb(sb, -1);
        sync_inodes_sb(sb, wait);
-       lock_super(sb);
-       if (sb->s_dirt && sb->s_op->write_super)
-               sb->s_op->write_super(sb);
-       unlock_super(sb);
        if (sb->s_op->sync_fs)
                sb->s_op->sync_fs(sb, wait);
        return __sync_blockdev(sb->s_bdev, wait);
@@ -43,16 +43,28 @@ static int __fsync_super(struct super_block *sb, int wait)
  * superblock.  Filesystem data as well as the underlying block
  * device.  Takes the superblock lock.
  */
-int fsync_super(struct super_block *sb)
+int sync_filesystem(struct super_block *sb)
 {
        int ret;
 
-       ret = __fsync_super(sb, 0);
+       /*
+        * We need to be protected against the filesystem going from
+        * r/o to r/w or vice versa.
+        */
+       WARN_ON(!rwsem_is_locked(&sb->s_umount));
+
+       /*
+        * No point in syncing out anything if the filesystem is read-only.
+        */
+       if (sb->s_flags & MS_RDONLY)
+               return 0;
+
+       ret = __sync_filesystem(sb, 0);
        if (ret < 0)
                return ret;
-       return __fsync_super(sb, 1);
+       return __sync_filesystem(sb, 1);
 }
-EXPORT_SYMBOL_GPL(fsync_super);
+EXPORT_SYMBOL_GPL(sync_filesystem);
 
 /*
  * Sync all the data for all the filesystems (called by sys_sync() and
@@ -75,25 +87,22 @@ static void sync_filesystems(int wait)
 
        mutex_lock(&mutex);             /* Could be down_interruptible */
        spin_lock(&sb_lock);
-       list_for_each_entry(sb, &super_blocks, s_list) {
-               if (sb->s_flags & MS_RDONLY)
-                       continue;
+       list_for_each_entry(sb, &super_blocks, s_list)
                sb->s_need_sync = 1;
-       }
 
 restart:
        list_for_each_entry(sb, &super_blocks, s_list) {
                if (!sb->s_need_sync)
                        continue;
                sb->s_need_sync = 0;
-               if (sb->s_flags & MS_RDONLY)
-                       continue;       /* hm.  Was remounted r/o meanwhile */
                sb->s_count++;
                spin_unlock(&sb_lock);
+
                down_read(&sb->s_umount);
-               if (sb->s_root)
-                       __fsync_super(sb, wait);
+               if (!(sb->s_flags & MS_RDONLY) && sb->s_root)
+                       __sync_filesystem(sb, wait);
                up_read(&sb->s_umount);
+
                /* restart only when sb is no longer on the list */
                spin_lock(&sb_lock);
                if (__put_super_and_need_restart(sb))
@@ -103,8 +112,13 @@ restart:
        mutex_unlock(&mutex);
 }
 
+/*
+ * sync everything.  Start out by waking pdflush, because that writes back
+ * all queues in parallel.
+ */
 SYSCALL_DEFINE0(sync)
 {
+       wakeup_pdflush(0);
        sync_filesystems(0);
        sync_filesystems(1);
        if (unlikely(laptop_mode))
@@ -151,10 +165,8 @@ int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
 
        /* sync the superblock to buffers */
        sb = inode->i_sb;
-       lock_super(sb);
        if (sb->s_dirt && sb->s_op->write_super)
                sb->s_op->write_super(sb);
-       unlock_super(sb);
 
        /* .. finally sync the buffers to disk */
        err = sync_blockdev(sb->s_bdev);