Merge branch 'master' into for-linus
author: Jens Axboe <jaxboe@fusionio.com>
Tue, 1 Jun 2010 10:42:12 +0000 (12:42 +0200)
committer: Jens Axboe <jaxboe@fusionio.com>
Tue, 1 Jun 2010 10:42:12 +0000 (12:42 +0200)
Conflicts:
fs/pipe.c

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
1  2 
fs/fs-writeback.c
fs/pipe.c
fs/sync.c

diff --combined fs/fs-writeback.c
@@@ -45,6 -45,7 +45,6 @@@ struct wb_writeback_args 
        unsigned int for_kupdate:1;
        unsigned int range_cyclic:1;
        unsigned int for_background:1;
 -      unsigned int sb_pinned:1;
  };
  
  /*
@@@ -192,7 -193,8 +192,7 @@@ static void bdi_wait_on_work_clear(stru
  }
  
  static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
 -                               struct wb_writeback_args *args,
 -                               int wait)
 +                               struct wb_writeback_args *args)
  {
        struct bdi_work *work;
  
        if (work) {
                bdi_work_init(work, args);
                bdi_queue_work(bdi, work);
 -              if (wait)
 -                      bdi_wait_on_work_clear(work);
        } else {
                struct bdi_writeback *wb = &bdi->wb;
  
@@@ -230,6 -234,11 +230,6 @@@ static void bdi_sync_writeback(struct b
                .sync_mode      = WB_SYNC_ALL,
                .nr_pages       = LONG_MAX,
                .range_cyclic   = 0,
 -              /*
 -               * Setting sb_pinned is not necessary for WB_SYNC_ALL, but
 -               * lets make it explicitly clear.
 -               */
 -              .sb_pinned      = 1,
        };
        struct bdi_work work;
  
   * @bdi: the backing device to write from
   * @sb: write inodes from this super_block
   * @nr_pages: the number of pages to write
 - * @sb_locked: caller already holds sb umount sem.
   *
   * Description:
   *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
   *   started when this function returns, we make no guarentees on
 - *   completion. Caller specifies whether sb umount sem is held already or not.
 + *   completion. Caller need not hold sb s_umount semaphore.
   *
   */
  void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb,
 -                       long nr_pages, int sb_locked)
 +                       long nr_pages)
  {
        struct wb_writeback_args args = {
                .sb             = sb,
                .sync_mode      = WB_SYNC_NONE,
                .nr_pages       = nr_pages,
                .range_cyclic   = 1,
 -              .sb_pinned      = sb_locked,
        };
  
        /*
                args.for_background = 1;
        }
  
 -      bdi_alloc_queue_work(bdi, &args, sb_locked);
 +      bdi_alloc_queue_work(bdi, &args);
  }
  
  /*
@@@ -398,11 -409,11 +398,11 @@@ static void inode_wait_for_writeback(st
        wait_queue_head_t *wqh;
  
        wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
-       do {
+        while (inode->i_state & I_SYNC) {
                spin_unlock(&inode_lock);
                __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
                spin_lock(&inode_lock);
-       } while (inode->i_state & I_SYNC);
+       }
  }
  
  /*
@@@ -584,7 -595,7 +584,7 @@@ static enum sb_pin_state pin_sb_for_wri
        /*
         * Caller must already hold the ref for this
         */
 -      if (wbc->sync_mode == WB_SYNC_ALL || wbc->sb_pinned) {
 +      if (wbc->sync_mode == WB_SYNC_ALL) {
                WARN_ON(!rwsem_is_locked(&sb->s_umount));
                return SB_NOT_PINNED;
        }
@@@ -758,6 -769,7 +758,6 @@@ static long wb_writeback(struct bdi_wri
                .for_kupdate            = args->for_kupdate,
                .for_background         = args->for_background,
                .range_cyclic           = args->range_cyclic,
 -              .sb_pinned              = args->sb_pinned,
        };
        unsigned long oldest_jif;
        long wrote = 0;
@@@ -900,6 -912,7 +900,6 @@@ long wb_do_writeback(struct bdi_writeba
  
        while ((work = get_next_work_item(bdi, wb)) != NULL) {
                struct wb_writeback_args args = work->args;
 -              int post_clear;
  
                /*
                 * Override sync mode, in case we must wait for completion
                if (force_wait)
                        work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;
  
 -              post_clear = WB_SYNC_ALL || args.sb_pinned;
 -
                /*
                 * If this isn't a data integrity operation, just notify
                 * that we have seen this work and we are now starting it.
                 */
 -              if (!post_clear)
 +              if (args.sync_mode == WB_SYNC_NONE)
                        wb_clear_pending(wb, work);
  
                wrote += wb_writeback(wb, &args);
                 * This is a data integrity writeback, so only do the
                 * notification when we have completed the work.
                 */
 -              if (post_clear)
 +              if (args.sync_mode == WB_SYNC_ALL)
                        wb_clear_pending(wb, work);
        }
  
@@@ -996,7 -1011,7 +996,7 @@@ static void bdi_writeback_all(struct su
                if (!bdi_has_dirty_io(bdi))
                        continue;
  
 -              bdi_alloc_queue_work(bdi, &args, 0);
 +              bdi_alloc_queue_work(bdi, &args);
        }
  
        rcu_read_unlock();
@@@ -1205,6 -1220,18 +1205,6 @@@ static void wait_sb_inodes(struct super
        iput(old_inode);
  }
  
 -static void __writeback_inodes_sb(struct super_block *sb, int sb_locked)
 -{
 -      unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
 -      unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
 -      long nr_to_write;
 -
 -      nr_to_write = nr_dirty + nr_unstable +
 -                      (inodes_stat.nr_inodes - inodes_stat.nr_unused);
 -
 -      bdi_start_writeback(sb->s_bdi, sb, nr_to_write, sb_locked);
 -}
 -
  /**
   * writeback_inodes_sb        -       writeback dirty inodes from given super_block
   * @sb: the superblock
   */
  void writeback_inodes_sb(struct super_block *sb)
  {
 -      __writeback_inodes_sb(sb, 0);
 -}
 -EXPORT_SYMBOL(writeback_inodes_sb);
 +      unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
 +      unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
 +      long nr_to_write;
  
 -/**
 - * writeback_inodes_sb_locked - writeback dirty inodes from given super_block
 - * @sb: the superblock
 - *
 - * Like writeback_inodes_sb(), except the caller already holds the
 - * sb umount sem.
 - */
 -void writeback_inodes_sb_locked(struct super_block *sb)
 -{
 -      __writeback_inodes_sb(sb, 1);
 +      nr_to_write = nr_dirty + nr_unstable +
 +                      (inodes_stat.nr_inodes - inodes_stat.nr_unused);
 +
 +      bdi_start_writeback(sb->s_bdi, sb, nr_to_write);
  }
 +EXPORT_SYMBOL(writeback_inodes_sb);
  
  /**
   * writeback_inodes_sb_if_idle        -       start writeback if none underway
diff --combined fs/pipe.c
+++ b/fs/pipe.c
@@@ -230,6 -230,7 +230,7 @@@ void *generic_pipe_buf_map(struct pipe_
  
        return kmap(buf->page);
  }
+ EXPORT_SYMBOL(generic_pipe_buf_map);
  
  /**
   * generic_pipe_buf_unmap - unmap a previously mapped pipe buffer
@@@ -249,6 -250,7 +250,7 @@@ void generic_pipe_buf_unmap(struct pipe
        } else
                kunmap(buf->page);
  }
+ EXPORT_SYMBOL(generic_pipe_buf_unmap);
  
  /**
   * generic_pipe_buf_steal - attempt to take ownership of a &pipe_buffer
@@@ -279,6 -281,7 +281,7 @@@ int generic_pipe_buf_steal(struct pipe_
  
        return 1;
  }
+ EXPORT_SYMBOL(generic_pipe_buf_steal);
  
  /**
   * generic_pipe_buf_get - get a reference to a &struct pipe_buffer
@@@ -294,6 -297,7 +297,7 @@@ void generic_pipe_buf_get(struct pipe_i
  {
        page_cache_get(buf->page);
  }
+ EXPORT_SYMBOL(generic_pipe_buf_get);
  
  /**
   * generic_pipe_buf_confirm - verify contents of the pipe buffer
@@@ -309,6 -313,7 +313,7 @@@ int generic_pipe_buf_confirm(struct pip
  {
        return 0;
  }
+ EXPORT_SYMBOL(generic_pipe_buf_confirm);
  
  /**
   * generic_pipe_buf_release - put a reference to a &struct pipe_buffer
@@@ -323,6 -328,7 +328,7 @@@ void generic_pipe_buf_release(struct pi
  {
        page_cache_release(buf->page);
  }
+ EXPORT_SYMBOL(generic_pipe_buf_release);
  
  static const struct pipe_buf_operations anon_pipe_buf_ops = {
        .can_merge = 1,
@@@ -1112,20 -1118,26 +1118,20 @@@ SYSCALL_DEFINE1(pipe, int __user *, fil
   * Allocate a new array of pipe buffers and copy the info over. Returns the
   * pipe size if successful, or return -ERROR on error.
   */
 -static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
 +static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long nr_pages)
  {
        struct pipe_buffer *bufs;
  
        /*
 -       * Must be a power-of-2 currently
 -       */
 -      if (!is_power_of_2(arg))
 -              return -EINVAL;
 -
 -      /*
         * We can shrink the pipe, if arg >= pipe->nrbufs. Since we don't
         * expect a lot of shrink+grow operations, just free and allocate
         * again like we would do for growing. If the pipe currently
         * contains more buffers than arg, then return busy.
         */
 -      if (arg < pipe->nrbufs)
 +      if (nr_pages < pipe->nrbufs)
                return -EBUSY;
  
 -      bufs = kcalloc(arg, sizeof(struct pipe_buffer), GFP_KERNEL);
 +      bufs = kcalloc(nr_pages, sizeof(struct pipe_buffer), GFP_KERNEL);
        if (unlikely(!bufs))
                return -ENOMEM;
  
        pipe->curbuf = 0;
        kfree(pipe->bufs);
        pipe->bufs = bufs;
 -      pipe->buffers = arg;
 -      return arg;
 +      pipe->buffers = nr_pages;
 +      return nr_pages * PAGE_SIZE;
  }
  
  long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
        mutex_lock(&pipe->inode->i_mutex);
  
        switch (cmd) {
 -      case F_SETPIPE_SZ:
 -              if (!capable(CAP_SYS_ADMIN) && arg > pipe_max_pages) {
 -                      ret = -EINVAL;
 +      case F_SETPIPE_SZ: {
 +              unsigned long nr_pages;
 +
 +              /*
 +               * Currently the array must be a power-of-2 size, so adjust
 +               * upwards if needed.
 +               */
 +              nr_pages = (arg + PAGE_SIZE - 1) >> PAGE_SHIFT;
 +              nr_pages = roundup_pow_of_two(nr_pages);
 +
-               if (!capable(CAP_SYS_ADMIN) && nr_pages > pipe_max_pages)
-                       return -EPERM;
++              if (!capable(CAP_SYS_ADMIN) && nr_pages > pipe_max_pages) {
++                      ret = -EPERM;
+                       goto out;
+               }
 +
                /*
                 * The pipe needs to be at least 2 pages large to
                 * guarantee POSIX behaviour.
                 */
-               if (nr_pages < 2)
-                       return -EINVAL;
-               ret = pipe_set_size(pipe, nr_pages);
+               if (arg < 2) {
+                       ret = -EINVAL;
+                       goto out;
+               }
+               ret = pipe_set_size(pipe, arg);
                break;
 +              }
        case F_GETPIPE_SZ:
 -              ret = pipe->buffers;
 +              ret = pipe->buffers * PAGE_SIZE;
                break;
        default:
                ret = -EINVAL;
                break;
        }
  
+ out:
        mutex_unlock(&pipe->inode->i_mutex);
        return ret;
  }
diff --combined fs/sync.c
+++ b/fs/sync.c
@@@ -42,7 -42,7 +42,7 @@@ static int __sync_filesystem(struct sup
        if (wait)
                sync_inodes_sb(sb);
        else
 -              writeback_inodes_sb_locked(sb);
 +              writeback_inodes_sb(sb);
  
        if (sb->s_op->sync_fs)
                sb->s_op->sync_fs(sb, wait);
@@@ -130,12 -130,10 +130,10 @@@ void emergency_sync(void
  
  /*
   * Generic function to fsync a file.
-  *
-  * filp may be NULL if called via the msync of a vma.
   */
- int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
+ int file_fsync(struct file *filp, int datasync)
  {
-       struct inode * inode = dentry->d_inode;
+       struct inode *inode = filp->f_mapping->host;
        struct super_block * sb;
        int ret, err;
  
@@@ -183,7 -181,7 +181,7 @@@ int vfs_fsync_range(struct file *file, 
         * livelocks in fsync_buffers_list().
         */
        mutex_lock(&mapping->host->i_mutex);
-       err = file->f_op->fsync(file, file->f_path.dentry, datasync);
+       err = file->f_op->fsync(file, datasync);
        if (!ret)
                ret = err;
        mutex_unlock(&mapping->host->i_mutex);