diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index e72fd47..a6dde17 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -19,7 +19,6 @@
 #include <linux/writeback.h>
 #include <linux/swap.h>
 #include <linux/gfs2_ondisk.h>
-#include <linux/lm_interface.h>
 #include <linux/backing-dev.h>
 
 #include "gfs2.h"
@@ -210,25 +209,23 @@ static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc
 {
        struct inode *inode = page->mapping->host;
        struct gfs2_sbd *sdp = GFS2_SB(inode);
-       int error;
+       int ret;
        int done_trans = 0;
 
-       error = gfs2_writepage_common(page, wbc);
-       if (error <= 0)
-               return error;
-
        if (PageChecked(page)) {
                if (wbc->sync_mode != WB_SYNC_ALL)
                        goto out_ignore;
-               error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
-               if (error)
+               ret = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
+               if (ret)
                        goto out_ignore;
                done_trans = 1;
        }
-       error = __gfs2_jdata_writepage(page, wbc);
+       ret = gfs2_writepage_common(page, wbc);
+       if (ret > 0)
+               ret = __gfs2_jdata_writepage(page, wbc);
        if (done_trans)
                gfs2_trans_end(sdp);
-       return error;
+       return ret;
 
 out_ignore:
        redirty_page_for_writepage(wbc, page);
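
For reference, gfs2_jdata_writepage() after this hunk reads roughly as below: the common writepage checks now run after the journal transaction has been opened for a checked page, so the transaction is ended on the same path whatever gfs2_writepage_common() decides. The out_ignore tail beyond the context shown above is assumed from the usual redirty-and-unlock pattern, not taken from the hunk.

	static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
	{
		struct inode *inode = page->mapping->host;
		struct gfs2_sbd *sdp = GFS2_SB(inode);
		int ret;
		int done_trans = 0;

		if (PageChecked(page)) {
			if (wbc->sync_mode != WB_SYNC_ALL)
				goto out_ignore;
			ret = gfs2_trans_begin(sdp, RES_DINODE + 1, 0);
			if (ret)
				goto out_ignore;
			done_trans = 1;
		}
		/* A return value > 0 means the page still needs writing. */
		ret = gfs2_writepage_common(page, wbc);
		if (ret > 0)
			ret = __gfs2_jdata_writepage(page, wbc);
		if (done_trans)
			gfs2_trans_end(sdp);
		return ret;

	out_ignore:
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);		/* assumed, not shown in the hunk */
		return 0;
	}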
@@ -438,12 +435,13 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
        int error;
 
        /*
-        * Due to the order of unstuffing files and ->nopage(), we can be
+        * Due to the order of unstuffing files and ->fault(), we can be
         * asked for a zero page in the case of a stuffed file being extended,
         * so we need to supply one here. It doesn't happen often.
         */
        if (unlikely(page->index)) {
                zero_user(page, 0, PAGE_CACHE_SIZE);
+               SetPageUptodate(page);
                return 0;
        }
 
@@ -453,8 +451,8 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
 
        kaddr = kmap_atomic(page, KM_USER0);
        memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
-              ip->i_di.di_size);
-       memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size);
+              ip->i_disksize);
+       memset(kaddr + ip->i_disksize, 0, PAGE_CACHE_SIZE - ip->i_disksize);
        kunmap_atomic(kaddr, KM_USER0);
        flush_dcache_page(page);
        brelse(dibh);
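
A "stuffed" inode keeps its data inline in the dinode block, so only page 0 of such a file can ever hold data; any higher page index is all zeroes. The first hunk above now marks that zero page up to date so the read completes instead of being retried, and the second switches the on-disk size from the embedded ip->i_di.di_size to the flat ip->i_disksize field (the same rename recurs in the write_end hunks below). Condensed, the fast path becomes:

	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);	/* new: the zero-filled page is valid as-is */
		return 0;
	}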
@@ -499,34 +497,34 @@ static int __gfs2_readpage(void *file, struct page *page)
  * @file: The file to read
  * @page: The page of the file
  *
- * This deals with the locking required. We use a trylock in order to
- * avoid the page lock / glock ordering problems returning AOP_TRUNCATED_PAGE
- * in the event that we are unable to get the lock.
+ * This deals with the locking required. We have to unlock and
+ * relock the page in order to get the locking in the right
+ * order.
  */
 
 static int gfs2_readpage(struct file *file, struct page *page)
 {
-       struct gfs2_inode *ip = GFS2_I(page->mapping->host);
-       struct gfs2_holder *gh;
+       struct address_space *mapping = page->mapping;
+       struct gfs2_inode *ip = GFS2_I(mapping->host);
+       struct gfs2_holder gh;
        int error;
 
-       gh = gfs2_glock_is_locked_by_me(ip->i_gl);
-       if (!gh) {
-               gh = kmalloc(sizeof(struct gfs2_holder), GFP_NOFS);
-               if (!gh)
-                       return -ENOBUFS;
-               gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, gh);
+       unlock_page(page);
+       gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
+       error = gfs2_glock_nq(&gh);
+       if (unlikely(error))
+               goto out;
+       error = AOP_TRUNCATED_PAGE;
+       lock_page(page);
+       if (page->mapping == mapping && !PageUptodate(page))
+               error = __gfs2_readpage(file, page);
+       else
                unlock_page(page);
-               error = gfs2_glock_nq_atime(gh);
-               if (likely(error != 0))
-                       goto out;
-               return AOP_TRUNCATED_PAGE;
-       }
-       error = __gfs2_readpage(file, page);
-       gfs2_glock_dq(gh);
+       gfs2_glock_dq(&gh);
 out:
-       gfs2_holder_uninit(gh);
-       kfree(gh);
+       gfs2_holder_uninit(&gh);
+       if (error && error != AOP_TRUNCATED_PAGE)
+               lock_page(page);
        return error;
 }
 
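The rewritten gfs2_readpage() drops the kmalloc'd holder and the trylock dance. The VFS hands it a locked page, but the inode glock has to be taken first, so the page is unlocked, the glock is acquired, and the page is locked again. Because the page was briefly unlocked it may have been truncated or read by someone else in the meantime; the mapping and PageUptodate checks catch that, and AOP_TRUNCATED_PAGE tells the VFS to drop the page and retry. A minimal sketch of the ordering pattern, with the gfs2 specifics replaced by hypothetical helpers (take_outer_lock, drop_outer_lock and do_read are illustrative names, not from the tree; the re-lock on a hard error seen in the last two added lines is omitted here):

	static int readpage_outer_lock_first(struct file *file, struct page *page)
	{
		struct address_space *mapping = page->mapping;
		int error;

		unlock_page(page);			/* give up the inner (page) lock */
		error = take_outer_lock(mapping->host);	/* hypothetical cluster lock */
		if (error)
			goto out;

		error = AOP_TRUNCATED_PAGE;		/* default: ask the VFS to retry */
		lock_page(page);
		if (page->mapping == mapping && !PageUptodate(page))
			error = do_read(file, page);	/* hypothetical */
		else
			unlock_page(page);		/* raced with truncate or readahead */
		drop_outer_lock(mapping->host);
	out:
		return error;
	}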
@@ -594,8 +592,8 @@ static int gfs2_readpages(struct file *file, struct address_space *mapping,
        struct gfs2_holder gh;
        int ret;
 
-       gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
-       ret = gfs2_glock_nq_atime(&gh);
+       gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
+       ret = gfs2_glock_nq(&gh);
        if (unlikely(ret))
                goto out_uninit;
        if (!gfs2_is_stuffed(ip))
@@ -627,7 +625,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
 {
        struct gfs2_inode *ip = GFS2_I(mapping->host);
        struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
-       unsigned int data_blocks, ind_blocks, rblocks;
+       unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
        int alloc_required;
        int error = 0;
        struct gfs2_alloc *al;
@@ -636,16 +634,18 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
        unsigned to = from + len;
        struct page *page;
 
-       gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_ATIME, &ip->i_gh);
-       error = gfs2_glock_nq_atime(&ip->i_gh);
+       gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
+       error = gfs2_glock_nq(&ip->i_gh);
        if (unlikely(error))
                goto out_uninit;
 
-       gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
        error = gfs2_write_alloc_required(ip, pos, len, &alloc_required);
        if (error)
                goto out_unlock;
 
+       if (alloc_required || gfs2_is_jdata(ip))
+               gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);
+
        if (alloc_required) {
                al = gfs2_alloc_get(ip);
                if (!al) {
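
gfs2_write_alloc_required() is now consulted first, and the worst-case block reservation via gfs2_write_calc_reserv() is computed only when it can matter: an allocating write, or journaled data, which always consumes journal blocks. This is also why data_blocks and ind_blocks gained explicit zero initialisers in the previous hunk; for a plain in-place write they are never recalculated but presumably still feed the rblocks transaction sizing below (not shown in this hunk). In short:

	error = gfs2_write_alloc_required(ip, pos, len, &alloc_required);
	if (error)
		goto out_unlock;

	/* Only allocating or journaled-data writes need a block reservation. */
	if (alloc_required || gfs2_is_jdata(ip))
		gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks);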
@@ -653,14 +653,10 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
                        goto out_unlock;
                }
 
-               error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
+               error = gfs2_quota_lock_check(ip);
                if (error)
                        goto out_alloc_put;
 
-               error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
-               if (error)
-                       goto out_qunlock;
-
                al->al_requested = data_blocks + ind_blocks;
                error = gfs2_inplace_reserve(ip);
                if (error)
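
The separate gfs2_quota_lock() and gfs2_quota_check() calls against the inode's owner are folded into a single gfs2_quota_lock_check(), which also removes the out_qunlock bounce from this error path. A plausible shape for such a combined helper, reconstructed from the two calls it replaces (an assumption about the helper, not its actual body):

	static int gfs2_quota_lock_check(struct gfs2_inode *ip)
	{
		int error;

		error = gfs2_quota_lock(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
		if (error)
			return error;
		error = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
		if (error)
			gfs2_quota_unlock(ip);	/* assumed counterpart of the lock */
		return error;
	}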
@@ -679,7 +675,8 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping,
                goto out_trans_fail;
 
        error = -ENOMEM;
-       page = __grab_cache_page(mapping, index);
+       flags |= AOP_FLAG_NOFS;
+       page = grab_cache_page_write_begin(mapping, index, flags);
        *pagep = page;
        if (unlikely(!page))
                goto out_endtrans;
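
__grab_cache_page() is replaced by grab_cache_page_write_begin(), and AOP_FLAG_NOFS is ORed into the caller's flags so the page allocation performed while the transaction is open uses GFP_NOFS and cannot recurse back into the filesystem under memory pressure. In context the allocation now reads:

	error = -ENOMEM;
	flags |= AOP_FLAG_NOFS;	/* no filesystem reclaim while the transaction is open */
	page = grab_cache_page_write_begin(mapping, index, flags);
	*pagep = page;
	if (unlikely(!page))
		goto out_endtrans;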
@@ -786,7 +783,7 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
 
        if (inode->i_size < to) {
                i_size_write(inode, to);
-               ip->i_di.di_size = inode->i_size;
+               ip->i_disksize = inode->i_size;
                di->di_size = cpu_to_be64(inode->i_size);
                mark_inode_dirty(inode);
        }
@@ -851,9 +848,9 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
 
        ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata);
 
-       if (likely(ret >= 0) && (inode->i_size > ip->i_di.di_size)) {
+       if (likely(ret >= 0) && (inode->i_size > ip->i_disksize)) {
                di = (struct gfs2_dinode *)dibh->b_data;
-               ip->i_di.di_size = inode->i_size;
+               ip->i_disksize = inode->i_size;
                di->di_size = cpu_to_be64(inode->i_size);
                mark_inode_dirty(inode);
        }
@@ -979,7 +976,7 @@ static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
        if (gfs2_is_stuffed(ip))
                return 0;
 
-       if (offset > i_size_read(&ip->i_inode))
+       if (offset >= i_size_read(&ip->i_inode))
                return 0;
        return 1;
 }
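
The test tightens from > to >=: a direct I/O transfer that starts exactly at i_size now also makes gfs2_ok_for_dio() return 0, so the caller falls back to buffered I/O instead of attempting direct I/O at EOF. An illustrative boundary, assuming i_size == 4096:

	/* offset 4095 -> 1 : last byte is inside the file, direct I/O allowed
	 * offset 4096 -> 0 : exactly at EOF; the old '>' test let this through
	 * offset 8192 -> 0 : beyond EOF, unchanged */
	if (offset >= i_size_read(&ip->i_inode))
		return 0;
	return 1;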
@@ -1004,8 +1001,8 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
         * unfortunately have the option of only flushing a range like
         * the VFS does.
         */
-       gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, GL_ATIME, &gh);
-       rv = gfs2_glock_nq_atime(&gh);
+       gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh);
+       rv = gfs2_glock_nq(&gh);
        if (rv)
                return rv;
        rv = gfs2_ok_for_dio(ip, rw, offset);
@@ -1099,6 +1096,7 @@ static const struct address_space_operations gfs2_writeback_aops = {
        .releasepage = gfs2_releasepage,
        .direct_IO = gfs2_direct_IO,
        .migratepage = buffer_migrate_page,
+       .is_partially_uptodate = block_is_partially_uptodate,
 };
 
 static const struct address_space_operations gfs2_ordered_aops = {
@@ -1114,6 +1112,7 @@ static const struct address_space_operations gfs2_ordered_aops = {
        .releasepage = gfs2_releasepage,
        .direct_IO = gfs2_direct_IO,
        .migratepage = buffer_migrate_page,
+       .is_partially_uptodate = block_is_partially_uptodate,
 };
 
 static const struct address_space_operations gfs2_jdata_aops = {
@@ -1128,6 +1127,7 @@ static const struct address_space_operations gfs2_jdata_aops = {
        .bmap = gfs2_bmap,
        .invalidatepage = gfs2_invalidatepage,
        .releasepage = gfs2_releasepage,
+       .is_partially_uptodate = block_is_partially_uptodate,
 };
 
 void gfs2_set_aops(struct inode *inode)
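
All three address_space_operations tables gain block_is_partially_uptodate. This lets the generic read path answer "are just these bytes valid?" at buffer_head granularity, so a read that falls entirely within an up-to-date buffer of a partially populated page can be served without locking the page and calling ->readpage again. A hedged sketch of a caller (illustrative only; the real consumer is the generic file read code, and the read_descriptor_t-based signature used here is an assumption for this kernel era):

	static int range_uptodate(struct address_space *mapping, struct page *page,
				  unsigned long from, unsigned long count)
	{
		read_descriptor_t desc = { .count = count };

		if (PageUptodate(page))
			return 1;
		if (!mapping->a_ops->is_partially_uptodate)
			return 0;
		/* Nonzero when every buffer_head covering [from, from + count)
		 * within the page is already up to date. */
		return mapping->a_ops->is_partially_uptodate(page, &desc, from);
	}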