tree-wide: fix assorted typos all over the place
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index fd5f27a..0282ec7 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
 #include <linux/namei.h>
 #include <linux/uio.h>
 #include <linux/bio.h>
+#include <linux/workqueue.h>
+
 #include "ext4_jbd2.h"
 #include "xattr.h"
 #include "acl.h"
 #include "ext4_extents.h"
 
+#include <trace/events/ext4.h>
+
 #define MPAGE_DA_EXTENT_TAIL 0x01
 
 static inline int ext4_begin_ordered_truncate(struct inode *inode,
@@ -75,22 +79,20 @@ static int ext4_inode_is_fast_symlink(struct inode *inode)
  * but there may still be a record of it in the journal, and that record
  * still needs to be revoked.
  *
- * If the handle isn't valid we're not journaling so there's nothing to do.
+ * If the handle isn't valid we're not journaling, but we still need to
+ * call into ext4_journal_revoke() to put the buffer head.
  */
 int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
-                       struct buffer_head *bh, ext4_fsblk_t blocknr)
+               struct buffer_head *bh, ext4_fsblk_t blocknr)
 {
        int err;
 
-       if (!ext4_handle_valid(handle))
-               return 0;
-
        might_sleep();
 
        BUFFER_TRACE(bh, "enter");
 
        jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
-                 "data mode %lx\n",
+                 "data mode %x\n",
                  bh, is_metadata, inode->i_mode,
                  test_opt(inode->i_sb, DATA_FLAGS));
 
@@ -191,11 +193,24 @@ static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
  * so before we call here everything must be consistently dirtied against
  * this transaction.
  */
-static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
+int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
+                                int nblocks)
 {
+       int ret;
+
+       /*
+        * Drop i_data_sem to avoid deadlock with ext4_get_blocks.  At this
+        * moment, get_block can be called only for blocks inside i_size since
+        * the page cache has already been dropped and writes are blocked by
+        * i_mutex. So we can safely drop the i_data_sem here.
+        */
        BUG_ON(EXT4_JOURNAL(inode) == NULL);
        jbd_debug(2, "restarting handle %p\n", handle);
-       return ext4_journal_restart(handle, blocks_for_truncate(inode));
+       up_write(&EXT4_I(inode)->i_data_sem);
+       ret = ext4_journal_restart(handle, blocks_for_truncate(inode));
+       down_write(&EXT4_I(inode)->i_data_sem);
+
+       return ret;
 }
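ext4_truncate_restart_trans() above follows a simple discipline: release i_data_sem before a call that may block on the journal, retake it afterwards, and treat anything read under the lock as stale once it has been dropped. A rough standalone illustration of that drop-and-retake pattern, using POSIX rwlocks and made-up names rather than the kernel primitives:

#include <pthread.h>

static pthread_rwlock_t data_sem = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for the blocking journal restart; may sleep. */
static int restart_transaction(void)
{
        return 0;
}

/* Caller holds data_sem for writing, as i_data_sem is held above. */
static int restart_with_lock_dropped(void)
{
        int ret;

        pthread_rwlock_unlock(&data_sem);       /* up_write()   */
        ret = restart_transaction();            /* may block    */
        pthread_rwlock_wrlock(&data_sem);       /* down_write() */
        return ret;
}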
 
 /*
@@ -329,8 +344,8 @@ static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
  */
 
 static int ext4_block_to_path(struct inode *inode,
-                       ext4_lblk_t i_block,
-                       ext4_lblk_t offsets[4], int *boundary)
+                             ext4_lblk_t i_block,
+                             ext4_lblk_t offsets[4], int *boundary)
 {
        int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
        int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
@@ -340,9 +355,7 @@ static int ext4_block_to_path(struct inode *inode,
        int n = 0;
        int final = 0;
 
-       if (i_block < 0) {
-               ext4_warning(inode->i_sb, "ext4_block_to_path", "block < 0");
-       } else if (i_block < direct_blocks) {
+       if (i_block < direct_blocks) {
                offsets[n++] = i_block;
                final = direct_blocks;
        } else if ((i_block -= direct_blocks) < indirect_blocks) {
@@ -362,9 +375,9 @@ static int ext4_block_to_path(struct inode *inode,
                final = ptrs;
        } else {
                ext4_warning(inode->i_sb, "ext4_block_to_path",
-                               "block %lu > max in inode %lu",
-                               i_block + direct_blocks +
-                               indirect_blocks + double_blocks, inode->i_ino);
+                            "block %lu > max in inode %lu",
+                            i_block + direct_blocks +
+                            indirect_blocks + double_blocks, inode->i_ino);
        }
        if (boundary)
                *boundary = final - 1 - (i_block & (ptrs - 1));
@@ -372,31 +385,32 @@ static int ext4_block_to_path(struct inode *inode,
 }
 
 static int __ext4_check_blockref(const char *function, struct inode *inode,
-                                __le32 *p, unsigned int max) {
-
-       unsigned int maxblocks = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es);
+                                __le32 *p, unsigned int max)
+{
        __le32 *bref = p;
+       unsigned int blk;
+
        while (bref < p+max) {
-               if (unlikely(le32_to_cpu(*bref) >= maxblocks)) {
+               blk = le32_to_cpu(*bref++);
+               if (blk &&
+                   unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
+                                                   blk, 1))) {
                        ext4_error(inode->i_sb, function,
-                                  "block reference %u >= max (%u) "
-                                  "in inode #%lu, offset=%d",
-                                  le32_to_cpu(*bref), maxblocks,
-                                  inode->i_ino, (int)(bref-p));
-                       return -EIO;
-               }
-               bref++;
-       }
-       return 0;
+                                  "invalid block reference %u "
+                                  "in inode #%lu", blk, inode->i_ino);
+                       return -EIO;
+               }
+       }
+       return 0;
 }
 
 
 #define ext4_check_indirect_blockref(inode, bh)                         \
-        __ext4_check_blockref(__func__, inode, (__le32 *)(bh)->b_data,  \
+       __ext4_check_blockref(__func__, inode, (__le32 *)(bh)->b_data,  \
                              EXT4_ADDR_PER_BLOCK((inode)->i_sb))
 
 #define ext4_check_inode_blockref(inode)                                \
-        __ext4_check_blockref(__func__, inode, EXT4_I(inode)->i_data,   \
+       __ext4_check_blockref(__func__, inode, EXT4_I(inode)->i_data,   \
                              EXT4_NDIR_BLOCKS)
 
 /**
@@ -446,7 +460,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
                bh = sb_getblk(sb, le32_to_cpu(p->key));
                if (unlikely(!bh))
                        goto failure;
-                  
+
                if (!bh_uptodate_or_lock(bh)) {
                        if (bh_submit_read(bh) < 0) {
                                put_bh(bh);
@@ -458,7 +472,7 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
                                goto failure;
                        }
                }
-               
+
                add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
                /* Reader: end */
                if (!p->key)
@@ -549,15 +563,21 @@ static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
  *
  *     Normally this function finds the preferred place for block allocation
  *     and returns it.
+ *     Because this is only used for non-extent files, we limit the block nr
+ *     to 32 bits.
  */
 static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
-               Indirect *partial)
+                                  Indirect *partial)
 {
+       ext4_fsblk_t goal;
+
        /*
         * XXX need to get goal block from mballoc's data structures
         */
 
-       return ext4_find_near(inode, partial);
+       goal = ext4_find_near(inode, partial);
+       goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
+       return goal;
 }
 
 /**
@@ -573,7 +593,7 @@ static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
  *     direct and indirect blocks.
  */
 static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
-               int blocks_to_boundary)
+                                int blocks_to_boundary)
 {
        unsigned int count = 0;
 
@@ -609,9 +629,9 @@ static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
  *             direct blocks
  */
 static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
-                               ext4_lblk_t iblock, ext4_fsblk_t goal,
-                               int indirect_blks, int blks,
-                               ext4_fsblk_t new_blocks[4], int *err)
+                            ext4_lblk_t iblock, ext4_fsblk_t goal,
+                            int indirect_blks, int blks,
+                            ext4_fsblk_t new_blocks[4], int *err)
 {
        struct ext4_allocation_request ar;
        int target, i;
@@ -638,6 +658,8 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
                if (*err)
                        goto failed_out;
 
+               BUG_ON(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS);
+
                target -= count;
                /* allocate blocks for indirect blocks */
                while (index < indirect_blks && count) {
@@ -672,6 +694,7 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
                ar.flags = EXT4_MB_HINT_DATA;
 
        current_block = ext4_mb_new_blocks(handle, &ar, err);
+       BUG_ON(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS);
 
        if (*err && (target == blks)) {
                /*
@@ -682,10 +705,10 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
        }
        if (!*err) {
                if (target == blks) {
-               /*
-                * save the new block number
-                * for the first direct block
-                */
+                       /*
+                        * save the new block number
+                        * for the first direct block
+                        */
                        new_blocks[index] = current_block;
                }
                blk_allocated += ar.len;
@@ -727,9 +750,9 @@ failed_out:
  *     as described above and return 0.
  */
 static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
-                               ext4_lblk_t iblock, int indirect_blks,
-                               int *blks, ext4_fsblk_t goal,
-                               ext4_lblk_t *offsets, Indirect *branch)
+                            ext4_lblk_t iblock, int indirect_blks,
+                            int *blks, ext4_fsblk_t goal,
+                            ext4_lblk_t *offsets, Indirect *branch)
 {
        int blocksize = inode->i_sb->s_blocksize;
        int i, n = 0;
@@ -760,8 +783,9 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
                BUFFER_TRACE(bh, "call get_create_access");
                err = ext4_journal_get_create_access(handle, bh);
                if (err) {
+                       /* Don't brelse(bh) here; it's done in
+                        * ext4_journal_forget() below */
                        unlock_buffer(bh);
-                       brelse(bh);
                        goto failed;
                }
 
@@ -776,7 +800,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
                         * the chain to point to the new allocated
                         * data blocks numbers
                         */
-                       for (i=1; i < num; i++)
+                       for (i = 1; i < num; i++)
                                *(branch[n].p + i) = cpu_to_le32(++current_block);
                }
                BUFFER_TRACE(bh, "marking uptodate");
@@ -819,7 +843,8 @@ failed:
  * chain to new block and return 0.
  */
 static int ext4_splice_branch(handle_t *handle, struct inode *inode,
-                       ext4_lblk_t block, Indirect *where, int num, int blks)
+                             ext4_lblk_t block, Indirect *where, int num,
+                             int blks)
 {
        int i;
        int err = 0;
@@ -851,10 +876,6 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
        }
 
        /* We are done with atomic stuff, now do the rest of housekeeping */
-
-       inode->i_ctime = ext4_current_time(inode);
-       ext4_mark_inode_dirty(handle, inode);
-
        /* had we spliced it onto indirect block? */
        if (where->bh) {
                /*
@@ -873,8 +894,8 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
        } else {
                /*
                 * OK, we spliced it into the inode itself on a direct block.
-                * Inode was dirtied above.
                 */
+               ext4_mark_inode_dirty(handle, inode);
                jbd_debug(5, "splicing direct\n");
        }
        return err;
@@ -920,9 +941,9 @@ err_out:
  * blocks.
  */
 static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
-                                 ext4_lblk_t iblock, unsigned int maxblocks,
-                                 struct buffer_head *bh_result,
-                                 int flags)
+                              ext4_lblk_t iblock, unsigned int maxblocks,
+                              struct buffer_head *bh_result,
+                              int flags)
 {
        int err = -EIO;
        ext4_lblk_t offsets[4];
@@ -932,16 +953,13 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
        int indirect_blks;
        int blocks_to_boundary = 0;
        int depth;
-       struct ext4_inode_info *ei = EXT4_I(inode);
        int count = 0;
        ext4_fsblk_t first_block = 0;
-       loff_t disksize;
-
 
        J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
        J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
        depth = ext4_block_to_path(inode, iblock, offsets,
-                                       &blocks_to_boundary);
+                                  &blocks_to_boundary);
 
        if (depth == 0)
                goto out;
@@ -989,8 +1007,8 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
         * Block out ext4_truncate while we alter the tree
         */
        err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
-                                       &count, goal,
-                                       offsets + (partial - chain), partial);
+                               &count, goal,
+                               offsets + (partial - chain), partial);
 
        /*
         * The ext4_splice_branch call will free and forget any buffers
@@ -1001,20 +1019,8 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
         */
        if (!err)
                err = ext4_splice_branch(handle, inode, iblock,
-                                       partial, indirect_blks, count);
-       /*
-        * i_disksize growing is protected by i_data_sem.  Don't forget to
-        * protect it if you're about to implement concurrent
-        * ext4_get_block() -bzzz
-       */
-       if (!err && (flags & EXT4_GET_BLOCKS_EXTEND_DISKSIZE)) {
-               disksize = ((loff_t) iblock + count) << inode->i_blkbits;
-               if (disksize > i_size_read(inode))
-                       disksize = i_size_read(inode);
-               if (disksize > ei->i_disksize)
-                       ei->i_disksize = disksize;
-       }
-       if (err)
+                                        partial, indirect_blks, count);
+       else
                goto cleanup;
 
        set_buffer_new(bh_result);
@@ -1125,6 +1131,78 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
                ext4_discard_preallocations(inode);
 }
 
+static int check_block_validity(struct inode *inode, const char *msg,
+                               sector_t logical, sector_t phys, int len)
+{
+       if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), phys, len)) {
+               ext4_error(inode->i_sb, msg,
+                          "inode #%lu logical block %llu mapped to %llu "
+                          "(size %d)", inode->i_ino,
+                          (unsigned long long) logical,
+                          (unsigned long long) phys, len);
+               return -EIO;
+       }
+       return 0;
+}
+
+/*
+ * Return the number of contiguous dirty pages in a given inode
+ * starting at page frame idx.
+ */
+static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
+                                   unsigned int max_pages)
+{
+       struct address_space *mapping = inode->i_mapping;
+       pgoff_t index;
+       struct pagevec pvec;
+       pgoff_t num = 0;
+       int i, nr_pages, done = 0;
+
+       if (max_pages == 0)
+               return 0;
+       pagevec_init(&pvec, 0);
+       while (!done) {
+               index = idx;
+               nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+                                             PAGECACHE_TAG_DIRTY,
+                                             (pgoff_t)PAGEVEC_SIZE);
+               if (nr_pages == 0)
+                       break;
+               for (i = 0; i < nr_pages; i++) {
+                       struct page *page = pvec.pages[i];
+                       struct buffer_head *bh, *head;
+
+                       lock_page(page);
+                       if (unlikely(page->mapping != mapping) ||
+                           !PageDirty(page) ||
+                           PageWriteback(page) ||
+                           page->index != idx) {
+                               done = 1;
+                               unlock_page(page);
+                               break;
+                       }
+                       if (page_has_buffers(page)) {
+                               bh = head = page_buffers(page);
+                               do {
+                                       if (!buffer_delay(bh) &&
+                                           !buffer_unwritten(bh))
+                                               done = 1;
+                                       bh = bh->b_this_page;
+                               } while (!done && (bh != head));
+                       }
+                       unlock_page(page);
+                       if (done)
+                               break;
+                       idx++;
+                       num++;
+                       if (num >= max_pages)
+                               break;
+               }
+               pagevec_release(&pvec);
+       }
+       return num;
+}
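ext4_num_dirty_pages() above walks the mapping's PAGECACHE_TAG_DIRTY tag in PAGEVEC_SIZE batches and stops at the first hole, non-dirty or writeback page, or buffer that is neither delay nor unwritten. As a rough userspace illustration of the same count-in-batches-until-a-gap pattern (made-up names, not kernel code):

#include <stddef.h>

/*
 * Count consecutive "dirty" entries starting at idx, scanning in fixed-size
 * batches the way the helper above walks the radix-tree tag; the first clean
 * entry or the max cap ends the run.
 */
static size_t count_contiguous_dirty(const unsigned char *dirty, size_t nr,
                                     size_t idx, size_t max, size_t batch)
{
        size_t num = 0;

        while (idx < nr && num < max) {
                size_t end = (idx + batch < nr) ? idx + batch : nr;

                for (; idx < end; idx++) {
                        if (!dirty[idx])        /* first gap ends the run */
                                return num;
                        if (++num >= max)
                                return num;
                }
        }
        return num;
}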
+
 /*
  * The ext4_get_blocks() function tries to look up the requested blocks,
  * and returns if the blocks are already mapped.
@@ -1156,6 +1234,9 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
        clear_buffer_mapped(bh);
        clear_buffer_unwritten(bh);
 
+       ext_debug("ext4_get_blocks(): inode %lu, flag %d, max_blocks %u, "
+                 "logical block %lu\n", inode->i_ino, flags, max_blocks,
+                 (unsigned long)block);
        /*
         * Try to see if we can get the block without requesting a new
         * file system block.
@@ -1170,6 +1251,13 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
        }
        up_read((&EXT4_I(inode)->i_data_sem));
 
+       if (retval > 0 && buffer_mapped(bh)) {
+               int ret = check_block_validity(inode, "file system corruption",
+                                              block, bh->b_blocknr, retval);
+               if (ret != 0)
+                       return ret;
+       }
+
        /* If it is only a block(s) look up */
        if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
                return retval;
@@ -1229,23 +1317,28 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
                         * i_data's format changing.  Force the migrate
                         * to fail by clearing migrate flags
                         */
-                       EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags &
-                                                       ~EXT4_EXT_MIGRATE;
+                       EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE;
                }
        }
 
-       if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
+       if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
                EXT4_I(inode)->i_delalloc_reserved_flag = 0;
-               /*
-                * Update reserved blocks/metadata blocks
-                * after successful block allocation
-                * which were deferred till now
-                */
-               if ((retval > 0) && buffer_delay(bh))
-                       ext4_da_update_reserve_space(inode, retval);
-       }
+
+       /*
+        * Update reserved blocks/metadata blocks after successful
+        * block allocation which had been deferred till now.
+        */
+       if ((retval > 0) && (flags & EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE))
+               ext4_da_update_reserve_space(inode, retval);
 
        up_write((&EXT4_I(inode)->i_data_sem));
+       if (retval > 0 && buffer_mapped(bh)) {
+               int ret = check_block_validity(inode, "file system "
+                                              "corruption after allocation",
+                                              block, bh->b_blocknr, retval);
+               if (ret != 0)
+                       return ret;
+       }
        return retval;
 }
 
@@ -1293,7 +1386,7 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
 {
        struct buffer_head dummy;
        int fatal = 0, err;
-       int flags = EXT4_GET_BLOCKS_EXTEND_DISKSIZE;
+       int flags = 0;
 
        J_ASSERT(handle != NULL || create == 0);
 
@@ -1392,8 +1485,7 @@ static int walk_page_buffers(handle_t *handle,
 
        for (bh = head, block_start = 0;
             ret == 0 && (bh != head || !block_start);
-            block_start = block_end, bh = next)
-       {
+            block_start = block_end, bh = next) {
                next = bh->b_this_page;
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
@@ -1434,7 +1526,7 @@ static int walk_page_buffers(handle_t *handle,
  * write.
  */
 static int do_journal_get_write_access(handle_t *handle,
-                                       struct buffer_head *bh)
+                                      struct buffer_head *bh)
 {
        if (!buffer_mapped(bh) || buffer_freed(bh))
                return 0;
@@ -1442,22 +1534,24 @@ static int do_journal_get_write_access(handle_t *handle,
 }
 
 static int ext4_write_begin(struct file *file, struct address_space *mapping,
-                               loff_t pos, unsigned len, unsigned flags,
-                               struct page **pagep, void **fsdata)
+                           loff_t pos, unsigned len, unsigned flags,
+                           struct page **pagep, void **fsdata)
 {
        struct inode *inode = mapping->host;
-       int ret, needed_blocks = ext4_writepage_trans_blocks(inode);
+       int ret, needed_blocks;
        handle_t *handle;
        int retries = 0;
        struct page *page;
-       pgoff_t index;
+       pgoff_t index;
        unsigned from, to;
 
-       trace_mark(ext4_write_begin,
-                  "dev %s ino %lu pos %llu len %u flags %u",
-                  inode->i_sb->s_id, inode->i_ino,
-                  (unsigned long long) pos, len, flags);
-       index = pos >> PAGE_CACHE_SHIFT;
+       trace_ext4_write_begin(inode, pos, len, flags);
+       /*
+        * Reserve one block more for addition to orphan list in case
+        * we allocate blocks but write fails for some reason
+        */
+       needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
+       index = pos >> PAGE_CACHE_SHIFT;
        from = pos & (PAGE_CACHE_SIZE - 1);
        to = from + len;
 
@@ -1490,15 +1584,30 @@ retry:
 
        if (ret) {
                unlock_page(page);
-               ext4_journal_stop(handle);
                page_cache_release(page);
                /*
                 * block_write_begin may have instantiated a few blocks
                 * outside i_size.  Trim these off again. Don't need
                 * i_size_read because we hold i_mutex.
+                *
+                * Add inode to orphan list in case we crash before
+                * truncate finishes
                 */
-               if (pos + len > inode->i_size)
-                       vmtruncate(inode, inode->i_size);
+               if (pos + len > inode->i_size && ext4_can_truncate(inode))
+                       ext4_orphan_add(handle, inode);
+
+               ext4_journal_stop(handle);
+               if (pos + len > inode->i_size) {
+                       ext4_truncate(inode);
+                       /*
+                        * If truncate failed early the inode might
+                        * still be on the orphan list; we need to
+                        * make sure the inode is removed from the
+                        * orphan list in that case.
+                        */
+                       if (inode->i_nlink)
+                               ext4_orphan_del(NULL, inode);
+               }
        }
 
        if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
@@ -1516,6 +1625,52 @@ static int write_end_fn(handle_t *handle, struct buffer_head *bh)
        return ext4_handle_dirty_metadata(handle, NULL, bh);
 }
 
+static int ext4_generic_write_end(struct file *file,
+                                 struct address_space *mapping,
+                                 loff_t pos, unsigned len, unsigned copied,
+                                 struct page *page, void *fsdata)
+{
+       int i_size_changed = 0;
+       struct inode *inode = mapping->host;
+       handle_t *handle = ext4_journal_current_handle();
+
+       copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+
+       /*
+        * No need to use i_size_read() here, the i_size
+        * cannot change under us because we hold i_mutex.
+        *
+        * But it's important to update i_size while still holding page lock:
+        * page writeout could otherwise come in and zero beyond i_size.
+        */
+       if (pos + copied > inode->i_size) {
+               i_size_write(inode, pos + copied);
+               i_size_changed = 1;
+       }
+
+       if (pos + copied > EXT4_I(inode)->i_disksize) {
+               /* We need to mark the inode dirty even if
+                * new_i_size is less than inode->i_size
+                * but greater than i_disksize. (hint: delalloc)
+                */
+               ext4_update_i_disksize(inode, (pos + copied));
+               i_size_changed = 1;
+       }
+       unlock_page(page);
+       page_cache_release(page);
+
+       /*
+        * Don't mark the inode dirty under page lock. First, it unnecessarily
+        * makes the holding time of page lock longer. Second, it forces lock
+        * ordering of page lock and transaction start for journaling
+        * filesystems.
+        */
+       if (i_size_changed)
+               ext4_mark_inode_dirty(handle, inode);
+
+       return copied;
+}
+
 /*
  * We need to pick up the new inode size which generic_commit_write gave us
  * `file' can be NULL - eg, when called from page_symlink().
@@ -1524,36 +1679,27 @@ static int write_end_fn(handle_t *handle, struct buffer_head *bh)
  * buffers are managed internally.
  */
 static int ext4_ordered_write_end(struct file *file,
-                               struct address_space *mapping,
-                               loff_t pos, unsigned len, unsigned copied,
-                               struct page *page, void *fsdata)
+                                 struct address_space *mapping,
+                                 loff_t pos, unsigned len, unsigned copied,
+                                 struct page *page, void *fsdata)
 {
        handle_t *handle = ext4_journal_current_handle();
        struct inode *inode = mapping->host;
        int ret = 0, ret2;
 
-       trace_mark(ext4_ordered_write_end,
-                  "dev %s ino %lu pos %llu len %u copied %u",
-                  inode->i_sb->s_id, inode->i_ino,
-                  (unsigned long long) pos, len, copied);
+       trace_ext4_ordered_write_end(inode, pos, len, copied);
        ret = ext4_jbd2_file_inode(handle, inode);
 
        if (ret == 0) {
-               loff_t new_i_size;
-
-               new_i_size = pos + copied;
-               if (new_i_size > EXT4_I(inode)->i_disksize) {
-                       ext4_update_i_disksize(inode, new_i_size);
-                       /* We need to mark inode dirty even if
-                        * new_i_size is less that inode->i_size
-                        * bu greater than i_disksize.(hint delalloc)
-                        */
-                       ext4_mark_inode_dirty(handle, inode);
-               }
-
-               ret2 = generic_write_end(file, mapping, pos, len, copied,
+               ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
                                                        page, fsdata);
                copied = ret2;
+               if (pos + len > inode->i_size && ext4_can_truncate(inode))
+                       /* If we have allocated more blocks than we copied,
+                        * we will have blocks allocated outside
+                        * inode->i_size, so truncate them.
+                        */
+                       ext4_orphan_add(handle, inode);
                if (ret2 < 0)
                        ret = ret2;
        }
@@ -1561,36 +1707,41 @@ static int ext4_ordered_write_end(struct file *file,
        if (!ret)
                ret = ret2;
 
+       if (pos + len > inode->i_size) {
+               ext4_truncate(inode);
+               /*
+                * If truncate failed early the inode might still be
+                * on the orphan list; we need to make sure the inode
+                * is removed from the orphan list in that case.
+                */
+               if (inode->i_nlink)
+                       ext4_orphan_del(NULL, inode);
+       }
+
        return ret ? ret : copied;
 }
 
 static int ext4_writeback_write_end(struct file *file,
-                               struct address_space *mapping,
-                               loff_t pos, unsigned len, unsigned copied,
-                               struct page *page, void *fsdata)
+                                   struct address_space *mapping,
+                                   loff_t pos, unsigned len, unsigned copied,
+                                   struct page *page, void *fsdata)
 {
        handle_t *handle = ext4_journal_current_handle();
        struct inode *inode = mapping->host;
        int ret = 0, ret2;
-       loff_t new_i_size;
-
-       trace_mark(ext4_writeback_write_end,
-                  "dev %s ino %lu pos %llu len %u copied %u",
-                  inode->i_sb->s_id, inode->i_ino,
-                  (unsigned long long) pos, len, copied);
-       new_i_size = pos + copied;
-       if (new_i_size > EXT4_I(inode)->i_disksize) {
-               ext4_update_i_disksize(inode, new_i_size);
-               /* We need to mark inode dirty even if
-                * new_i_size is less that inode->i_size
-                * bu greater than i_disksize.(hint delalloc)
-                */
-               ext4_mark_inode_dirty(handle, inode);
-       }
 
-       ret2 = generic_write_end(file, mapping, pos, len, copied,
+       trace_ext4_writeback_write_end(inode, pos, len, copied);
+       ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
                                                        page, fsdata);
        copied = ret2;
+       if (pos + len > inode->i_size && ext4_can_truncate(inode))
+               /* If we have allocated more blocks than we copied,
+                * we will have blocks allocated outside
+                * inode->i_size, so truncate them.
+                */
+               ext4_orphan_add(handle, inode);
+
        if (ret2 < 0)
                ret = ret2;
 
@@ -1598,13 +1749,24 @@ static int ext4_writeback_write_end(struct file *file,
        if (!ret)
                ret = ret2;
 
+       if (pos + len > inode->i_size) {
+               ext4_truncate(inode);
+               /*
+                * If truncate failed early the inode might still be
+                * on the orphan list; we need to make sure the inode
+                * is removed from the orphan list in that case.
+                */
+               if (inode->i_nlink)
+                       ext4_orphan_del(NULL, inode);
+       }
+
        return ret ? ret : copied;
 }
 
 static int ext4_journalled_write_end(struct file *file,
-                               struct address_space *mapping,
-                               loff_t pos, unsigned len, unsigned copied,
-                               struct page *page, void *fsdata)
+                                    struct address_space *mapping,
+                                    loff_t pos, unsigned len, unsigned copied,
+                                    struct page *page, void *fsdata)
 {
        handle_t *handle = ext4_journal_current_handle();
        struct inode *inode = mapping->host;
@@ -1613,10 +1775,7 @@ static int ext4_journalled_write_end(struct file *file,
        unsigned from, to;
        loff_t new_i_size;
 
-       trace_mark(ext4_journalled_write_end,
-                  "dev %s ino %lu pos %llu len %u copied %u",
-                  inode->i_sb->s_id, inode->i_ino,
-                  (unsigned long long) pos, len, copied);
+       trace_ext4_journalled_write_end(inode, pos, len, copied);
        from = pos & (PAGE_CACHE_SIZE - 1);
        to = from + len;
 
@@ -1642,10 +1801,27 @@ static int ext4_journalled_write_end(struct file *file,
        }
 
        unlock_page(page);
+       page_cache_release(page);
+       if (pos + len > inode->i_size && ext4_can_truncate(inode))
+               /* If we have allocated more blocks than we copied,
+                * we will have blocks allocated outside
+                * inode->i_size, so truncate them.
+                */
+               ext4_orphan_add(handle, inode);
+
        ret2 = ext4_journal_stop(handle);
        if (!ret)
                ret = ret2;
-       page_cache_release(page);
+       if (pos + len > inode->i_size) {
+               ext4_truncate(inode);
+               /*
+                * If truncate failed early the inode might still be
+                * on the orphan list; we need to make sure the inode
+                * is removed from the orphan list in that case.
+                */
+               if (inode->i_nlink)
+                       ext4_orphan_del(NULL, inode);
+       }
 
        return ret ? ret : copied;
 }
@@ -1682,11 +1858,11 @@ repeat:
 
        if (ext4_claim_free_blocks(sbi, total)) {
                spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+               vfs_dq_release_reservation_block(inode, total);
                if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
                        yield();
                        goto repeat;
                }
-               vfs_dq_release_reservation_block(inode, total);
                return -ENOSPC;
        }
        EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
@@ -1745,7 +1921,7 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
 }
 
 static void ext4_da_page_release_reservation(struct page *page,
-                                               unsigned long offset)
+                                            unsigned long offset)
 {
        int to_release = 0;
        struct buffer_head *head, *bh;
@@ -1769,18 +1945,6 @@ static void ext4_da_page_release_reservation(struct page *page,
  * Delayed allocation stuff
  */
 
-struct mpage_da_data {
-       struct inode *inode;
-       sector_t b_blocknr;             /* start block number of extent */
-       size_t b_size;                  /* size of extent */
-       unsigned long b_state;          /* state of the extent */
-       unsigned long first_page, next_page;    /* extent of pages */
-       struct writeback_control *wbc;
-       int io_done;
-       int pages_written;
-       int retval;
-};
-
 /*
  * mpage_da_submit_io - walks through extent of pages and try to write
  * them with writepage() call back
@@ -1990,73 +2154,22 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
 static void ext4_print_free_blocks(struct inode *inode)
 {
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-       printk(KERN_EMERG "Total free blocks count %lld\n",
-                       ext4_count_free_blocks(inode->i_sb));
-       printk(KERN_EMERG "Free/Dirty block details\n");
-       printk(KERN_EMERG "free_blocks=%lld\n",
-                       (long long)percpu_counter_sum(&sbi->s_freeblocks_counter));
-       printk(KERN_EMERG "dirty_blocks=%lld\n",
-                       (long long)percpu_counter_sum(&sbi->s_dirtyblocks_counter));
-       printk(KERN_EMERG "Block reservation details\n");
-       printk(KERN_EMERG "i_reserved_data_blocks=%u\n",
-                       EXT4_I(inode)->i_reserved_data_blocks);
-       printk(KERN_EMERG "i_reserved_meta_blocks=%u\n",
-                       EXT4_I(inode)->i_reserved_meta_blocks);
+       printk(KERN_CRIT "Total free blocks count %lld\n",
+              ext4_count_free_blocks(inode->i_sb));
+       printk(KERN_CRIT "Free/Dirty block details\n");
+       printk(KERN_CRIT "free_blocks=%lld\n",
+              (long long) percpu_counter_sum(&sbi->s_freeblocks_counter));
+       printk(KERN_CRIT "dirty_blocks=%lld\n",
+              (long long) percpu_counter_sum(&sbi->s_dirtyblocks_counter));
+       printk(KERN_CRIT "Block reservation details\n");
+       printk(KERN_CRIT "i_reserved_data_blocks=%u\n",
+              EXT4_I(inode)->i_reserved_data_blocks);
+       printk(KERN_CRIT "i_reserved_meta_blocks=%u\n",
+              EXT4_I(inode)->i_reserved_meta_blocks);
        return;
 }
 
 /*
- * This function is used by mpage_da_map_blocks().  We separate it out
- * as a separate function just to make life easier, and because
- * mpage_da_map_blocks() used to be a generic function that took a
- * get_block_t.
- */
-static int ext4_da_get_block_write(struct inode *inode, sector_t iblock,
-                                  struct buffer_head *bh_result)
-{
-       int ret;
-       unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
-       loff_t disksize = EXT4_I(inode)->i_disksize;
-       handle_t *handle = NULL;
-
-       handle = ext4_journal_current_handle();
-       BUG_ON(!handle);
-       ret = ext4_get_blocks(handle, inode, iblock, max_blocks,
-                             bh_result, EXT4_GET_BLOCKS_CREATE|
-                             EXT4_GET_BLOCKS_DELALLOC_RESERVE);
-       if (ret <= 0)
-               return ret;
-
-       bh_result->b_size = (ret << inode->i_blkbits);
-
-       if (ext4_should_order_data(inode)) {
-               int retval;
-               retval = ext4_jbd2_file_inode(handle, inode);
-               if (retval)
-                       /*
-                        * Failed to add inode for ordered mode. Don't
-                        * update file size
-                        */
-                       return retval;
-       }
-
-       /*
-        * Update on-disk size along with block allocation we don't
-        * use EXT4_GET_BLOCKS_EXTEND_DISKSIZE as size may change
-        * within already allocated block -bzzz
-        */
-       disksize = ((loff_t) iblock + ret) << inode->i_blkbits;
-       if (disksize > i_size_read(inode))
-               disksize = i_size_read(inode);
-       if (disksize > EXT4_I(inode)->i_disksize) {
-               ext4_update_i_disksize(inode, disksize);
-               ret = ext4_mark_inode_dirty(handle, inode);
-               return ret;
-       }
-       return 0;
-}
-
-/*
  * mpage_da_map_blocks - go through given space
  *
  * @mpd - bh describing space
@@ -2066,9 +2179,12 @@ static int ext4_da_get_block_write(struct inode *inode, sector_t iblock,
  */
 static int mpage_da_map_blocks(struct mpage_da_data *mpd)
 {
-       int err = 0;
+       int err, blks, get_blocks_flags;
        struct buffer_head new;
-       sector_t next;
+       sector_t next = mpd->b_blocknr;
+       unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
+       loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
+       handle_t *handle = NULL;
 
        /*
         * We consider only non-mapped and non-allocated blocks
@@ -2077,33 +2193,43 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
                !(mpd->b_state & (1 << BH_Delay)) &&
                !(mpd->b_state & (1 << BH_Unwritten)))
                return 0;
+
        /*
-        * We need to make sure the BH_Delay flag is passed down to
-        * ext4_da_get_block_write(), since it calls ext4_get_blocks()
-        * with the EXT4_GET_BLOCKS_DELALLOC_RESERVE flag.  This flag
-        * causes ext4_get_blocks() to call
-        * ext4_da_update_reserve_space() if the passed buffer head
-        * has the BH_Delay flag set.  In the future, once we clean up
-        * the interfaces to ext4_get_blocks(), we should pass in a
-        * separate flag which requests that the delayed allocation
-        * statistics should be updated, instead of depending on the
-        * state information getting passed down via the map_bh's
-        * state bitmasks plus the magic
-        * EXT4_GET_BLOCKS_DELALLOC_RESERVE flag.
-        */
-       new.b_state = mpd->b_state & (1 << BH_Delay);
-       new.b_blocknr = 0;
-       new.b_size = mpd->b_size;
-       next = mpd->b_blocknr;
-       /*
-        * If we didn't accumulate anything
-        * to write simply return
+        * If we didn't accumulate anything to write simply return
         */
-       if (!new.b_size)
+       if (!mpd->b_size)
                return 0;
 
-       err = ext4_da_get_block_write(mpd->inode, next, &new);
-       if (err) {
+       handle = ext4_journal_current_handle();
+       BUG_ON(!handle);
+
+       /*
+        * Call ext4_get_blocks() to allocate any delayed allocation
+        * blocks, or to convert an uninitialized extent to be
+        * initialized (in the case where we have written into
+        * one or more preallocated blocks).
+        *
+        * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
+        * indicate that we are on the delayed allocation path.  This
+        * affects functions in many different parts of the allocation
+        * call path.  This flag exists primarily because we don't
+        * want to change *many* call functions, so ext4_get_blocks()
+        * will set the magic i_delalloc_reserved_flag once the
+        * inode's allocation semaphore is taken.
+        *
+        * If the blocks in question were delalloc blocks, set
+        * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
+        * variables are updated after the blocks have been allocated.
+        */
+       new.b_state = 0;
+       get_blocks_flags = (EXT4_GET_BLOCKS_CREATE |
+                           EXT4_GET_BLOCKS_DELALLOC_RESERVE);
+       if (mpd->b_state & (1 << BH_Delay))
+               get_blocks_flags |= EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE;
+       blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks,
+                              &new, get_blocks_flags);
+       if (blks < 0) {
+               err = blks;
                /*
                 * If get block returns with error we simply
                 * return. Later writepage will redirty the page and
@@ -2125,23 +2251,25 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
                 * writepage and writepages will again try to write
                 * the same.
                 */
-               printk(KERN_EMERG "%s block allocation failed for inode %lu "
-                                 "at logical offset %llu with max blocks "
-                                 "%zd with error %d\n",
-                                 __func__, mpd->inode->i_ino,
-                                 (unsigned long long)next,
-                                 mpd->b_size >> mpd->inode->i_blkbits, err);
-               printk(KERN_EMERG "This should not happen.!! "
-                                       "Data will be lost\n");
+               ext4_msg(mpd->inode->i_sb, KERN_CRIT,
+                        "delayed block allocation failed for inode %lu at "
+                        "logical offset %llu with max blocks %zd with "
+                        "error %d\n", mpd->inode->i_ino,
+                        (unsigned long long) next,
+                        mpd->b_size >> mpd->inode->i_blkbits, err);
+               printk(KERN_CRIT "This should not happen!!  "
+                      "Data will be lost\n");
                if (err == -ENOSPC) {
                        ext4_print_free_blocks(mpd->inode);
                }
-               /* invlaidate all the pages */
+               /* invalidate all the pages */
                ext4_da_block_invalidatepages(mpd, next,
                                mpd->b_size >> mpd->inode->i_blkbits);
                return err;
        }
-       BUG_ON(new.b_size == 0);
+       BUG_ON(blks == 0);
+
+       new.b_size = (blks << mpd->inode->i_blkbits);
 
        if (buffer_new(&new))
                __unmap_underlying_blocks(mpd->inode, &new);
@@ -2154,6 +2282,23 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
            (mpd->b_state & (1 << BH_Unwritten)))
                mpage_put_bnr_to_bhs(mpd, next, &new);
 
+       if (ext4_should_order_data(mpd->inode)) {
+               err = ext4_jbd2_file_inode(handle, mpd->inode);
+               if (err)
+                       return err;
+       }
+
+       /*
+        * Update on-disk size along with block allocation.
+        */
+       disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
+       if (disksize > i_size_read(mpd->inode))
+               disksize = i_size_read(mpd->inode);
+       if (disksize > EXT4_I(mpd->inode)->i_disksize) {
+               ext4_update_i_disksize(mpd->inode, disksize);
+               return ext4_mark_inode_dirty(handle, mpd->inode);
+       }
+
        return 0;
 }
 
@@ -2228,15 +2373,9 @@ flush_it:
        return;
 }
 
-static int ext4_bh_unmapped_or_delay(handle_t *handle, struct buffer_head *bh)
+static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
 {
-       /*
-        * unmapped buffer is possible for holes.
-        * delay buffer is possible with delayed allocation.
-        * We also need to consider unwritten buffer as unmapped.
-        */
-       return (!buffer_mapped(bh) || buffer_delay(bh) ||
-                               buffer_unwritten(bh)) && buffer_dirty(bh);
+       return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
 }
 
 /*
@@ -2260,7 +2399,7 @@ static int __mpage_da_writepage(struct page *page,
                /*
                 * Rest of the page in the page_vec
                 * redirty them and skip them. We will
-                * try to to write them again after
+                * try to write them again after
                 * starting a new transaction
                 */
                redirty_page_for_writepage(wbc, page);
@@ -2321,9 +2460,9 @@ static int __mpage_da_writepage(struct page *page,
                         * We need to try to allocate
                         * unmapped blocks in the same page.
                         * Otherwise we won't make progress
-                        * with the page in ext4_da_writepage
+                        * with the page in ext4_writepage
                         */
-                       if (ext4_bh_unmapped_or_delay(NULL, bh)) {
+                       if (ext4_bh_delay_or_unwritten(NULL, bh)) {
                                mpage_add_bh_to_extent(mpd, logical,
                                                       bh->b_size,
                                                       bh->b_state);
@@ -2433,6 +2572,8 @@ static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
        int ret = 0;
        unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
 
+       BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
+
        /*
         * we don't want to do block allocation in writepage
         * so call get_block_wrap with create = 0
@@ -2445,42 +2586,127 @@ static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
        return ret;
 }
 
-/*
- * This function can get called via...
- *   - ext4_da_writepages after taking page lock (have journal handle)
- *   - journal_submit_inode_data_buffers (no journal handle)
- *   - shrink_page_list via pdflush (no journal handle)
- *   - grab_page_cache when doing write_begin (have journal handle)
- */
-static int ext4_da_writepage(struct page *page,
-                               struct writeback_control *wbc)
+static int bget_one(handle_t *handle, struct buffer_head *bh)
 {
-       int ret = 0;
-       loff_t size;
-       unsigned int len;
-       struct buffer_head *page_bufs;
-       struct inode *inode = page->mapping->host;
+       get_bh(bh);
+       return 0;
+}
 
-       trace_mark(ext4_da_writepage,
-                  "dev %s ino %lu page_index %lu",
-                  inode->i_sb->s_id, inode->i_ino, page->index);
-       size = i_size_read(inode);
-       if (page->index == size >> PAGE_CACHE_SHIFT)
-               len = size & ~PAGE_CACHE_MASK;
-       else
-               len = PAGE_CACHE_SIZE;
+static int bput_one(handle_t *handle, struct buffer_head *bh)
+{
+       put_bh(bh);
+       return 0;
+}
 
-       if (page_has_buffers(page)) {
-               page_bufs = page_buffers(page);
-               if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
-                                       ext4_bh_unmapped_or_delay)) {
-                       /*
-                        * We don't want to do  block allocation
-                        * So redirty the page and return
-                        * We may reach here when we do a journal commit
-                        * via journal_submit_inode_data_buffers.
-                        * If we don't have mapping block we just ignore
-                        * them. We can also reach here via shrink_page_list
+static int __ext4_journalled_writepage(struct page *page,
+                                      struct writeback_control *wbc,
+                                      unsigned int len)
+{
+       struct address_space *mapping = page->mapping;
+       struct inode *inode = mapping->host;
+       struct buffer_head *page_bufs;
+       handle_t *handle = NULL;
+       int ret = 0;
+       int err;
+
+       page_bufs = page_buffers(page);
+       BUG_ON(!page_bufs);
+       walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
+       /* As soon as we unlock the page, it can go away, but we have
+        * references to buffers so we are safe */
+       unlock_page(page);
+
+       handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               goto out;
+       }
+
+       ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
+                               do_journal_get_write_access);
+
+       err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
+                               write_end_fn);
+       if (ret == 0)
+               ret = err;
+       err = ext4_journal_stop(handle);
+       if (!ret)
+               ret = err;
+
+       walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
+       EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
+out:
+       return ret;
+}
+
+/*
+ * Note that we don't need to start a transaction unless we're journaling data
+ * because we should have holes filled from ext4_page_mkwrite(). We even don't
+ * need to file the inode to the transaction's list in ordered mode because if
+ * we are writing back data added by write(), the inode is already there and if
+ * we are writing back data modified via mmap(), noone guarantees in which
+ * transaction the data will hit the disk. In case we are journaling data, we
+ * cannot start transaction directly because transaction start ranks above page
+ * lock so we have to do some magic.
+ *
+ * This function can get called via...
+ *   - ext4_da_writepages after taking page lock (have journal handle)
+ *   - journal_submit_inode_data_buffers (no journal handle)
+ *   - shrink_page_list via pdflush (no journal handle)
+ *   - grab_page_cache when doing write_begin (have journal handle)
+ *
+ * We don't do any block allocation in this function. If we have page with
+ * multiple blocks we need to write those buffer_heads that are mapped. This
+ * is important for mmap-based writes. So if, with blocksize 1K, we do
+ * truncate(f, 1024);
+ * a = mmap(f, 0, 4096);
+ * a[0] = 'a';
+ * truncate(f, 4096);
+ * we have the first buffer_head in the page mapped via the page_mkwrite
+ * callback, but the other buffer_heads would be unmapped but dirty (dirtied
+ * via do_wp_page). So writepage should write the first block. If we modify
+ * the mmap area beyond 1024 we will again get a page_fault and the
+ * page_mkwrite callback will do the block allocation and mark the
+ * buffer_heads mapped.
+ *
+ * We redirty the page if we have any buffer_heads that are either delay or
+ * unwritten in the page.
+ *
+ * We can get recursively called as shown below.
+ *
+ *     ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
+ *             ext4_writepage()
+ *
+ * But since we don't do any block allocation we should not deadlock.
+ * The page also has the dirty flag cleared so we don't get recursive page_lock.
+ */
+static int ext4_writepage(struct page *page,
+                         struct writeback_control *wbc)
+{
+       int ret = 0;
+       loff_t size;
+       unsigned int len;
+       struct buffer_head *page_bufs;
+       struct inode *inode = page->mapping->host;
+
+       trace_ext4_writepage(inode, page);
+       size = i_size_read(inode);
+       if (page->index == size >> PAGE_CACHE_SHIFT)
+               len = size & ~PAGE_CACHE_MASK;
+       else
+               len = PAGE_CACHE_SIZE;
+
+       if (page_has_buffers(page)) {
+               page_bufs = page_buffers(page);
+               if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
+                                       ext4_bh_delay_or_unwritten)) {
+                       /*
+                        * We don't want to do block allocation.
+                        * So redirty the page and return.
+                        * We may reach here when we do a journal commit
+                        * via journal_submit_inode_data_buffers.
+                        * If we don't have mapping block we just ignore
+                        * them. We can also reach here via shrink_page_list
                         */
                        redirty_page_for_writepage(wbc, page);
                        unlock_page(page);
@@ -2500,13 +2726,13 @@ static int ext4_da_writepage(struct page *page,
                 * all are mapped and non delay. We don't want to
                 * do block allocation here.
                 */
-               ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
+               ret = block_prepare_write(page, 0, len,
                                          noalloc_get_block_write);
                if (!ret) {
                        page_bufs = page_buffers(page);
                        /* check whether all are mapped and non delay */
                        if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
-                                               ext4_bh_unmapped_or_delay)) {
+                                               ext4_bh_delay_or_unwritten)) {
                                redirty_page_for_writepage(wbc, page);
                                unlock_page(page);
                                return 0;
@@ -2522,7 +2748,16 @@ static int ext4_da_writepage(struct page *page,
                        return 0;
                }
                /* now mark the buffer_heads as dirty and uptodate */
-               block_commit_write(page, 0, PAGE_CACHE_SIZE);
+               block_commit_write(page, 0, len);
+       }
+
+       if (PageChecked(page) && ext4_should_journal_data(inode)) {
+               /*
+                * It's mmapped pagecache.  Add buffers and journal it.  There
+                * doesn't seem much point in redirtying the page here.
+                */
+               ClearPageChecked(page);
+               return __ext4_journalled_writepage(page, wbc, len);
        }
 
        if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
@@ -2570,23 +2805,14 @@ static int ext4_da_writepages(struct address_space *mapping,
        int no_nrwrite_index_update;
        int pages_written = 0;
        long pages_skipped;
+       unsigned int max_pages;
        int range_cyclic, cycled = 1, io_done = 0;
-       int needed_blocks, ret = 0, nr_to_writebump = 0;
+       int needed_blocks, ret = 0;
+       long desired_nr_to_write, nr_to_writebump = 0;
+       loff_t range_start = wbc->range_start;
        struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
 
-       trace_mark(ext4_da_writepages,
-                  "dev %s ino %lu nr_t_write %ld "
-                  "pages_skipped %ld range_start %llu "
-                  "range_end %llu nonblocking %d "
-                  "for_kupdate %d for_reclaim %d "
-                  "for_writepages %d range_cyclic %d",
-                  inode->i_sb->s_id, inode->i_ino,
-                  wbc->nr_to_write, wbc->pages_skipped,
-                  (unsigned long long) wbc->range_start,
-                  (unsigned long long) wbc->range_end,
-                  wbc->nonblocking, wbc->for_kupdate,
-                  wbc->for_reclaim, wbc->for_writepages,
-                  wbc->range_cyclic);
+       trace_ext4_da_writepages(inode, wbc);
 
        /*
         * No pages to write? This is mainly a kludge to avoid starting
@@ -2600,25 +2826,15 @@ static int ext4_da_writepages(struct address_space *mapping,
         * If the filesystem has aborted, it is read-only, so return
         * right away instead of dumping stack traces later on that
         * will obscure the real source of the problem.  We test
-        * EXT4_MOUNT_ABORT instead of sb->s_flag's MS_RDONLY because
+        * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
         * the latter could be true if the filesystem is mounted
         * read-only, and in that case, ext4_da_writepages should
         * *never* be called, so if that ever happens, we would want
         * the stack trace.
         */
-       if (unlikely(sbi->s_mount_opt & EXT4_MOUNT_ABORT))
+       if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
                return -EROFS;
 
-       /*
-        * Make sure nr_to_write is >= sbi->s_mb_stream_request
-        * This make sure small files blocks are allocated in
-        * single attempt. This ensure that small files
-        * get less fragmented.
-        */
-       if (wbc->nr_to_write < sbi->s_mb_stream_request) {
-               nr_to_writebump = sbi->s_mb_stream_request - wbc->nr_to_write;
-               wbc->nr_to_write = sbi->s_mb_stream_request;
-       }
        if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                range_whole = 1;
 
@@ -2633,6 +2849,36 @@ static int ext4_da_writepages(struct address_space *mapping,
        } else
                index = wbc->range_start >> PAGE_CACHE_SHIFT;
 
+       /*
+        * This works around two forms of stupidity.  The first is in
+        * the writeback code, which caps the maximum number of pages
+        * written to be 1024 pages.  This is wrong on multiple
+        * levels; different architectures have a different page size,
+        * which changes the maximum amount of data which gets
+        * written.  Secondly, 4 megabytes is way too small.  XFS
+        * forces this value to be 16 megabytes by multiplying
+        * the nr_to_write parameter by four, and then relies on its
+        * allocator to allocate larger extents to make them
+        * contiguous.  Unfortunately this brings us to the second
+        * stupidity, which is that ext4's mballoc code only allocates
+        * at most 2048 blocks.  So we force contiguous writes up to
+        * the number of dirty blocks in the inode, or
+        * sbi->s_max_writeback_mb_bump, whichever is smaller.
+        */
+       max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
+       if (!range_cyclic && range_whole)
+               desired_nr_to_write = wbc->nr_to_write * 8;
+       else
+               desired_nr_to_write = ext4_num_dirty_pages(inode, index,
+                                                          max_pages);
+       if (desired_nr_to_write > max_pages)
+               desired_nr_to_write = max_pages;
+
+       if (wbc->nr_to_write < desired_nr_to_write) {
+               nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
+               wbc->nr_to_write = desired_nr_to_write;
+       }
+
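
The comment above describes bumping nr_to_write up to the smaller of the inode's
dirty pages and the s_max_writeback_mb_bump cap expressed in pages.  The following
is a stand-alone sketch of that arithmetic (not kernel code); it assumes 4 KiB
pages, folds the range_whole special case away, and uses a hypothetical 128 MB cap.

#include <stdio.h>

#define PG_SHIFT 12	/* assume 4 KiB pages */

/* Bump the caller's nr_to_write up to min(dirty pages, MB cap in pages). */
static long bumped_nr_to_write(long nr_to_write, long dirty_pages,
			       unsigned int max_writeback_mb_bump)
{
	long max_pages = (long)max_writeback_mb_bump << (20 - PG_SHIFT);
	long desired = dirty_pages;

	if (desired > max_pages)
		desired = max_pages;
	if (nr_to_write < desired)
		nr_to_write = desired;
	return nr_to_write;
}

int main(void)
{
	/* A 1024-page request, 5000 dirty pages, hypothetical 128 MB cap. */
	printf("%ld\n", bumped_nr_to_write(1024, 5000, 128));
	return 0;
}
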
        mpd.wbc = wbc;
        mpd.inode = mapping->host;
 
@@ -2660,10 +2906,9 @@ retry:
                handle = ext4_journal_start(inode, needed_blocks);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
-                       printk(KERN_CRIT "%s: jbd2_start: "
+                       ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
                               "%ld pages, ino %lu; err %d\n", __func__,
                                wbc->nr_to_write, inode->i_ino, ret);
-                       dump_stack();
                        goto out_writepages;
                }
 
@@ -2687,7 +2932,7 @@ retry:
                ret = write_cache_pages(mapping, wbc, __mpage_da_writepage,
                                        &mpd);
                /*
-                * If we have a contigous extent of pages and we
+                * If we have a contiguous extent of pages and we
                 * haven't done the I/O yet, map the blocks and submit
                 * them for I/O.
                 */
@@ -2697,6 +2942,7 @@ retry:
                        mpd.io_done = 1;
                        ret = MPAGE_DA_EXTENT_TAIL;
                }
+               trace_ext4_da_write_pages(inode, &mpd);
                wbc->nr_to_write -= mpd.pages_written;
 
                ext4_journal_stop(handle);
@@ -2734,9 +2980,10 @@ retry:
                goto retry;
        }
        if (pages_skipped != wbc->pages_skipped)
-               printk(KERN_EMERG "This should not happen leaving %s "
-                               "with nr_to_write = %ld ret = %d\n",
-                               __func__, wbc->nr_to_write, ret);
+               ext4_msg(inode->i_sb, KERN_CRIT,
+                        "This should not happen leaving %s "
+                        "with nr_to_write = %ld ret = %d\n",
+                        __func__, wbc->nr_to_write, ret);
 
        /* Update index */
        index += pages_written;
@@ -2751,15 +2998,10 @@ retry:
 out_writepages:
        if (!no_nrwrite_index_update)
                wbc->no_nrwrite_index_update = 0;
-       wbc->nr_to_write -= nr_to_writebump;
-       trace_mark(ext4_da_writepage_result,
-                  "dev %s ino %lu ret %d pages_written %d "
-                  "pages_skipped %ld congestion %d "
-                  "more_io %d no_nrwrite_index_update %d",
-                  inode->i_sb->s_id, inode->i_ino, ret,
-                  pages_written, wbc->pages_skipped,
-                  wbc->encountered_congestion, wbc->more_io,
-                  wbc->no_nrwrite_index_update);
+       if (wbc->nr_to_write > nr_to_writebump)
+               wbc->nr_to_write -= nr_to_writebump;
+       wbc->range_start = range_start;
+       trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
        return ret;
 }
 
@@ -2791,8 +3033,8 @@ static int ext4_nonda_switch(struct super_block *sb)
 }
 
 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
-                               loff_t pos, unsigned len, unsigned flags,
-                               struct page **pagep, void **fsdata)
+                              loff_t pos, unsigned len, unsigned flags,
+                              struct page **pagep, void **fsdata)
 {
        int ret, retries = 0;
        struct page *page;
@@ -2811,11 +3053,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
                                        len, flags, pagep, fsdata);
        }
        *fsdata = (void *)0;
-
-       trace_mark(ext4_da_write_begin,
-                  "dev %s ino %lu pos %llu len %u flags %u",
-                  inode->i_sb->s_id, inode->i_ino,
-                  (unsigned long long) pos, len, flags);
+       trace_ext4_da_write_begin(inode, pos, len, flags);
 retry:
        /*
         * With delayed allocation, we don't log the i_disksize update
@@ -2852,7 +3090,7 @@ retry:
                 * i_size_read because we hold i_mutex.
                 */
                if (pos + len > inode->i_size)
-                       vmtruncate(inode, inode->i_size);
+                       ext4_truncate(inode);
        }
 
        if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
@@ -2866,7 +3104,7 @@ out:
  * when we write to the end of file but do not require block allocation
  */
 static int ext4_da_should_update_i_disksize(struct page *page,
-                                        unsigned long offset)
+                                           unsigned long offset)
 {
        struct buffer_head *bh;
        struct inode *inode = page->mapping->host;
@@ -2885,9 +3123,9 @@ static int ext4_da_should_update_i_disksize(struct page *page,
 }
 
 static int ext4_da_write_end(struct file *file,
-                               struct address_space *mapping,
-                               loff_t pos, unsigned len, unsigned copied,
-                               struct page *page, void *fsdata)
+                            struct address_space *mapping,
+                            loff_t pos, unsigned len, unsigned copied,
+                            struct page *page, void *fsdata)
 {
        struct inode *inode = mapping->host;
        int ret = 0, ret2;
@@ -2908,10 +3146,7 @@ static int ext4_da_write_end(struct file *file,
                }
        }
 
-       trace_mark(ext4_da_write_end,
-                  "dev %s ino %lu pos %llu len %u copied %u",
-                  inode->i_sb->s_id, inode->i_ino,
-                  (unsigned long long) pos, len, copied);
+       trace_ext4_da_write_end(inode, pos, len, copied);
        start = pos & (PAGE_CACHE_SIZE - 1);
        end = start + copied - 1;
 
@@ -2978,6 +3213,8 @@ out:
  */
 int ext4_alloc_da_blocks(struct inode *inode)
 {
+       trace_ext4_alloc_da_blocks(inode);
+
        if (!EXT4_I(inode)->i_reserved_data_blocks &&
            !EXT4_I(inode)->i_reserved_meta_blocks)
                return 0;
@@ -2988,7 +3225,7 @@ int ext4_alloc_da_blocks(struct inode *inode)
         * not strictly speaking necessary (and for users of
         * laptop_mode, not even desirable).  However, to do otherwise
         * would require replicating code paths in:
-        * 
+        *
         * ext4_da_writepages() ->
         *    write_cache_pages() ---> (via passed in callback function)
         *        __mpage_da_writepage() -->
@@ -3008,7 +3245,7 @@ int ext4_alloc_da_blocks(struct inode *inode)
         * write out the pages, but rather only collect contiguous
         * logical block extents, call the multi-block allocator, and
         * then update the buffer heads with the block allocations.
-        * 
+        *
         * For now, though, we'll cheat by calling filemap_flush(),
         * which will map the blocks, and start the I/O, but not
         * actually wait for the I/O to complete.
@@ -3078,226 +3315,6 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
        return generic_block_bmap(mapping, block, ext4_get_block);
 }
 
-static int bget_one(handle_t *handle, struct buffer_head *bh)
-{
-       get_bh(bh);
-       return 0;
-}
-
-static int bput_one(handle_t *handle, struct buffer_head *bh)
-{
-       put_bh(bh);
-       return 0;
-}
-
-/*
- * Note that we don't need to start a transaction unless we're journaling data
- * because we should have holes filled from ext4_page_mkwrite(). We even don't
- * need to file the inode to the transaction's list in ordered mode because if
- * we are writing back data added by write(), the inode is already there and if
- * we are writing back data modified via mmap(), noone guarantees in which
- * transaction the data will hit the disk. In case we are journaling data, we
- * cannot start transaction directly because transaction start ranks above page
- * lock so we have to do some magic.
- *
- * In all journaling modes block_write_full_page() will start the I/O.
- *
- * Problem:
- *
- *     ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
- *             ext4_writepage()
- *
- * Similar for:
- *
- *     ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ...
- *
- * Same applies to ext4_get_block().  We will deadlock on various things like
- * lock_journal and i_data_sem
- *
- * Setting PF_MEMALLOC here doesn't work - too many internal memory
- * allocations fail.
- *
- * 16May01: If we're reentered then journal_current_handle() will be
- *         non-zero. We simply *return*.
- *
- * 1 July 2001: @@@ FIXME:
- *   In journalled data mode, a data buffer may be metadata against the
- *   current transaction.  But the same file is part of a shared mapping
- *   and someone does a writepage() on it.
- *
- *   We will move the buffer onto the async_data list, but *after* it has
- *   been dirtied. So there's a small window where we have dirty data on
- *   BJ_Metadata.
- *
- *   Note that this only applies to the last partial page in the file.  The
- *   bit which block_write_full_page() uses prepare/commit for.  (That's
- *   broken code anyway: it's wrong for msync()).
- *
- *   It's a rare case: affects the final partial page, for journalled data
- *   where the file is subject to bith write() and writepage() in the same
- *   transction.  To fix it we'll need a custom block_write_full_page().
- *   We'll probably need that anyway for journalling writepage() output.
- *
- * We don't honour synchronous mounts for writepage().  That would be
- * disastrous.  Any write() or metadata operation will sync the fs for
- * us.
- *
- */
-static int __ext4_normal_writepage(struct page *page,
-                               struct writeback_control *wbc)
-{
-       struct inode *inode = page->mapping->host;
-
-       if (test_opt(inode->i_sb, NOBH))
-               return nobh_writepage(page, noalloc_get_block_write, wbc);
-       else
-               return block_write_full_page(page, noalloc_get_block_write,
-                                            wbc);
-}
-
-static int ext4_normal_writepage(struct page *page,
-                               struct writeback_control *wbc)
-{
-       struct inode *inode = page->mapping->host;
-       loff_t size = i_size_read(inode);
-       loff_t len;
-
-       trace_mark(ext4_normal_writepage,
-                  "dev %s ino %lu page_index %lu",
-                  inode->i_sb->s_id, inode->i_ino, page->index);
-       J_ASSERT(PageLocked(page));
-       if (page->index == size >> PAGE_CACHE_SHIFT)
-               len = size & ~PAGE_CACHE_MASK;
-       else
-               len = PAGE_CACHE_SIZE;
-
-       if (page_has_buffers(page)) {
-               /* if page has buffers it should all be mapped
-                * and allocated. If there are not buffers attached
-                * to the page we know the page is dirty but it lost
-                * buffers. That means that at some moment in time
-                * after write_begin() / write_end() has been called
-                * all buffers have been clean and thus they must have been
-                * written at least once. So they are all mapped and we can
-                * happily proceed with mapping them and writing the page.
-                */
-               BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
-                                       ext4_bh_unmapped_or_delay));
-       }
-
-       if (!ext4_journal_current_handle())
-               return __ext4_normal_writepage(page, wbc);
-
-       redirty_page_for_writepage(wbc, page);
-       unlock_page(page);
-       return 0;
-}
-
-static int __ext4_journalled_writepage(struct page *page,
-                               struct writeback_control *wbc)
-{
-       struct address_space *mapping = page->mapping;
-       struct inode *inode = mapping->host;
-       struct buffer_head *page_bufs;
-       handle_t *handle = NULL;
-       int ret = 0;
-       int err;
-
-       ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
-                                 noalloc_get_block_write);
-       if (ret != 0)
-               goto out_unlock;
-
-       page_bufs = page_buffers(page);
-       walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE, NULL,
-                                                               bget_one);
-       /* As soon as we unlock the page, it can go away, but we have
-        * references to buffers so we are safe */
-       unlock_page(page);
-
-       handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
-       if (IS_ERR(handle)) {
-               ret = PTR_ERR(handle);
-               goto out;
-       }
-
-       ret = walk_page_buffers(handle, page_bufs, 0,
-                       PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
-
-       err = walk_page_buffers(handle, page_bufs, 0,
-                               PAGE_CACHE_SIZE, NULL, write_end_fn);
-       if (ret == 0)
-               ret = err;
-       err = ext4_journal_stop(handle);
-       if (!ret)
-               ret = err;
-
-       walk_page_buffers(handle, page_bufs, 0,
-                               PAGE_CACHE_SIZE, NULL, bput_one);
-       EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
-       goto out;
-
-out_unlock:
-       unlock_page(page);
-out:
-       return ret;
-}
-
-static int ext4_journalled_writepage(struct page *page,
-                               struct writeback_control *wbc)
-{
-       struct inode *inode = page->mapping->host;
-       loff_t size = i_size_read(inode);
-       loff_t len;
-
-       trace_mark(ext4_journalled_writepage,
-                  "dev %s ino %lu page_index %lu",
-                  inode->i_sb->s_id, inode->i_ino, page->index);
-       J_ASSERT(PageLocked(page));
-       if (page->index == size >> PAGE_CACHE_SHIFT)
-               len = size & ~PAGE_CACHE_MASK;
-       else
-               len = PAGE_CACHE_SIZE;
-
-       if (page_has_buffers(page)) {
-               /* if page has buffers it should all be mapped
-                * and allocated. If there are not buffers attached
-                * to the page we know the page is dirty but it lost
-                * buffers. That means that at some moment in time
-                * after write_begin() / write_end() has been called
-                * all buffers have been clean and thus they must have been
-                * written at least once. So they are all mapped and we can
-                * happily proceed with mapping them and writing the page.
-                */
-               BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
-                                       ext4_bh_unmapped_or_delay));
-       }
-
-       if (ext4_journal_current_handle())
-               goto no_write;
-
-       if (PageChecked(page)) {
-               /*
-                * It's mmapped pagecache.  Add buffers and journal it.  There
-                * doesn't seem much point in redirtying the page here.
-                */
-               ClearPageChecked(page);
-               return __ext4_journalled_writepage(page, wbc);
-       } else {
-               /*
-                * It may be a page full of checkpoint-mode buffers.  We don't
-                * really know unless we go poke around in the buffer_heads.
-                * But block_write_full_page will do the right thing.
-                */
-               return block_write_full_page(page, noalloc_get_block_write,
-                                            wbc);
-       }
-no_write:
-       redirty_page_for_writepage(wbc, page);
-       unlock_page(page);
-       return 0;
-}
-
 static int ext4_readpage(struct file *file, struct page *page)
 {
        return mpage_readpage(page, ext4_get_block);
@@ -3340,6 +3357,8 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
 }
 
 /*
+ * O_DIRECT for ext3 (or indirect map) based files
+ *
  * If the O_DIRECT write will extend the file then add this inode to the
  * orphan list.  So recovery will truncate it back to the original size
  * if the machine crashes during the write.
@@ -3348,9 +3367,9 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
  * crashes then stale disk data _may_ be exposed inside the file. But current
  * VFS code falls back into buffered path in that case so we are safe.
  */
-static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
-                       const struct iovec *iov, loff_t offset,
-                       unsigned long nr_segs)
+static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
+                             const struct iovec *iov, loff_t offset,
+                             unsigned long nr_segs)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -3359,6 +3378,7 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
        ssize_t ret;
        int orphan = 0;
        size_t count = iov_length(iov, nr_segs);
+       int retries = 0;
 
        if (rw == WRITE) {
                loff_t final_size = offset + count;
@@ -3381,9 +3401,12 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
                }
        }
 
+retry:
        ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
                                 offset, nr_segs,
                                 ext4_get_block, NULL);
+       if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+               goto retry;
 
        if (orphan) {
                int err;
@@ -3422,6 +3445,359 @@ out:
        return ret;
 }
 
+/* Maximum number of blocks we map for direct IO at once. */
+
+static int ext4_get_block_dio_write(struct inode *inode, sector_t iblock,
+                  struct buffer_head *bh_result, int create)
+{
+       handle_t *handle = NULL;
+       int ret = 0;
+       unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
+       int dio_credits;
+
+       ext4_debug("ext4_get_block_dio_write: inode %lu, create flag %d\n",
+                  inode->i_ino, create);
+       /*
+        * The DIO VFS code passes create = 0 for writes to
+        * the middle of the file. It does this to avoid block
+        * allocation for holes, to prevent exposing stale data
+        * when there is a parallel buffered read (which does
+        * not hold the i_mutex lock) while the direct IO write
+        * has not completed. DIO requests on holes eventually
+        * fall back to buffered IO for this reason.
+        *
+        * For ext4 extent-based files, since we support fallocate,
+        * newly allocated extents are marked uninitialized; for
+        * holes, we could fallocate the blocks, so a parallel
+        * buffered read will zero out the page when it reads a
+        * hole whose parallel DIO write has not completed.
+        *
+        * When we get here, we know it's a direct IO write to
+        * the middle of the file (< i_size), so it's safe to
+        * override the create flag from the VFS.
+        */
+       create = EXT4_GET_BLOCKS_DIO_CREATE_EXT;
+
+       if (max_blocks > DIO_MAX_BLOCKS)
+               max_blocks = DIO_MAX_BLOCKS;
+       dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
+       handle = ext4_journal_start(inode, dio_credits);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               goto out;
+       }
+       ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
+                             create);
+       if (ret > 0) {
+               bh_result->b_size = (ret << inode->i_blkbits);
+               ret = 0;
+       }
+       ext4_journal_stop(handle);
+out:
+       return ret;
+}
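
ext4_get_block_dio_write() above converts the byte budget in bh_result->b_size into
a block count, clamps it to DIO_MAX_BLOCKS before computing journal credits, and
reports the mapped bytes back through b_size.  A stand-alone sketch of those shifts
(not kernel code), assuming 4 KiB blocks and a hypothetical 4096-block cap:

#include <stdio.h>

#define BLKBITS    12		/* assume 4 KiB filesystem blocks */
#define MAX_BLOCKS 4096		/* hypothetical stand-in for DIO_MAX_BLOCKS */

int main(void)
{
	unsigned long long b_size = 64ULL << 20;		/* 64 MB direct IO request */
	unsigned long long max_blocks = b_size >> BLKBITS;	/* bytes -> blocks */

	if (max_blocks > MAX_BLOCKS)				/* clamp per call */
		max_blocks = MAX_BLOCKS;

	/* Pretend everything requested was mapped; report bytes back. */
	b_size = max_blocks << BLKBITS;

	printf("%llu blocks, %llu bytes mapped per call\n", max_blocks, b_size);
	return 0;
}
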
+
+static void ext4_free_io_end(ext4_io_end_t *io)
+{
+       BUG_ON(!io);
+       iput(io->inode);
+       kfree(io);
+}
+static void dump_aio_dio_list(struct inode * inode)
+{
+#ifdef EXT4_DEBUG
+       struct list_head *cur, *before, *after;
+       ext4_io_end_t *io, *io0, *io1;
+
+       if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)){
+               ext4_debug("inode %lu aio dio list is empty\n", inode->i_ino);
+               return;
+       }
+
+       ext4_debug("Dump inode %lu aio_dio_completed_IO list \n", inode->i_ino);
+       list_for_each_entry(io, &EXT4_I(inode)->i_aio_dio_complete_list, list){
+               cur = &io->list;
+               before = cur->prev;
+               io0 = container_of(before, ext4_io_end_t, list);
+               after = cur->next;
+               io1 = container_of(after, ext4_io_end_t, list);
+
+               ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
+                           io, inode->i_ino, io0, io1);
+       }
+#endif
+}
+
+/*
+ * check a range of space and convert unwritten extents to written.
+ */
+static int ext4_end_aio_dio_nolock(ext4_io_end_t *io)
+{
+       struct inode *inode = io->inode;
+       loff_t offset = io->offset;
+       size_t size = io->size;
+       int ret = 0;
+
+       ext4_debug("end_aio_dio_nolock: io 0x%p from inode %lu, list->next 0x%p, "
+                  "list->prev 0x%p\n",
+                  io, inode->i_ino, io->list.next, io->list.prev);
+
+       if (list_empty(&io->list))
+               return ret;
+
+       if (io->flag != DIO_AIO_UNWRITTEN)
+               return ret;
+
+       if (offset + size <= i_size_read(inode))
+               ret = ext4_convert_unwritten_extents(inode, offset, size);
+
+       if (ret < 0) {
+               printk(KERN_EMERG "%s: failed to convert unwritten"
+                       " extents to written extents, error is %d"
+                       " io is still on inode %lu aio dio list\n",
+                       __func__, ret, inode->i_ino);
+               return ret;
+       }
+
+       /* clear the DIO AIO unwritten flag */
+       io->flag = 0;
+       return ret;
+}
+/*
+ * Work on completed aio dio IO, to convert unwritten extents to written extents.
+ */
+static void ext4_end_aio_dio_work(struct work_struct *work)
+{
+       ext4_io_end_t *io  = container_of(work, ext4_io_end_t, work);
+       struct inode *inode = io->inode;
+       int ret = 0;
+
+       mutex_lock(&inode->i_mutex);
+       ret = ext4_end_aio_dio_nolock(io);
+       if (ret >= 0) {
+               if (!list_empty(&io->list))
+                       list_del_init(&io->list);
+               ext4_free_io_end(io);
+       }
+       mutex_unlock(&inode->i_mutex);
+}
+/*
+ * This function is called from ext4_sync_file().
+ *
+ * When AIO DIO IO is completed, the work to convert unwritten
+ * extents to written is queued on a workqueue but may not get immediately
+ * scheduled. When fsync is called, we need to ensure the
+ * conversion is complete before fsync returns.
+ * The inode keeps track of a list of completed AIO from the DIO path
+ * that might need the conversion. This function walks through
+ * the list and converts the related unwritten extents to written.
+ */
+int flush_aio_dio_completed_IO(struct inode *inode)
+{
+       ext4_io_end_t *io;
+       int ret = 0;
+       int ret2 = 0;
+
+       if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list))
+               return ret;
+
+       dump_aio_dio_list(inode);
+       while (!list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)){
+               io = list_entry(EXT4_I(inode)->i_aio_dio_complete_list.next,
+                               ext4_io_end_t, list);
+               /*
+                * Calling ext4_end_aio_dio_nolock() to convert completed
+                * IO to written.
+                *
+                * When ext4_sync_file() is called, run_queue() may already
+                * be about to flush the work corresponding to this io
+                * structure. It will be upset if it finds that the io
+                * structure related to the work to be scheduled has been freed.
+                *
+                * Thus we need to keep the io structure still valid here
+                * after the conversion has finished. The io structure has a
+                * flag to avoid double conversion from both fsync and the
+                * background workqueue.
+                */
+               ret = ext4_end_aio_dio_nolock(io);
+               if (ret < 0)
+                       ret2 = ret;
+               else
+                       list_del_init(&io->list);
+       }
+       return (ret2 < 0) ? ret2 : 0;
+}
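
flush_aio_dio_completed_IO() above drains the per-inode list of completed AIO DIO
requests, converting any record that still carries the unwritten flag; the flag is
what keeps fsync and the background worker from converting the same range twice.
A minimal single-threaded user-space sketch of that bookkeeping (not kernel code,
simplified to stop at the first error and using an ad-hoc singly-linked list):

#include <stdio.h>
#include <stdlib.h>

struct io_end {
	int unwritten;			/* still needs unwritten->written conversion */
	long offset, size;
	struct io_end *next;
};

static struct io_end *complete_list;	/* stands in for the per-inode list */

/* Convert one record; the flag keeps a second caller from converting again. */
static int convert_one(struct io_end *io)
{
	if (!io->unwritten)
		return 0;
	printf("converting [%ld, %ld)\n", io->offset, io->offset + io->size);
	io->unwritten = 0;
	return 0;
}

/* What fsync does: walk the completed list, convert, then free each record. */
static int flush_completed_io(void)
{
	while (complete_list) {
		struct io_end *io = complete_list;

		if (convert_one(io) < 0)
			return -1;
		complete_list = io->next;
		free(io);
	}
	return 0;
}

int main(void)
{
	struct io_end *io = calloc(1, sizeof(*io));

	if (!io)
		return 1;
	io->unwritten = 1;
	io->offset = 4096;
	io->size = 8192;
	io->next = complete_list;
	complete_list = io;

	return flush_completed_io();
}
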
+
+static ext4_io_end_t *ext4_init_io_end (struct inode *inode)
+{
+       ext4_io_end_t *io = NULL;
+
+       io = kmalloc(sizeof(*io), GFP_NOFS);
+
+       if (io) {
+               igrab(inode);
+               io->inode = inode;
+               io->flag = 0;
+               io->offset = 0;
+               io->size = 0;
+               io->error = 0;
+               INIT_WORK(&io->work, ext4_end_aio_dio_work);
+               INIT_LIST_HEAD(&io->list);
+       }
+
+       return io;
+}
+
+static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
+                           ssize_t size, void *private)
+{
+        ext4_io_end_t *io_end = iocb->private;
+       struct workqueue_struct *wq;
+
+       ext_debug("ext4_end_io_dio(): io_end 0x%p "
+                 "for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
+                 iocb->private, io_end->inode->i_ino, iocb, offset,
+                 size);
+       /* if not async direct IO, or a dio with a 0 byte write, just return */
+       if (!io_end || !size)
+               return;
+
+       /* if not aio dio with unwritten extents, just free io and return */
+       if (io_end->flag != DIO_AIO_UNWRITTEN){
+               ext4_free_io_end(io_end);
+               iocb->private = NULL;
+               return;
+       }
+
+       io_end->offset = offset;
+       io_end->size = size;
+       wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
+
+       /* queue the work to convert unwritten extents to written */
+       queue_work(wq, &io_end->work);
+
+       /* Add the io_end to the per-inode completed aio dio list */
+       list_add_tail(&io_end->list,
+                &EXT4_I(io_end->inode)->i_aio_dio_complete_list);
+       iocb->private = NULL;
+}
+/*
+ * For ext4 extent files, ext4 will do direct-io writes to holes,
+ * preallocated extents, and writes that extend the file; there is no
+ * need to fall back to buffered IO.
+ *
+ * For holes, we fallocate those blocks and mark them as uninitialized.
+ * If those blocks were preallocated, we make sure they are split, but
+ * still keep the range to write as uninitialized.
+ *
+ * The unwritten extents will be converted to written when DIO is completed.
+ * For async direct IO, since the IO may still be pending when we return, we
+ * set up an end_io callback function, which will do the conversion
+ * when the async direct IO is completed.
+ *
+ * If the O_DIRECT write will extend the file then add this inode to the
+ * orphan list.  So recovery will truncate it back to the original size
+ * if the machine crashes during the write.
+ *
+ */
+static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
+                             const struct iovec *iov, loff_t offset,
+                             unsigned long nr_segs)
+{
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file->f_mapping->host;
+       ssize_t ret;
+       size_t count = iov_length(iov, nr_segs);
+
+       loff_t final_size = offset + count;
+       if (rw == WRITE && final_size <= inode->i_size) {
+               /*
+                * We can do a direct write to holes and to fallocated extents.
+                *
+                * Allocated blocks to fill the hole are marked as uninitialized
+                * to prevent a parallel buffered read from exposing stale data
+                * before the DIO completes the data IO.
+                *
+                * As to previously fallocated extents, ext4 get_block
+                * will just simply mark the buffer mapped but still
+                * keep the extents uninitialized.
+                *
+                * For the non-AIO case, we will convert those unwritten
+                * extents to written after returning from blockdev_direct_IO.
+                *
+                * For async DIO, the conversion needs to be deferred until
+                * the IO is completed. The ext4 end_io callback function
+                * will be called to take care of the conversion work.
+                * Here, for the async case, we allocate an io_end structure
+                * to hook to the iocb.
+                */
+               iocb->private = NULL;
+               EXT4_I(inode)->cur_aio_dio = NULL;
+               if (!is_sync_kiocb(iocb)) {
+                       iocb->private = ext4_init_io_end(inode);
+                       if (!iocb->private)
+                               return -ENOMEM;
+                       /*
+                        * We save the io structure for the current async
+                        * direct IO, so that later ext4_get_blocks()
+                        * can flag in the io structure whether there
+                        * are unwritten extents that need to be converted
+                        * when the IO is completed.
+                        */
+                       EXT4_I(inode)->cur_aio_dio = iocb->private;
+               }
+
+               ret = blockdev_direct_IO(rw, iocb, inode,
+                                        inode->i_sb->s_bdev, iov,
+                                        offset, nr_segs,
+                                        ext4_get_block_dio_write,
+                                        ext4_end_io_dio);
+               if (iocb->private)
+                       EXT4_I(inode)->cur_aio_dio = NULL;
+               /*
+                * The io_end structure takes a reference to the inode;
+                * that structure needs to be destroyed and the
+                * reference to the inode needs to be dropped when the IO
+                * is complete, even with a 0 byte write, or if it failed.
+                *
+                * In the successful AIO DIO case, the io_end structure will be
+                * destroyed and the reference to the inode will be dropped
+                * after the end_io callback function is called.
+                *
+                * In the 0 byte write or error case, since the
+                * VFS direct IO won't invoke the end_io callback function,
+                * we need to free the end_io structure here.
+                */
+               if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
+                       ext4_free_io_end(iocb->private);
+                       iocb->private = NULL;
+               } else if (ret > 0)
+                       /*
+                        * For the non-AIO case, since the IO is already
+                        * completed, we can do the conversion right here.
+                        */
+                       ret = ext4_convert_unwritten_extents(inode,
+                                                               offset, ret);
+               return ret;
+       }
+
+       /* for writes extending the end of file, we fall back to the old way */
+       return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
+}
+
+static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
+                             const struct iovec *iov, loff_t offset,
+                             unsigned long nr_segs)
+{
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file->f_mapping->host;
+
+       if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
+               return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
+
+       return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
+}
+
 /*
  * Pages can be marked dirty completely asynchronously from ext4's journalling
  * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
@@ -3444,7 +3820,7 @@ static int ext4_journalled_set_page_dirty(struct page *page)
 static const struct address_space_operations ext4_ordered_aops = {
        .readpage               = ext4_readpage,
        .readpages              = ext4_readpages,
-       .writepage              = ext4_normal_writepage,
+       .writepage              = ext4_writepage,
        .sync_page              = block_sync_page,
        .write_begin            = ext4_write_begin,
        .write_end              = ext4_ordered_write_end,
@@ -3454,12 +3830,13 @@ static const struct address_space_operations ext4_ordered_aops = {
        .direct_IO              = ext4_direct_IO,
        .migratepage            = buffer_migrate_page,
        .is_partially_uptodate  = block_is_partially_uptodate,
+       .error_remove_page      = generic_error_remove_page,
 };
 
 static const struct address_space_operations ext4_writeback_aops = {
        .readpage               = ext4_readpage,
        .readpages              = ext4_readpages,
-       .writepage              = ext4_normal_writepage,
+       .writepage              = ext4_writepage,
        .sync_page              = block_sync_page,
        .write_begin            = ext4_write_begin,
        .write_end              = ext4_writeback_write_end,
@@ -3469,12 +3846,13 @@ static const struct address_space_operations ext4_writeback_aops = {
        .direct_IO              = ext4_direct_IO,
        .migratepage            = buffer_migrate_page,
        .is_partially_uptodate  = block_is_partially_uptodate,
+       .error_remove_page      = generic_error_remove_page,
 };
 
 static const struct address_space_operations ext4_journalled_aops = {
        .readpage               = ext4_readpage,
        .readpages              = ext4_readpages,
-       .writepage              = ext4_journalled_writepage,
+       .writepage              = ext4_writepage,
        .sync_page              = block_sync_page,
        .write_begin            = ext4_write_begin,
        .write_end              = ext4_journalled_write_end,
@@ -3483,12 +3861,13 @@ static const struct address_space_operations ext4_journalled_aops = {
        .invalidatepage         = ext4_invalidatepage,
        .releasepage            = ext4_releasepage,
        .is_partially_uptodate  = block_is_partially_uptodate,
+       .error_remove_page      = generic_error_remove_page,
 };
 
 static const struct address_space_operations ext4_da_aops = {
        .readpage               = ext4_readpage,
        .readpages              = ext4_readpages,
-       .writepage              = ext4_da_writepage,
+       .writepage              = ext4_writepage,
        .writepages             = ext4_da_writepages,
        .sync_page              = block_sync_page,
        .write_begin            = ext4_da_write_begin,
@@ -3499,6 +3878,7 @@ static const struct address_space_operations ext4_da_aops = {
        .direct_IO              = ext4_direct_IO,
        .migratepage            = buffer_migrate_page,
        .is_partially_uptodate  = block_is_partially_uptodate,
+       .error_remove_page      = generic_error_remove_page,
 };
 
 void ext4_set_aops(struct inode *inode)
@@ -3535,7 +3915,8 @@ int ext4_block_truncate_page(handle_t *handle,
        struct page *page;
        int err = 0;
 
-       page = grab_cache_page(mapping, from >> PAGE_CACHE_SHIFT);
+       page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
+                                  mapping_gfp_mask(mapping) & ~__GFP_FS);
        if (!page)
                return -EINVAL;
 
@@ -3670,13 +4051,14 @@ static inline int all_zeroes(__le32 *p, __le32 *q)
  *                     (no partially truncated stuff there).  */
 
 static Indirect *ext4_find_shared(struct inode *inode, int depth,
-                       ext4_lblk_t offsets[4], Indirect chain[4], __le32 *top)
+                                 ext4_lblk_t offsets[4], Indirect chain[4],
+                                 __le32 *top)
 {
        Indirect *partial, *p;
        int k, err;
 
        *top = 0;
-       /* Make k index the deepest non-null offest + 1 */
+       /* Make k index the deepest non-null offset + 1 */
        for (k = depth; k > 1 && !offsets[k-1]; k--)
                ;
        partial = ext4_get_branch(inode, k, offsets, chain, &err);
@@ -3726,8 +4108,10 @@ no_top:
  * than `count' because there can be holes in there.
  */
 static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
-               struct buffer_head *bh, ext4_fsblk_t block_to_free,
-               unsigned long count, __le32 *first, __le32 *last)
+                             struct buffer_head *bh,
+                             ext4_fsblk_t block_to_free,
+                             unsigned long count, __le32 *first,
+                             __le32 *last)
 {
        __le32 *p;
        if (try_to_extend_transaction(handle, inode)) {
@@ -3736,7 +4120,8 @@ static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
                        ext4_handle_dirty_metadata(handle, inode, bh);
                }
                ext4_mark_inode_dirty(handle, inode);
-               ext4_journal_test_restart(handle, inode);
+               ext4_truncate_restart_trans(handle, inode,
+                                           blocks_for_truncate(inode));
                if (bh) {
                        BUFFER_TRACE(bh, "retaking write access");
                        ext4_journal_get_write_access(handle, bh);
@@ -3744,10 +4129,11 @@ static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
        }
 
        /*
-        * Any buffers which are on the journal will be in memory. We find
-        * them on the hash table so jbd2_journal_revoke() will run jbd2_journal_forget()
-        * on them.  We've already detached each block from the file, so
-        * bforget() in jbd2_journal_forget() should be safe.
+        * Any buffers which are on the journal will be in memory. We
+        * find them on the hash table so jbd2_journal_revoke() will
+        * run jbd2_journal_forget() on them.  We've already detached
+        * each block from the file, so bforget() in
+        * jbd2_journal_forget() should be safe.
         *
         * AKPM: turn on bforget in jbd2_journal_forget()!!!
         */
@@ -3946,7 +4332,8 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
                                return;
                        if (try_to_extend_transaction(handle, inode)) {
                                ext4_mark_inode_dirty(handle, inode);
-                               ext4_journal_test_restart(handle, inode);
+                               ext4_truncate_restart_trans(handle, inode,
+                                           blocks_for_truncate(inode));
                        }
 
                        ext4_free_blocks(handle, inode, nr, 1, 1);
@@ -4118,7 +4505,7 @@ void ext4_truncate(struct inode *inode)
                                   (__le32*)partial->bh->b_data+addr_per_block,
                                   (chain+n-1) - partial);
                BUFFER_TRACE(partial->bh, "call brelse");
-               brelse (partial->bh);
+               brelse(partial->bh);
                partial--;
        }
 do_indirects:
@@ -4359,8 +4746,9 @@ void ext4_get_inode_flags(struct ext4_inode_info *ei)
        if (flags & S_DIRSYNC)
                ei->i_flags |= EXT4_DIRSYNC_FL;
 }
+
 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
-                                       struct ext4_inode_info *ei)
+                                 struct ext4_inode_info *ei)
 {
        blkcnt_t i_blocks ;
        struct inode *inode = &(ei->vfs_inode);
@@ -4399,10 +4787,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                return inode;
 
        ei = EXT4_I(inode);
-#ifdef CONFIG_EXT4_FS_POSIX_ACL
-       ei->i_acl = EXT4_ACL_NOT_CACHED;
-       ei->i_default_acl = EXT4_ACL_NOT_CACHED;
-#endif
 
        ret = __ext4_get_inode_loc(inode, &iloc, 0);
        if (ret < 0)
@@ -4475,7 +4859,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                                        EXT4_GOOD_OLD_INODE_SIZE +
                                        ei->i_extra_isize;
                        if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
-                                ei->i_state |= EXT4_STATE_XATTR;
+                               ei->i_state |= EXT4_STATE_XATTR;
                }
        } else
                ei->i_extra_isize = 0;
@@ -4494,7 +4878,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
 
        ret = 0;
        if (ei->i_file_acl &&
-           ((ei->i_file_acl < 
+           ((ei->i_file_acl <
              (le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) +
               EXT4_SB(sb)->s_gdb_count)) ||
             (ei->i_file_acl >= ext4_blocks_count(EXT4_SB(sb)->s_es)))) {
@@ -4509,15 +4893,15 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                     !ext4_inode_is_fast_symlink(inode)))
                        /* Validate extent which is part of inode */
                        ret = ext4_ext_check_inode(inode);
-       } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+       } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
                   (S_ISLNK(inode->i_mode) &&
                    !ext4_inode_is_fast_symlink(inode))) {
-               /* Validate block references which are part of inode */
+               /* Validate block references which are part of inode */
                ret = ext4_check_inode_blockref(inode);
        }
        if (ret) {
-               brelse(bh);
-               goto bad_inode;
+               brelse(bh);
+               goto bad_inode;
        }
 
        if (S_ISREG(inode->i_mode)) {
@@ -4548,7 +4932,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
        } else {
                brelse(bh);
                ret = -EIO;
-               ext4_error(inode->i_sb, __func__, 
+               ext4_error(inode->i_sb, __func__,
                           "bogus i_mode (%o) for inode=%lu",
                           inode->i_mode, inode->i_ino);
                goto bad_inode;
@@ -4659,8 +5043,7 @@ static int ext4_do_update_inode(handle_t *handle,
        if (ext4_inode_blocks_set(handle, raw_inode, ei))
                goto out_brelse;
        raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
-       /* clear the migrate flag in the raw_inode */
-       raw_inode->i_flags = cpu_to_le32(ei->i_flags & ~EXT4_EXT_MIGRATE);
+       raw_inode->i_flags = cpu_to_le32(ei->i_flags);
        if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
            cpu_to_le32(EXT4_OS_HURD))
                raw_inode->i_file_acl_high =
@@ -4701,8 +5084,9 @@ static int ext4_do_update_inode(handle_t *handle,
                                cpu_to_le32(new_encode_dev(inode->i_rdev));
                        raw_inode->i_block[2] = 0;
                }
-       } else for (block = 0; block < EXT4_N_BLOCKS; block++)
-               raw_inode->i_block[block] = ei->i_data[block];
+       } else
+               for (block = 0; block < EXT4_N_BLOCKS; block++)
+                       raw_inode->i_block[block] = ei->i_data[block];
 
        raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
        if (ei->i_extra_isize) {
@@ -4761,34 +5145,36 @@ out_brelse:
  */
 int ext4_write_inode(struct inode *inode, int wait)
 {
+       int err;
+
        if (current->flags & PF_MEMALLOC)
                return 0;
 
-       if (ext4_journal_current_handle()) {
-               jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
-               dump_stack();
-               return -EIO;
-       }
-
-       if (!wait)
-               return 0;
+       if (EXT4_SB(inode->i_sb)->s_journal) {
+               if (ext4_journal_current_handle()) {
+                       jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
+                       dump_stack();
+                       return -EIO;
+               }
 
-       return ext4_force_commit(inode->i_sb);
-}
+               if (!wait)
+                       return 0;
 
-int __ext4_write_dirty_metadata(struct inode *inode, struct buffer_head *bh)
-{
-       int err = 0;
+               err = ext4_force_commit(inode->i_sb);
+       } else {
+               struct ext4_iloc iloc;
 
-       mark_buffer_dirty(bh);
-       if (inode && inode_needs_sync(inode)) {
-               sync_dirty_buffer(bh);
-               if (buffer_req(bh) && !buffer_uptodate(bh)) {
+               err = ext4_get_inode_loc(inode, &iloc);
+               if (err)
+                       return err;
+               if (wait)
+                       sync_dirty_buffer(iloc.bh);
+               if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
                        ext4_error(inode->i_sb, __func__,
                                   "IO error syncing inode, "
                                   "inode=%lu, block=%llu",
                                   inode->i_ino,
-                                  (unsigned long long)bh->b_blocknr);
+                                  (unsigned long long)iloc.bh->b_blocknr);
                        err = -EIO;
                }
        }
@@ -4984,7 +5370,7 @@ static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
  * worst case, the index blocks spread over different block groups
  *
  * If datablocks are discontiguous, it is possible for them to spread over
- * different block groups too. If they are contiugous, with flexbg,
+ * different block groups too. If they are contiguous, with flexbg,
  * they could still cross a block group boundary.
  *
  * Also account for superblock, inode, quota and xattr blocks
@@ -5060,7 +5446,7 @@ int ext4_writepage_trans_blocks(struct inode *inode)
  * Calculate the journal credits for a chunk of data modification.
  *
  * This is called from DIO, fallocate or whoever calling
- * ext4_get_blocks() to map/allocate a chunk of contigous disk blocks.
+ * ext4_get_blocks() to map/allocate a chunk of contiguous disk blocks.
  *
  * journal buffers for data blocks are not included here, as DIO
  * and fallocate do not need to journal data buffers.
@@ -5075,7 +5461,7 @@ int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
  * Given this, we know that the caller already has write access to iloc->bh.
  */
 int ext4_mark_iloc_dirty(handle_t *handle,
-               struct inode *inode, struct ext4_iloc *iloc)
+                        struct inode *inode, struct ext4_iloc *iloc)
 {
        int err = 0;
 
@@ -5230,27 +5616,14 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
  */
 void ext4_dirty_inode(struct inode *inode)
 {
-       handle_t *current_handle = ext4_journal_current_handle();
        handle_t *handle;
 
-       if (!ext4_handle_valid(current_handle)) {
-               ext4_mark_inode_dirty(current_handle, inode);
-               return;
-       }
-
        handle = ext4_journal_start(inode, 2);
        if (IS_ERR(handle))
                goto out;
-       if (current_handle &&
-               current_handle->h_transaction != handle->h_transaction) {
-               /* This task has a transaction open against a different fs */
-               printk(KERN_EMERG "%s: transactions do not match!\n",
-                      __func__);
-       } else {
-               jbd_debug(5, "marking dirty.  outer handle=%p\n",
-                               current_handle);
-               ext4_mark_inode_dirty(handle, inode);
-       }
+
+       ext4_mark_inode_dirty(handle, inode);
+
        ext4_journal_stop(handle);
 out:
        return;
@@ -5377,12 +5750,21 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        else
                len = PAGE_CACHE_SIZE;
 
+       lock_page(page);
+       /*
+        * Return if we have all the buffers mapped. This avoids
+        * the need to call write_begin/write_end, which does a
+        * journal_start/journal_stop, which can block and take a
+        * long time
+        */
        if (page_has_buffers(page)) {
-               /* return if we have all the buffers mapped */
                if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
-                                      ext4_bh_unmapped))
+                                       ext4_bh_unmapped)) {
+                       unlock_page(page);
                        goto out_unlock;
+               }
        }
+       unlock_page(page);
        /*
         * OK, we need to fill the hole... Do write_begin write_end
          * to do block allocation/reservation. We are not holding