tree-wide: fix assorted typos all over the place
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 71d3ecd..0282ec7 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
 #include <linux/namei.h>
 #include <linux/uio.h>
 #include <linux/bio.h>
+#include <linux/workqueue.h>
+
 #include "ext4_jbd2.h"
 #include "xattr.h"
 #include "acl.h"
 #include "ext4_extents.h"
 
+#include <trace/events/ext4.h>
+
 #define MPAGE_DA_EXTENT_TAIL 0x01
 
 static inline int ext4_begin_ordered_truncate(struct inode *inode,
@@ -75,22 +79,20 @@ static int ext4_inode_is_fast_symlink(struct inode *inode)
  * but there may still be a record of it in the journal, and that record
  * still needs to be revoked.
  *
- * If the handle isn't valid we're not journaling so there's nothing to do.
+ * If the handle isn't valid we're not journaling, but we still need to
+ * call into ext4_journal_revoke() to put the buffer head.
  */
 int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
-                       struct buffer_head *bh, ext4_fsblk_t blocknr)
+               struct buffer_head *bh, ext4_fsblk_t blocknr)
 {
        int err;
 
-       if (!ext4_handle_valid(handle))
-               return 0;
-
        might_sleep();
 
        BUFFER_TRACE(bh, "enter");
 
        jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
-                 "data mode %lx\n",
+                 "data mode %x\n",
                  bh, is_metadata, inode->i_mode,
                  test_opt(inode->i_sb, DATA_FLAGS));
 
@@ -191,11 +193,24 @@ static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
  * so before we call here everything must be consistently dirtied against
  * this transaction.
  */
-static int ext4_journal_test_restart(handle_t *handle, struct inode *inode)
+int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
+                                int nblocks)
 {
+       int ret;
+
+       /*
+        * Drop i_data_sem to avoid deadlock with ext4_get_blocks.  At this
+        * moment, get_block can be called only for blocks inside i_size since
+        * the page cache has already been dropped and writes are blocked by
+        * i_mutex.  So we can safely drop the i_data_sem here.
+        */
        BUG_ON(EXT4_JOURNAL(inode) == NULL);
        jbd_debug(2, "restarting handle %p\n", handle);
-       return ext4_journal_restart(handle, blocks_for_truncate(inode));
+       up_write(&EXT4_I(inode)->i_data_sem);
+       ret = ext4_journal_restart(handle, blocks_for_truncate(inode));
+       down_write(&EXT4_I(inode)->i_data_sem);
+
+       return ret;
 }
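For reference, a minimal sketch of the caller pattern this helper serves (try_to_extend_transaction() and blocks_for_truncate() live elsewhere in this file; the wrapper below is illustrative, not the actual ext4_truncate() body):

    /* Refresh journal credits mid-truncate; i_data_sem is dropped and
     * re-taken inside ext4_truncate_restart_trans() as shown above. */
    static int refresh_truncate_credits(handle_t *handle, struct inode *inode)
    {
            if (!try_to_extend_transaction(handle, inode))
                    return 0;       /* credits are fine or handle was extended */
            ext4_mark_inode_dirty(handle, inode);
            return ext4_truncate_restart_trans(handle, inode,
                                               blocks_for_truncate(inode));
    }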
 
 /*
@@ -329,8 +344,8 @@ static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
  */
 
 static int ext4_block_to_path(struct inode *inode,
-                       ext4_lblk_t i_block,
-                       ext4_lblk_t offsets[4], int *boundary)
+                             ext4_lblk_t i_block,
+                             ext4_lblk_t offsets[4], int *boundary)
 {
        int ptrs = EXT4_ADDR_PER_BLOCK(inode->i_sb);
        int ptrs_bits = EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb);
@@ -340,9 +355,7 @@ static int ext4_block_to_path(struct inode *inode,
        int n = 0;
        int final = 0;
 
-       if (i_block < 0) {
-               ext4_warning(inode->i_sb, "ext4_block_to_path", "block < 0");
-       } else if (i_block < direct_blocks) {
+       if (i_block < direct_blocks) {
                offsets[n++] = i_block;
                final = direct_blocks;
        } else if ((i_block -= direct_blocks) < indirect_blocks) {
@@ -362,15 +375,44 @@ static int ext4_block_to_path(struct inode *inode,
                final = ptrs;
        } else {
                ext4_warning(inode->i_sb, "ext4_block_to_path",
-                               "block %lu > max in inode %lu",
-                               i_block + direct_blocks +
-                               indirect_blocks + double_blocks, inode->i_ino);
+                            "block %lu > max in inode %lu",
+                            i_block + direct_blocks +
+                            indirect_blocks + double_blocks, inode->i_ino);
        }
        if (boundary)
                *boundary = final - 1 - (i_block & (ptrs - 1));
        return n;
 }
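A worked example of the mapping (assuming a 4KB block size, so ptrs = EXT4_ADDR_PER_BLOCK = 1024, ptrs_bits = 10, and the usual 12 direct blocks):

    /*
     * i_block = 5000:
     *   5000 - 12   = 4988         past the direct blocks
     *   4988 - 1024 = 3964         past the single-indirect range
     *   3964 < 1024 * 1024         -> double indirect, depth n = 3
     *   offsets = { EXT4_DIND_BLOCK, 3964 >> 10 = 3, 3964 & 1023 = 892 }
     */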
 
+static int __ext4_check_blockref(const char *function, struct inode *inode,
+                                __le32 *p, unsigned int max)
+{
+       __le32 *bref = p;
+       unsigned int blk;
+
+       while (bref < p+max) {
+               blk = le32_to_cpu(*bref++);
+               if (blk &&
+                   unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb),
+                                                   blk, 1))) {
+                       ext4_error(inode->i_sb, function,
+                                  "invalid block reference %u "
+                                  "in inode #%lu", blk, inode->i_ino);
+                       return -EIO;
+               }
+       }
+       return 0;
+}
+
+
+#define ext4_check_indirect_blockref(inode, bh)                         \
+       __ext4_check_blockref(__func__, inode, (__le32 *)(bh)->b_data,  \
+                             EXT4_ADDR_PER_BLOCK((inode)->i_sb))
+
+#define ext4_check_inode_blockref(inode)                                \
+       __ext4_check_blockref(__func__, inode, EXT4_I(inode)->i_data,   \
+                             EXT4_NDIR_BLOCKS)
+
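Both wrappers only fix the array base and length. A hedged sketch of the kind of call site they serve (ext4_get_branch() below uses the first; the inode read path is the natural home for the second):

    /* Validate the block pointers stored in the inode itself; on a bad
     * reference this reports the error and returns -EIO. */
    if (ext4_check_inode_blockref(inode))
            return -EIO;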
 /**
  *     ext4_get_branch - read the chain of indirect blocks leading to data
  *     @inode: inode in question
@@ -415,9 +457,22 @@ static Indirect *ext4_get_branch(struct inode *inode, int depth,
        if (!p->key)
                goto no_block;
        while (--depth) {
-               bh = sb_bread(sb, le32_to_cpu(p->key));
-               if (!bh)
+               bh = sb_getblk(sb, le32_to_cpu(p->key));
+               if (unlikely(!bh))
                        goto failure;
+
+               if (!bh_uptodate_or_lock(bh)) {
+                       if (bh_submit_read(bh) < 0) {
+                               put_bh(bh);
+                               goto failure;
+                       }
+                       /* validate block references */
+                       if (ext4_check_indirect_blockref(inode, bh)) {
+                               put_bh(bh);
+                               goto failure;
+                       }
+               }
+
                add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
                /* Reader: end */
                if (!p->key)
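Replacing sb_bread() with sb_getblk() plus bh_uptodate_or_lock()/bh_submit_read() keeps the cached-buffer fast path free of I/O while funnelling freshly read indirect blocks through the new validation. The idiom in isolation (a sketch, with errors reduced to NULL returns):

    static struct buffer_head *read_and_check(struct super_block *sb,
                                              sector_t blocknr)
    {
            struct buffer_head *bh = sb_getblk(sb, blocknr);

            if (!bh)
                    return NULL;
            /* bh_uptodate_or_lock() returns 1 for an uptodate buffer;
             * otherwise it returns 0 with the buffer locked, ready for
             * bh_submit_read() to issue the read and wait on it. */
            if (!bh_uptodate_or_lock(bh)) {
                    if (bh_submit_read(bh) < 0) {
                            put_bh(bh);
                            return NULL;
                    }
            }
            return bh;
    }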
@@ -459,6 +514,8 @@ static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
        ext4_fsblk_t bg_start;
        ext4_fsblk_t last_block;
        ext4_grpblk_t colour;
+       ext4_group_t block_group;
+       int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
 
        /* Try to find previous block */
        for (p = ind->p - 1; p >= start; p--) {
@@ -474,9 +531,22 @@ static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
         * It is going to be referred to from the inode itself? OK, just put it
         * into the same cylinder group then.
         */
-       bg_start = ext4_group_first_block_no(inode->i_sb, ei->i_block_group);
+       block_group = ei->i_block_group;
+       if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
+               block_group &= ~(flex_size-1);
+               if (S_ISREG(inode->i_mode))
+                       block_group++;
+       }
+       bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
        last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;
 
+       /*
+        * If we are doing delayed allocation, we don't need to take
+        * colour into account.
+        */
+       if (test_opt(inode->i_sb, DELALLOC))
+               return bg_start;
+
        if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
                colour = (current->pid % 16) *
                        (EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
@@ -493,15 +563,21 @@ static ext4_fsblk_t ext4_find_near(struct inode *inode, Indirect *ind)
  *
  *     Normally this function finds the preferred place for block allocation
  *     and returns it.
+ *     Because this is only used for non-extent files, we limit the block nr
+ *     to 32 bits.
  */
 static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
-               Indirect *partial)
+                                  Indirect *partial)
 {
+       ext4_fsblk_t goal;
+
        /*
         * XXX need to get goal block from mballoc's data structures
         */
 
-       return ext4_find_near(inode, partial);
+       goal = ext4_find_near(inode, partial);
+       goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
+       return goal;
 }
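Numerically, with flex_size = 16 the flex_bg adjustment and the 32-bit clamp above work out as:

    /*
     * An inode in block group 37:
     *   37 & ~(16 - 1) = 32    directories: first group of the flex group
     *   32 + 1         = 33    regular files: skip the metadata-heavy group
     * The goal returned by ext4_find_near() is then masked with
     * EXT4_MAX_BLOCK_FILE_PHYS because indirect-mapped files can only
     * address 32-bit physical block numbers.
     */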
 
 /**
@@ -517,7 +593,7 @@ static ext4_fsblk_t ext4_find_goal(struct inode *inode, ext4_lblk_t block,
  *     direct and indirect blocks.
  */
 static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
-               int blocks_to_boundary)
+                                int blocks_to_boundary)
 {
        unsigned int count = 0;
 
@@ -553,9 +629,9 @@ static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
  *             direct blocks
  */
 static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
-                               ext4_lblk_t iblock, ext4_fsblk_t goal,
-                               int indirect_blks, int blks,
-                               ext4_fsblk_t new_blocks[4], int *err)
+                            ext4_lblk_t iblock, ext4_fsblk_t goal,
+                            int indirect_blks, int blks,
+                            ext4_fsblk_t new_blocks[4], int *err)
 {
        struct ext4_allocation_request ar;
        int target, i;
@@ -582,6 +658,8 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
                if (*err)
                        goto failed_out;
 
+               BUG_ON(current_block + count > EXT4_MAX_BLOCK_FILE_PHYS);
+
                target -= count;
                /* allocate blocks for indirect blocks */
                while (index < indirect_blks && count) {
@@ -616,6 +694,7 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
                ar.flags = EXT4_MB_HINT_DATA;
 
        current_block = ext4_mb_new_blocks(handle, &ar, err);
+       BUG_ON(current_block + ar.len > EXT4_MAX_BLOCK_FILE_PHYS);
 
        if (*err && (target == blks)) {
                /*
@@ -626,10 +705,10 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
        }
        if (!*err) {
                if (target == blks) {
-               /*
-                * save the new block number
-                * for the first direct block
-                */
+                       /*
+                        * save the new block number
+                        * for the first direct block
+                        */
                        new_blocks[index] = current_block;
                }
                blk_allocated += ar.len;
@@ -671,9 +750,9 @@ failed_out:
  *     as described above and return 0.
  */
 static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
-                               ext4_lblk_t iblock, int indirect_blks,
-                               int *blks, ext4_fsblk_t goal,
-                               ext4_lblk_t *offsets, Indirect *branch)
+                            ext4_lblk_t iblock, int indirect_blks,
+                            int *blks, ext4_fsblk_t goal,
+                            ext4_lblk_t *offsets, Indirect *branch)
 {
        int blocksize = inode->i_sb->s_blocksize;
        int i, n = 0;
@@ -704,8 +783,9 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
                BUFFER_TRACE(bh, "call get_create_access");
                err = ext4_journal_get_create_access(handle, bh);
                if (err) {
+                       /* Don't brelse(bh) here; it's done in
+                        * ext4_journal_forget() below */
                        unlock_buffer(bh);
-                       brelse(bh);
                        goto failed;
                }
 
@@ -720,7 +800,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
                         * the chain to point to the newly allocated
                         * data block numbers
                         */
-                       for (i=1; i < num; i++)
+                       for (i = 1; i < num; i++)
                                *(branch[n].p + i) = cpu_to_le32(++current_block);
                }
                BUFFER_TRACE(bh, "marking uptodate");
@@ -763,7 +843,8 @@ failed:
  * chain to new block and return 0.
  */
 static int ext4_splice_branch(handle_t *handle, struct inode *inode,
-                       ext4_lblk_t block, Indirect *where, int num, int blks)
+                             ext4_lblk_t block, Indirect *where, int num,
+                             int blks)
 {
        int i;
        int err = 0;
@@ -795,10 +876,6 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
        }
 
        /* We are done with atomic stuff, now do the rest of housekeeping */
-
-       inode->i_ctime = ext4_current_time(inode);
-       ext4_mark_inode_dirty(handle, inode);
-
        /* had we spliced it onto indirect block? */
        if (where->bh) {
                /*
@@ -817,8 +894,8 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
        } else {
                /*
                 * OK, we spliced it into the inode itself on a direct block.
-                * Inode was dirtied above.
                 */
+               ext4_mark_inode_dirty(handle, inode);
                jbd_debug(5, "splicing direct\n");
        }
        return err;
@@ -836,6 +913,10 @@ err_out:
 }
 
 /*
+ * The ext4_ind_get_blocks() function handles non-extent inodes
+ * (i.e., using the traditional indirect/double-indirect i_blocks
+ * scheme) for ext4_get_blocks().
+ *
  * Allocation strategy is simple: if we have to allocate something, we will
  * have to go the whole way to leaf. So let's do it before attaching anything
  * to tree, set linkage between the newborn blocks, write them if sync is
@@ -853,15 +934,16 @@ err_out:
  * return = 0, if plain lookup failed.
  * return < 0, error case.
  *
- *
- * Need to be called with
- * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
- * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
+ * The ext4_ind_get_blocks() function should be called with
+ * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
+ * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
+ * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
+ * blocks.
  */
-static int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
-                                 ext4_lblk_t iblock, unsigned int maxblocks,
-                                 struct buffer_head *bh_result,
-                                 int create, int extend_disksize)
+static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
+                              ext4_lblk_t iblock, unsigned int maxblocks,
+                              struct buffer_head *bh_result,
+                              int flags)
 {
        int err = -EIO;
        ext4_lblk_t offsets[4];
@@ -871,16 +953,13 @@ static int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
        int indirect_blks;
        int blocks_to_boundary = 0;
        int depth;
-       struct ext4_inode_info *ei = EXT4_I(inode);
        int count = 0;
        ext4_fsblk_t first_block = 0;
-       loff_t disksize;
-
 
        J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
-       J_ASSERT(handle != NULL || create == 0);
+       J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
        depth = ext4_block_to_path(inode, iblock, offsets,
-                                       &blocks_to_boundary);
+                                  &blocks_to_boundary);
 
        if (depth == 0)
                goto out;
@@ -907,7 +986,7 @@ static int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
        }
 
        /* Next simple case - plain lookup or failed read of indirect block */
-       if (!create || err == -EIO)
+       if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
                goto cleanup;
 
        /*
@@ -928,8 +1007,8 @@ static int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
         * Block out ext4_truncate while we alter the tree
         */
        err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
-                                       &count, goal,
-                                       offsets + (partial - chain), partial);
+                               &count, goal,
+                               offsets + (partial - chain), partial);
 
        /*
         * The ext4_splice_branch call will free and forget any buffers
@@ -940,20 +1019,8 @@ static int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
         */
        if (!err)
                err = ext4_splice_branch(handle, inode, iblock,
-                                       partial, indirect_blks, count);
-       /*
-        * i_disksize growing is protected by i_data_sem.  Don't forget to
-        * protect it if you're about to implement concurrent
-        * ext4_get_block() -bzzz
-       */
-       if (!err && extend_disksize) {
-               disksize = ((loff_t) iblock + count) << inode->i_blkbits;
-               if (disksize > i_size_read(inode))
-                       disksize = i_size_read(inode);
-               if (disksize > ei->i_disksize)
-                       ei->i_disksize = disksize;
-       }
-       if (err)
+                                        partial, indirect_blks, count);
+       else
                goto cleanup;
 
        set_buffer_new(bh_result);
@@ -1052,13 +1119,92 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
        /*
         * free the over-booked quota for metadata blocks
         */
-
        if (mdb_free)
                vfs_dq_release_reservation_block(inode, mdb_free);
+
+       /*
+        * If we have done all the pending block allocations and if
+        * there aren't any writers on the inode, we can discard the
+        * inode's preallocations.
+        */
+       if (!total && (atomic_read(&inode->i_writecount) == 0))
+               ext4_discard_preallocations(inode);
+}
+
+static int check_block_validity(struct inode *inode, const char *msg,
+                               sector_t logical, sector_t phys, int len)
+{
+       if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), phys, len)) {
+               ext4_error(inode->i_sb, msg,
+                          "inode #%lu logical block %llu mapped to %llu "
+                          "(size %d)", inode->i_ino,
+                          (unsigned long long) logical,
+                          (unsigned long long) phys, len);
+               return -EIO;
+       }
+       return 0;
 }
 
 /*
- * The ext4_get_blocks_wrap() function try to look up the requested blocks,
+ * Return the number of contiguous dirty pages in a given inode
+ * starting at page frame idx.
+ */
+static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
+                                   unsigned int max_pages)
+{
+       struct address_space *mapping = inode->i_mapping;
+       pgoff_t index;
+       struct pagevec pvec;
+       pgoff_t num = 0;
+       int i, nr_pages, done = 0;
+
+       if (max_pages == 0)
+               return 0;
+       pagevec_init(&pvec, 0);
+       while (!done) {
+               index = idx;
+               nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+                                             PAGECACHE_TAG_DIRTY,
+                                             (pgoff_t)PAGEVEC_SIZE);
+               if (nr_pages == 0)
+                       break;
+               for (i = 0; i < nr_pages; i++) {
+                       struct page *page = pvec.pages[i];
+                       struct buffer_head *bh, *head;
+
+                       lock_page(page);
+                       if (unlikely(page->mapping != mapping) ||
+                           !PageDirty(page) ||
+                           PageWriteback(page) ||
+                           page->index != idx) {
+                               done = 1;
+                               unlock_page(page);
+                               break;
+                       }
+                       if (page_has_buffers(page)) {
+                               bh = head = page_buffers(page);
+                               do {
+                                       if (!buffer_delay(bh) &&
+                                           !buffer_unwritten(bh))
+                                               done = 1;
+                                       bh = bh->b_this_page;
+                               } while (!done && (bh != head));
+                       }
+                       unlock_page(page);
+                       if (done)
+                               break;
+                       idx++;
+                       num++;
+                       if (num >= max_pages)
+                               break;
+               }
+               pagevec_release(&pvec);
+       }
+       return num;
+}
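A hedged sketch of how a writeback caller can use this count (the intended consumer is the delayed-allocation writepages path, which wants nr_to_write to cover a whole dirty run so one large extent can be allocated; the exact policy there may differ):

    pgoff_t desired = ext4_num_dirty_pages(inode, index, max_pages);

    /* the helper never returns more than max_pages */
    if (wbc->nr_to_write < (long) desired)
            wbc->nr_to_write = desired;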
+
+/*
+ * The ext4_get_blocks() function tries to look up the requested blocks,
  * and returns if the blocks are already mapped.
  *
  * Otherwise it takes the write lock of the i_data_sem and allocates blocks
@@ -1066,7 +1212,7 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
  * mapped.
  *
  * If file type is extents based, it will call ext4_ext_get_blocks(),
- * Otherwise, call with ext4_get_blocks_handle() to handle indirect mapping
+ * otherwise it calls ext4_ind_get_blocks() to handle indirect mapping
  * based files
  *
  * On success, it returns the number of blocks being mapped or allocated.
@@ -1079,30 +1225,41 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
  *
  * It returns the error in case of allocation failure.
  */
-int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
-                       unsigned int max_blocks, struct buffer_head *bh,
-                       int create, int extend_disksize, int flag)
+int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
+                   unsigned int max_blocks, struct buffer_head *bh,
+                   int flags)
 {
        int retval;
 
        clear_buffer_mapped(bh);
+       clear_buffer_unwritten(bh);
 
+       ext_debug("ext4_get_blocks(): inode %lu, flag %d, max_blocks %u, "
+                 "logical block %lu\n", inode->i_ino, flags, max_blocks,
+                 (unsigned long)block);
        /*
-        * Try to see if we can get  the block without requesting
-        * for new file system block.
+        * Try to see if we can get the block without requesting a new
+        * file system block.
         */
        down_read((&EXT4_I(inode)->i_data_sem));
        if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
                retval =  ext4_ext_get_blocks(handle, inode, block, max_blocks,
-                               bh, 0, 0);
+                               bh, 0);
        } else {
-               retval = ext4_get_blocks_handle(handle,
-                               inode, block, max_blocks, bh, 0, 0);
+               retval = ext4_ind_get_blocks(handle, inode, block, max_blocks,
+                                            bh, 0);
        }
        up_read((&EXT4_I(inode)->i_data_sem));
 
+       if (retval > 0 && buffer_mapped(bh)) {
+               int ret = check_block_validity(inode, "file system corruption",
+                                              block, bh->b_blocknr, retval);
+               if (ret != 0)
+                       return ret;
+       }
+
        /* If it is only a block(s) look up */
-       if (!create)
+       if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
                return retval;
 
        /*
@@ -1116,6 +1273,18 @@ int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
                return retval;
 
        /*
+        * When we call get_blocks without the create flag, the
+        * BH_Unwritten flag could have gotten set if the blocks
+        * requested were part of an uninitialized extent.  We need to
+        * clear this flag now that we are committed to convert all or
+        * part of the uninitialized extent to be an initialized
+        * extent.  This is because we need to avoid the combination
+        * of BH_Unwritten and BH_Mapped flags being simultaneously
+        * set on the buffer_head.
+        */
+       clear_buffer_unwritten(bh);
+
+       /*
  * New block allocation and/or writing to an uninitialized extent
         * will possibly result in updating i_data, so we take
         * the write lock of i_data_sem, and call get_blocks()
@@ -1129,7 +1298,7 @@ int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
         * let the underlying get_block() function know to
         * avoid double accounting
         */
-       if (flag)
+       if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
                EXT4_I(inode)->i_delalloc_reserved_flag = 1;
        /*
         * We need to check for EXT4 here because migrate
@@ -1137,10 +1306,10 @@ int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
         */
        if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
                retval =  ext4_ext_get_blocks(handle, inode, block, max_blocks,
-                               bh, create, extend_disksize);
+                                             bh, flags);
        } else {
-               retval = ext4_get_blocks_handle(handle, inode, block,
-                               max_blocks, bh, create, extend_disksize);
+               retval = ext4_ind_get_blocks(handle, inode, block,
+                                            max_blocks, bh, flags);
 
                if (retval > 0 && buffer_new(bh)) {
                        /*
@@ -1148,23 +1317,28 @@ int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
                         * i_data's format changing.  Force the migrate
                         * to fail by clearing migrate flags
                         */
-                       EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags &
-                                                       ~EXT4_EXT_MIGRATE;
+                       EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE;
                }
        }
 
-       if (flag) {
+       if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
                EXT4_I(inode)->i_delalloc_reserved_flag = 0;
-               /*
-                * Update reserved blocks/metadata blocks
-                * after successful block allocation
-                * which were deferred till now
-                */
-               if ((retval > 0) && buffer_delay(bh))
-                       ext4_da_update_reserve_space(inode, retval);
-       }
+
+       /*
+        * Update reserved blocks/metadata blocks after successful
+        * block allocation which had been deferred till now.
+        */
+       if ((retval > 0) && (flags & EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE))
+               ext4_da_update_reserve_space(inode, retval);
 
        up_write((&EXT4_I(inode)->i_data_sem));
+       if (retval > 0 && buffer_mapped(bh)) {
+               int ret = check_block_validity(inode, "file system "
+                                              "corruption after allocation",
+                                              block, bh->b_blocknr, retval);
+               if (ret != 0)
+                       return ret;
+       }
        return retval;
 }
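With the old (create, extend_disksize, flag) arguments folded into a single flags word, typical call sites look like this:

    /* Plain lookup, e.g. on the read path: no allocation requested. */
    ret = ext4_get_blocks(handle, inode, block, max_blocks, bh, 0);

    /* Writing out delalloc blocks: allocate, and update the reserved-space
     * accounting that was deferred when the pages were dirtied. */
    ret = ext4_get_blocks(handle, inode, block, max_blocks, bh,
                          EXT4_GET_BLOCKS_CREATE |
                          EXT4_GET_BLOCKS_DELALLOC_RESERVE |
                          EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE);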
 
@@ -1192,8 +1366,8 @@ int ext4_get_block(struct inode *inode, sector_t iblock,
                started = 1;
        }
 
-       ret = ext4_get_blocks_wrap(handle, inode, iblock,
-                                       max_blocks, bh_result, create, 0, 0);
+       ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
+                             create ? EXT4_GET_BLOCKS_CREATE : 0);
        if (ret > 0) {
                bh_result->b_size = (ret << inode->i_blkbits);
                ret = 0;
@@ -1212,17 +1386,19 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
 {
        struct buffer_head dummy;
        int fatal = 0, err;
+       int flags = 0;
 
        J_ASSERT(handle != NULL || create == 0);
 
        dummy.b_state = 0;
        dummy.b_blocknr = -1000;
        buffer_trace_init(&dummy.b_history);
-       err = ext4_get_blocks_wrap(handle, inode, block, 1,
-                                       &dummy, create, 1, 0);
+       if (create)
+               flags |= EXT4_GET_BLOCKS_CREATE;
+       err = ext4_get_blocks(handle, inode, block, 1, &dummy, flags);
        /*
-        * ext4_get_blocks_handle() returns number of blocks
-        * mapped. 0 in case of a HOLE.
+        * ext4_get_blocks() returns number of blocks mapped. 0 in
+        * case of a HOLE.
         */
        if (err > 0) {
                if (err > 1)
@@ -1309,8 +1485,7 @@ static int walk_page_buffers(handle_t *handle,
 
        for (bh = head, block_start = 0;
             ret == 0 && (bh != head || !block_start);
-            block_start = block_end, bh = next)
-       {
+            block_start = block_end, bh = next) {
                next = bh->b_this_page;
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
@@ -1351,7 +1526,7 @@ static int walk_page_buffers(handle_t *handle,
  * write.
  */
 static int do_journal_get_write_access(handle_t *handle,
-                                       struct buffer_head *bh)
+                                      struct buffer_head *bh)
 {
        if (!buffer_mapped(bh) || buffer_freed(bh))
                return 0;
@@ -1359,22 +1534,24 @@ static int do_journal_get_write_access(handle_t *handle,
 }
 
 static int ext4_write_begin(struct file *file, struct address_space *mapping,
-                               loff_t pos, unsigned len, unsigned flags,
-                               struct page **pagep, void **fsdata)
+                           loff_t pos, unsigned len, unsigned flags,
+                           struct page **pagep, void **fsdata)
 {
        struct inode *inode = mapping->host;
-       int ret, needed_blocks = ext4_writepage_trans_blocks(inode);
+       int ret, needed_blocks;
        handle_t *handle;
        int retries = 0;
        struct page *page;
-       pgoff_t index;
+       pgoff_t index;
        unsigned from, to;
 
-       trace_mark(ext4_write_begin,
-                  "dev %s ino %lu pos %llu len %u flags %u",
-                  inode->i_sb->s_id, inode->i_ino,
-                  (unsigned long long) pos, len, flags);
-       index = pos >> PAGE_CACHE_SHIFT;
+       trace_ext4_write_begin(inode, pos, len, flags);
+       /*
+        * Reserve one block more for addition to orphan list in case
+        * we allocate blocks but write fails for some reason
+        */
+       needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
+       index = pos >> PAGE_CACHE_SHIFT;
        from = pos & (PAGE_CACHE_SIZE - 1);
        to = from + len;
 
@@ -1407,15 +1584,30 @@ retry:
 
        if (ret) {
                unlock_page(page);
-               ext4_journal_stop(handle);
                page_cache_release(page);
                /*
                 * block_write_begin may have instantiated a few blocks
                 * outside i_size.  Trim these off again. Don't need
                 * i_size_read because we hold i_mutex.
+                *
+                * Add inode to orphan list in case we crash before
+                * truncate finishes
                 */
-               if (pos + len > inode->i_size)
-                       vmtruncate(inode, inode->i_size);
+               if (pos + len > inode->i_size && ext4_can_truncate(inode))
+                       ext4_orphan_add(handle, inode);
+
+               ext4_journal_stop(handle);
+               if (pos + len > inode->i_size) {
+                       ext4_truncate(inode);
+                       /*
+                        * If truncate failed early the inode might
+                        * still be on the orphan list; we need to
+                        * make sure the inode is removed from the
+                        * orphan list in that case.
+                        */
+                       if (inode->i_nlink)
+                               ext4_orphan_del(NULL, inode);
+               }
        }
 
        if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
@@ -1433,6 +1625,52 @@ static int write_end_fn(handle_t *handle, struct buffer_head *bh)
        return ext4_handle_dirty_metadata(handle, NULL, bh);
 }
 
+static int ext4_generic_write_end(struct file *file,
+                                 struct address_space *mapping,
+                                 loff_t pos, unsigned len, unsigned copied,
+                                 struct page *page, void *fsdata)
+{
+       int i_size_changed = 0;
+       struct inode *inode = mapping->host;
+       handle_t *handle = ext4_journal_current_handle();
+
+       copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+
+       /*
+        * No need to use i_size_read() here, the i_size
+        * cannot change under us because we hold i_mutex.
+        *
+        * But it's important to update i_size while still holding page lock:
+        * page writeout could otherwise come in and zero beyond i_size.
+        */
+       if (pos + copied > inode->i_size) {
+               i_size_write(inode, pos + copied);
+               i_size_changed = 1;
+       }
+
+       if (pos + copied > EXT4_I(inode)->i_disksize) {
+               /* We need to mark the inode dirty even if
+                * new_i_size is less than inode->i_size
+                * but greater than i_disksize. (hint: delalloc)
+                */
+               ext4_update_i_disksize(inode, (pos + copied));
+               i_size_changed = 1;
+       }
+       unlock_page(page);
+       page_cache_release(page);
+
+       /*
+        * Don't mark the inode dirty under page lock. First, it unnecessarily
+        * makes the holding time of page lock longer. Second, it forces lock
+        * ordering of page lock and transaction start for journaling
+        * filesystems.
+        */
+       if (i_size_changed)
+               ext4_mark_inode_dirty(handle, inode);
+
+       return copied;
+}
+
 /*
  * We need to pick up the new inode size which generic_commit_write gave us
  * `file' can be NULL - eg, when called from page_symlink().
@@ -1441,36 +1679,27 @@ static int write_end_fn(handle_t *handle, struct buffer_head *bh)
  * buffers are managed internally.
  */
 static int ext4_ordered_write_end(struct file *file,
-                               struct address_space *mapping,
-                               loff_t pos, unsigned len, unsigned copied,
-                               struct page *page, void *fsdata)
+                                 struct address_space *mapping,
+                                 loff_t pos, unsigned len, unsigned copied,
+                                 struct page *page, void *fsdata)
 {
        handle_t *handle = ext4_journal_current_handle();
        struct inode *inode = mapping->host;
        int ret = 0, ret2;
 
-       trace_mark(ext4_ordered_write_end,
-                  "dev %s ino %lu pos %llu len %u copied %u",
-                  inode->i_sb->s_id, inode->i_ino,
-                  (unsigned long long) pos, len, copied);
+       trace_ext4_ordered_write_end(inode, pos, len, copied);
        ret = ext4_jbd2_file_inode(handle, inode);
 
        if (ret == 0) {
-               loff_t new_i_size;
-
-               new_i_size = pos + copied;
-               if (new_i_size > EXT4_I(inode)->i_disksize) {
-                       ext4_update_i_disksize(inode, new_i_size);
-                       /* We need to mark inode dirty even if
-                        * new_i_size is less that inode->i_size
-                        * bu greater than i_disksize.(hint delalloc)
-                        */
-                       ext4_mark_inode_dirty(handle, inode);
-               }
-
-               ret2 = generic_write_end(file, mapping, pos, len, copied,
+               ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
                                                        page, fsdata);
                copied = ret2;
+               if (pos + len > inode->i_size && ext4_can_truncate(inode))
+                       /* If we have allocated more blocks and copied
+                        * less, we will have blocks allocated outside
+                        * inode->i_size.  So truncate them.
+                        */
+                       ext4_orphan_add(handle, inode);
                if (ret2 < 0)
                        ret = ret2;
        }
@@ -1478,36 +1707,41 @@ static int ext4_ordered_write_end(struct file *file,
        if (!ret)
                ret = ret2;
 
+       if (pos + len > inode->i_size) {
+               ext4_truncate(inode);
+               /*
+                * If truncate failed early the inode might still be
+                * on the orphan list; we need to make sure the inode
+                * is removed from the orphan list in that case.
+                */
+               if (inode->i_nlink)
+                       ext4_orphan_del(NULL, inode);
+       }
+
        return ret ? ret : copied;
 }
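The same three-step cleanup now appears in each write_end variant and in ext4_write_begin()'s error path: while the handle is open, put the inode on the orphan list if the write may have allocated blocks beyond i_size; stop the handle; then truncate the stray blocks and, if the inode is still linked, take it back off the orphan list. Condensed:

    if (pos + len > inode->i_size && ext4_can_truncate(inode))
            ext4_orphan_add(handle, inode);  /* covers a crash before truncate */
    ret2 = ext4_journal_stop(handle);
    if (pos + len > inode->i_size) {
            ext4_truncate(inode);
            if (inode->i_nlink)              /* truncate bailed out early */
                    ext4_orphan_del(NULL, inode);
    }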
 
 static int ext4_writeback_write_end(struct file *file,
-                               struct address_space *mapping,
-                               loff_t pos, unsigned len, unsigned copied,
-                               struct page *page, void *fsdata)
+                                   struct address_space *mapping,
+                                   loff_t pos, unsigned len, unsigned copied,
+                                   struct page *page, void *fsdata)
 {
        handle_t *handle = ext4_journal_current_handle();
        struct inode *inode = mapping->host;
        int ret = 0, ret2;
-       loff_t new_i_size;
 
-       trace_mark(ext4_writeback_write_end,
-                  "dev %s ino %lu pos %llu len %u copied %u",
-                  inode->i_sb->s_id, inode->i_ino,
-                  (unsigned long long) pos, len, copied);
-       new_i_size = pos + copied;
-       if (new_i_size > EXT4_I(inode)->i_disksize) {
-               ext4_update_i_disksize(inode, new_i_size);
-               /* We need to mark inode dirty even if
-                * new_i_size is less that inode->i_size
-                * bu greater than i_disksize.(hint delalloc)
-                */
-               ext4_mark_inode_dirty(handle, inode);
-       }
-
-       ret2 = generic_write_end(file, mapping, pos, len, copied,
+       trace_ext4_writeback_write_end(inode, pos, len, copied);
+       ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
                                                        page, fsdata);
        copied = ret2;
+       if (pos + len > inode->i_size && ext4_can_truncate(inode))
+               /* If we have allocated more blocks and copied
+                * less, we will have blocks allocated outside
+                * inode->i_size.  So truncate them.
+                */
+               ext4_orphan_add(handle, inode);
+
        if (ret2 < 0)
                ret = ret2;
 
@@ -1515,13 +1749,24 @@ static int ext4_writeback_write_end(struct file *file,
        if (!ret)
                ret = ret2;
 
+       if (pos + len > inode->i_size) {
+               ext4_truncate(inode);
+               /*
+                * If truncate failed early the inode might still be
+                * on the orphan list; we need to make sure the inode
+                * is removed from the orphan list in that case.
+                */
+               if (inode->i_nlink)
+                       ext4_orphan_del(NULL, inode);
+       }
+
        return ret ? ret : copied;
 }
 
 static int ext4_journalled_write_end(struct file *file,
-                               struct address_space *mapping,
-                               loff_t pos, unsigned len, unsigned copied,
-                               struct page *page, void *fsdata)
+                                    struct address_space *mapping,
+                                    loff_t pos, unsigned len, unsigned copied,
+                                    struct page *page, void *fsdata)
 {
        handle_t *handle = ext4_journal_current_handle();
        struct inode *inode = mapping->host;
@@ -1530,10 +1775,7 @@ static int ext4_journalled_write_end(struct file *file,
        unsigned from, to;
        loff_t new_i_size;
 
-       trace_mark(ext4_journalled_write_end,
-                  "dev %s ino %lu pos %llu len %u copied %u",
-                  inode->i_sb->s_id, inode->i_ino,
-                  (unsigned long long) pos, len, copied);
+       trace_ext4_journalled_write_end(inode, pos, len, copied);
        from = pos & (PAGE_CACHE_SIZE - 1);
        to = from + len;
 
@@ -1559,10 +1801,27 @@ static int ext4_journalled_write_end(struct file *file,
        }
 
        unlock_page(page);
+       page_cache_release(page);
+       if (pos + len > inode->i_size && ext4_can_truncate(inode))
+               /* If we have allocated more blocks and copied
+                * less, we will have blocks allocated outside
+                * inode->i_size.  So truncate them.
+                */
+               ext4_orphan_add(handle, inode);
+
        ret2 = ext4_journal_stop(handle);
        if (!ret)
                ret = ret2;
-       page_cache_release(page);
+       if (pos + len > inode->i_size) {
+               ext4_truncate(inode);
+               /*
+                * If truncate failed early the inode might still be
+                * on the orphan list; we need to make sure the inode
+                * is removed from the orphan list in that case.
+                */
+               if (inode->i_nlink)
+                       ext4_orphan_del(NULL, inode);
+       }
 
        return ret ? ret : copied;
 }
@@ -1599,11 +1858,11 @@ repeat:
 
        if (ext4_claim_free_blocks(sbi, total)) {
                spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+               vfs_dq_release_reservation_block(inode, total);
                if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
                        yield();
                        goto repeat;
                }
-               vfs_dq_release_reservation_block(inode, total);
                return -ENOSPC;
        }
        EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
@@ -1662,7 +1921,7 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
 }
 
 static void ext4_da_page_release_reservation(struct page *page,
-                                               unsigned long offset)
+                                            unsigned long offset)
 {
        int to_release = 0;
        struct buffer_head *head, *bh;
@@ -1686,17 +1945,6 @@ static void ext4_da_page_release_reservation(struct page *page,
  * Delayed allocation stuff
  */
 
-struct mpage_da_data {
-       struct inode *inode;
-       struct buffer_head lbh;                 /* extent of blocks */
-       unsigned long first_page, next_page;    /* extent of pages */
-       get_block_t *get_block;
-       struct writeback_control *wbc;
-       int io_done;
-       int pages_written;
-       int retval;
-};
-
 /*
  * mpage_da_submit_io - walks through the extent of pages and tries to write
  * them with the writepage() callback
@@ -1704,7 +1952,6 @@ struct mpage_da_data {
  * @mpd->inode: inode
  * @mpd->first_page: first page of the extent
  * @mpd->next_page: page after the last page of the extent
- * @mpd->get_block: the filesystem's block mapper function
  *
  * By the time mpage_da_submit_io() is called we expect all blocks
  * to be allocated.  This may be wrong if allocation failed.
@@ -1724,7 +1971,7 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd)
        /*
         * We need to start from the first_page to the next_page - 1
         * to make sure we also write the mapped dirty buffer_heads.
-        * If we look at mpd->lbh.b_blocknr we would only be looking
+        * If we look at mpd->b_blocknr we would only be looking
         * at the currently mapped buffer_heads.
         */
        index = mpd->first_page;
@@ -1776,7 +2023,7 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd)
  * @logical - first logical block to start assignment with
  *
  * the function goes through all the passed space and puts actual disk
- * block numbers into buffer heads, dropping BH_Delay
+ * block numbers into buffer heads, dropping BH_Delay and BH_Unwritten
  */
 static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical,
                                 struct buffer_head *exbh)
@@ -1826,16 +2073,24 @@ static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical,
                        do {
                                if (cur_logical >= logical + blocks)
                                        break;
-                               if (buffer_delay(bh)) {
-                                       bh->b_blocknr = pblock;
-                                       clear_buffer_delay(bh);
-                                       bh->b_bdev = inode->i_sb->s_bdev;
-                               } else if (buffer_unwritten(bh)) {
-                                       bh->b_blocknr = pblock;
-                                       clear_buffer_unwritten(bh);
-                                       set_buffer_mapped(bh);
-                                       set_buffer_new(bh);
-                                       bh->b_bdev = inode->i_sb->s_bdev;
+
+                               if (buffer_delay(bh) ||
+                                               buffer_unwritten(bh)) {
+
+                                       BUG_ON(bh->b_bdev != inode->i_sb->s_bdev);
+
+                                       if (buffer_delay(bh)) {
+                                               clear_buffer_delay(bh);
+                                               bh->b_blocknr = pblock;
+                                       } else {
+                                               /*
+                                                * an unwritten buffer should
+                                                * already have its blocknr
+                                                * assigned; verify that
+                                                */
+                                               clear_buffer_unwritten(bh);
+                                               BUG_ON(bh->b_blocknr != pblock);
+                                       }
+
                                } else if (buffer_mapped(bh))
                                        BUG_ON(bh->b_blocknr != pblock);
 
@@ -1899,94 +2154,122 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
 static void ext4_print_free_blocks(struct inode *inode)
 {
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-       printk(KERN_EMERG "Total free blocks count %lld\n",
-                       ext4_count_free_blocks(inode->i_sb));
-       printk(KERN_EMERG "Free/Dirty block details\n");
-       printk(KERN_EMERG "free_blocks=%lld\n",
-                       (long long)percpu_counter_sum(&sbi->s_freeblocks_counter));
-       printk(KERN_EMERG "dirty_blocks=%lld\n",
-                       (long long)percpu_counter_sum(&sbi->s_dirtyblocks_counter));
-       printk(KERN_EMERG "Block reservation details\n");
-       printk(KERN_EMERG "i_reserved_data_blocks=%u\n",
-                       EXT4_I(inode)->i_reserved_data_blocks);
-       printk(KERN_EMERG "i_reserved_meta_blocks=%u\n",
-                       EXT4_I(inode)->i_reserved_meta_blocks);
+       printk(KERN_CRIT "Total free blocks count %lld\n",
+              ext4_count_free_blocks(inode->i_sb));
+       printk(KERN_CRIT "Free/Dirty block details\n");
+       printk(KERN_CRIT "free_blocks=%lld\n",
+              (long long) percpu_counter_sum(&sbi->s_freeblocks_counter));
+       printk(KERN_CRIT "dirty_blocks=%lld\n",
+              (long long) percpu_counter_sum(&sbi->s_dirtyblocks_counter));
+       printk(KERN_CRIT "Block reservation details\n");
+       printk(KERN_CRIT "i_reserved_data_blocks=%u\n",
+              EXT4_I(inode)->i_reserved_data_blocks);
+       printk(KERN_CRIT "i_reserved_meta_blocks=%u\n",
+              EXT4_I(inode)->i_reserved_meta_blocks);
        return;
 }
 
 /*
  * mpage_da_map_blocks - go through given space
  *
- * @mpd->lbh - bh describing space
- * @mpd->get_block - the filesystem's block mapper function
+ * @mpd - bh describing space
  *
  * The function skips space we know is already mapped to disk blocks.
  *
  */
-static int  mpage_da_map_blocks(struct mpage_da_data *mpd)
+static int mpage_da_map_blocks(struct mpage_da_data *mpd)
 {
-       int err = 0;
+       int err, blks, get_blocks_flags;
        struct buffer_head new;
-       struct buffer_head *lbh = &mpd->lbh;
-       sector_t next;
+       sector_t next = mpd->b_blocknr;
+       unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
+       loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
+       handle_t *handle = NULL;
 
        /*
         * We consider only non-mapped and non-allocated blocks
         */
-       if (buffer_mapped(lbh) && !buffer_delay(lbh))
+       if ((mpd->b_state & (1 << BH_Mapped)) &&
+               !(mpd->b_state & (1 << BH_Delay)) &&
+               !(mpd->b_state & (1 << BH_Unwritten)))
                return 0;
-       new.b_state = lbh->b_state;
-       new.b_blocknr = 0;
-       new.b_size = lbh->b_size;
-       next = lbh->b_blocknr;
+
        /*
-        * If we didn't accumulate anything
-        * to write simply return
+        * If we didn't accumulate anything to write, simply return
         */
-       if (!new.b_size)
+       if (!mpd->b_size)
                return 0;
-       err = mpd->get_block(mpd->inode, next, &new, 1);
-       if (err) {
 
-               /* If get block returns with error
-                * we simply return. Later writepage
-                * will redirty the page and writepages
-                * will find the dirty page again
+       handle = ext4_journal_current_handle();
+       BUG_ON(!handle);
+
+       /*
+        * Call ext4_get_blocks() to allocate any delayed allocation
+        * blocks, or to convert an uninitialized extent to be
+        * initialized (in the case where we have written into
+        * one or more preallocated blocks).
+        *
+        * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
+        * indicate that we are on the delayed allocation path.  This
+        * affects functions in many different parts of the allocation
+        * call path.  This flag exists primarily because we don't
+        * want to change *many* call functions, so ext4_get_blocks()
+        * will set the magic i_delalloc_reserved_flag once the
+        * inode's allocation semaphore is taken.
+        *
+        * If the blocks in question were delalloc blocks, set
+        * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
+        * variables are updated after the blocks have been allocated.
+        */
+       new.b_state = 0;
+       get_blocks_flags = (EXT4_GET_BLOCKS_CREATE |
+                           EXT4_GET_BLOCKS_DELALLOC_RESERVE);
+       if (mpd->b_state & (1 << BH_Delay))
+               get_blocks_flags |= EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE;
+       blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks,
+                              &new, get_blocks_flags);
+       if (blks < 0) {
+               err = blks;
+               /*
+                * If get block returns with error we simply
+                * return. Later writepage will redirty the page and
+                * writepages will find the dirty page again
                 */
                if (err == -EAGAIN)
                        return 0;
 
                if (err == -ENOSPC &&
-                               ext4_count_free_blocks(mpd->inode->i_sb)) {
+                   ext4_count_free_blocks(mpd->inode->i_sb)) {
                        mpd->retval = err;
                        return 0;
                }
 
                /*
-                * get block failure will cause us
-                * to loop in writepages. Because
-                * a_ops->writepage won't be able to
-                * make progress. The page will be redirtied
-                * by writepage and writepages will again
-                * try to write the same.
+                * get block failure will cause us to loop in
+                * writepages, because a_ops->writepage won't be able
+                * to make progress. The page will be redirtied by
+                * writepage and writepages will again try to write
+                * the same.
                 */
-               printk(KERN_EMERG "%s block allocation failed for inode %lu "
-                                 "at logical offset %llu with max blocks "
-                                 "%zd with error %d\n",
-                                 __func__, mpd->inode->i_ino,
-                                 (unsigned long long)next,
-                                 lbh->b_size >> mpd->inode->i_blkbits, err);
-               printk(KERN_EMERG "This should not happen.!! "
-                                       "Data will be lost\n");
+               ext4_msg(mpd->inode->i_sb, KERN_CRIT,
+                        "delayed block allocation failed for inode %lu at "
+                        "logical offset %llu with max blocks %zd with "
+                        "error %d\n", mpd->inode->i_ino,
+                        (unsigned long long) next,
+                        mpd->b_size >> mpd->inode->i_blkbits, err);
+               printk(KERN_CRIT "This should not happen!!  "
+                      "Data will be lost\n");
                if (err == -ENOSPC) {
                        ext4_print_free_blocks(mpd->inode);
                }
-               /* invlaidate all the pages */
+               /* invalidate all the pages */
                ext4_da_block_invalidatepages(mpd, next,
-                               lbh->b_size >> mpd->inode->i_blkbits);
+                               mpd->b_size >> mpd->inode->i_blkbits);
                return err;
        }
-       BUG_ON(new.b_size == 0);
+       BUG_ON(blks == 0);
+
+       new.b_size = (blks << mpd->inode->i_blkbits);
 
        if (buffer_new(&new))
                __unmap_underlying_blocks(mpd->inode, &new);
@@ -1995,9 +2278,27 @@ static int  mpage_da_map_blocks(struct mpage_da_data *mpd)
         * If blocks are marked delayed, we need to
         * put the actual blocknr and drop the delayed bit
         */
-       if (buffer_delay(lbh) || buffer_unwritten(lbh))
+       if ((mpd->b_state & (1 << BH_Delay)) ||
+           (mpd->b_state & (1 << BH_Unwritten)))
                mpage_put_bnr_to_bhs(mpd, next, &new);
 
+       if (ext4_should_order_data(mpd->inode)) {
+               err = ext4_jbd2_file_inode(handle, mpd->inode);
+               if (err)
+                       return err;
+       }
+
+       /*
+        * Update on-disk size along with block allocation.
+        */
+       disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
+       if (disksize > i_size_read(mpd->inode))
+               disksize = i_size_read(mpd->inode);
+       if (disksize > EXT4_I(mpd->inode)->i_disksize) {
+               ext4_update_i_disksize(mpd->inode, disksize);
+               return ext4_mark_inode_dirty(handle, mpd->inode);
+       }
+
        return 0;
 }
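Since mpd now caches a raw b_state word instead of a full buffer_head, the BH state bits are tested with explicit shifts. The two forms are equivalent; buffer_delay(bh) expands to test_bit(BH_Delay, &bh->b_state):

    /* same test as buffer_delay(bh) || buffer_unwritten(bh), but on the
     * state word cached in the mpage_da_data descriptor */
    if (mpd->b_state & ((1 << BH_Delay) | (1 << BH_Unwritten)))
            mpage_put_bnr_to_bhs(mpd, next, &new);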
 
@@ -2014,12 +2315,11 @@ static int  mpage_da_map_blocks(struct mpage_da_data *mpd)
  * the function is used to collect contiguous blocks in the same state
  */
 static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
-                                  sector_t logical, struct buffer_head *bh)
+                                  sector_t logical, size_t b_size,
+                                  unsigned long b_state)
 {
        sector_t next;
-       size_t b_size = bh->b_size;
-       struct buffer_head *lbh = &mpd->lbh;
-       int nrblocks = lbh->b_size >> mpd->inode->i_blkbits;
+       int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
 
        /* check if the reserved journal credits might overflow */
        if (!(EXT4_I(mpd->inode)->i_flags & EXT4_EXTENTS_FL)) {
@@ -2046,19 +2346,19 @@ static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
        /*
         * First block in the extent
         */
-       if (lbh->b_size == 0) {
-               lbh->b_blocknr = logical;
-               lbh->b_size = b_size;
-               lbh->b_state = bh->b_state & BH_FLAGS;
+       if (mpd->b_size == 0) {
+               mpd->b_blocknr = logical;
+               mpd->b_size = b_size;
+               mpd->b_state = b_state & BH_FLAGS;
                return;
        }
 
-       next = lbh->b_blocknr + nrblocks;
+       next = mpd->b_blocknr + nrblocks;
        /*
         * Can we merge the block to our big extent?
         */
-       if (logical == next && (bh->b_state & BH_FLAGS) == lbh->b_state) {
-               lbh->b_size += b_size;
+       if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
+               mpd->b_size += b_size;
                return;
        }
 
@@ -2073,6 +2373,11 @@ flush_it:
        return;
 }
 
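+/*
+ * Callback for walk_page_buffers(): returns true for dirty buffers
+ * that are delayed-allocated or unwritten, i.e. the ones which still
+ * need the block allocator before they can be written out.  The
+ * handle argument only exists to match the walk_page_buffers()
+ * callback signature.
+ */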
+static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
+{
+       return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
+}
+
 /*
  * __mpage_da_writepage - finds extent of pages and blocks
  *
@@ -2087,14 +2392,14 @@ static int __mpage_da_writepage(struct page *page,
 {
        struct mpage_da_data *mpd = data;
        struct inode *inode = mpd->inode;
-       struct buffer_head *bh, *head, fake;
+       struct buffer_head *bh, *head;
        sector_t logical;
 
        if (mpd->io_done) {
                /*
                 * Rest of the pages in the page_vec:
                 * redirty them and skip them. We will
-                * try to to write them again after
+                * try to write them again after
                 * starting a new transaction
                 */
                redirty_page_for_writepage(wbc, page);
@@ -2129,9 +2434,9 @@ static int __mpage_da_writepage(struct page *page,
                /*
                 * ... and blocks
                 */
-               mpd->lbh.b_size = 0;
-               mpd->lbh.b_state = 0;
-               mpd->lbh.b_blocknr = 0;
+               mpd->b_size = 0;
+               mpd->b_state = 0;
+               mpd->b_blocknr = 0;
        }
 
        mpd->next_page = page->index + 1;
@@ -2139,16 +2444,8 @@ static int __mpage_da_writepage(struct page *page,
                  (PAGE_CACHE_SHIFT - inode->i_blkbits);
 
        if (!page_has_buffers(page)) {
-               /*
-                * There is no attached buffer heads yet (mmap?)
-                * we treat the page asfull of dirty blocks
-                */
-               bh = &fake;
-               bh->b_size = PAGE_CACHE_SIZE;
-               bh->b_state = 0;
-               set_buffer_dirty(bh);
-               set_buffer_uptodate(bh);
-               mpage_add_bh_to_extent(mpd, logical, bh);
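+               /*
+                * No buffer heads are attached yet (mmap?): treat the
+                * whole page as a single dirty, uptodate extent.
+                */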
+               mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE,
+                                      (1 << BH_Dirty) | (1 << BH_Uptodate));
                if (mpd->io_done)
                        return MPAGE_DA_EXTENT_TAIL;
        } else {
@@ -2163,11 +2460,12 @@ static int __mpage_da_writepage(struct page *page,
                         * We need to try to allocate
                         * unmapped blocks in the same page.
                         * Otherwise we won't make progress
-                        * with the page in ext4_da_writepage
+                        * with the page in ext4_writepage
                         */
-                       if (buffer_dirty(bh) &&
-                               (!buffer_mapped(bh) || buffer_delay(bh))) {
-                               mpage_add_bh_to_extent(mpd, logical, bh);
+                       if (ext4_bh_delay_or_unwritten(NULL, bh)) {
+                               mpage_add_bh_to_extent(mpd, logical,
+                                                      bh->b_size,
+                                                      bh->b_state);
                                if (mpd->io_done)
                                        return MPAGE_DA_EXTENT_TAIL;
                        } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
@@ -2179,9 +2477,8 @@ static int __mpage_da_writepage(struct page *page,
                                 * unmapped buffer_head later we need to
                                 * use the b_state flag of that buffer_head.
                                 */
-                               if (mpd->lbh.b_size == 0)
-                                       mpd->lbh.b_state =
-                                               bh->b_state & BH_FLAGS;
+                               if (mpd->b_size == 0)
+                                       mpd->b_state = bh->b_state & BH_FLAGS;
                        }
                        logical++;
                } while ((bh = bh->b_this_page) != head);
@@ -2191,58 +2488,25 @@ static int __mpage_da_writepage(struct page *page,
 }
 
 /*
- * mpage_da_writepages - walk the list of dirty pages of the given
- * address space, allocates non-allocated blocks, maps newly-allocated
- * blocks to existing bhs and issue IO them
+ * This is a special get_block_t callback which is used by
+ * ext4_da_write_begin().  It will either return a mapped block or
+ * reserve space for a single block.
  *
- * @mapping: address space structure to write
- * @wbc: subtract the number of written pages from *@wbc->nr_to_write
- * @get_block: the filesystem's block mapper function.
+ * For a delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
+ * We also have b_blocknr = -1 and b_bdev initialized properly.
  *
- * This is a library function, which implements the writepages()
- * address_space_operation.
- */
-static int mpage_da_writepages(struct address_space *mapping,
-                              struct writeback_control *wbc,
-                              struct mpage_da_data *mpd)
-{
-       int ret;
-
-       if (!mpd->get_block)
-               return generic_writepages(mapping, wbc);
-
-       mpd->lbh.b_size = 0;
-       mpd->lbh.b_state = 0;
-       mpd->lbh.b_blocknr = 0;
-       mpd->first_page = 0;
-       mpd->next_page = 0;
-       mpd->io_done = 0;
-       mpd->pages_written = 0;
-       mpd->retval = 0;
-
-       ret = write_cache_pages(mapping, wbc, __mpage_da_writepage, mpd);
-       /*
-        * Handle last extent of pages
-        */
-       if (!mpd->io_done && mpd->next_page != mpd->first_page) {
-               if (mpage_da_map_blocks(mpd) == 0)
-                       mpage_da_submit_io(mpd);
-
-               mpd->io_done = 1;
-               ret = MPAGE_DA_EXTENT_TAIL;
-       }
-       wbc->nr_to_write -= mpd->pages_written;
-       return ret;
-}
-
-/*
- * this is a special callback for ->write_begin() only
- * it's intention is to return mapped block or reserve space
+ * For an unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
+ * We also have b_blocknr = the physical block mapping the unwritten extent,
+ * and b_bdev initialized properly.
  */
 static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
                                  struct buffer_head *bh_result, int create)
 {
        int ret = 0;
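+       /*
+        * Pick a b_blocknr for delayed buffers that cannot be a valid
+        * block number, so any code path that wrongly trusts it trips
+        * over it quickly; fall back to ~0 if the filesystem is large
+        * enough that ~0xffff could be a real block number.
+        */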
+       sector_t invalid_block = ~((sector_t) 0xffff);
+
+       if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
+               invalid_block = ~0;
 
        BUG_ON(create == 0);
        BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
@@ -2252,7 +2516,7 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
         * preallocated blocks are unmapped but should be treated
         * the same as allocated blocks.
         */
-       ret = ext4_get_blocks_wrap(NULL, inode, iblock, 1,  bh_result, 0, 0, 0);
+       ret = ext4_get_blocks(NULL, inode, iblock, 1,  bh_result, 0);
        if ((ret == 0) && !buffer_delay(bh_result)) {
                /* the block isn't (pre)allocated yet, let's reserve space */
                /*
@@ -2264,98 +2528,160 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
                        /* not enough space to reserve */
                        return ret;
 
-               map_bh(bh_result, inode->i_sb, 0);
+               map_bh(bh_result, inode->i_sb, invalid_block);
                set_buffer_new(bh_result);
                set_buffer_delay(bh_result);
        } else if (ret > 0) {
                bh_result->b_size = (ret << inode->i_blkbits);
+               if (buffer_unwritten(bh_result)) {
+                       /* A delayed write to unwritten bh should
+                        * be marked new and mapped.  Mapped ensures
+                        * that we don't do get_block multiple times
+                        * when we write to the same offset and new
+                        * ensures that we do proper zero out for
+                        * partial write.
+                        */
+                       set_buffer_new(bh_result);
+                       set_buffer_mapped(bh_result);
+               }
                ret = 0;
        }
 
        return ret;
 }
-#define                EXT4_DELALLOC_RSVED     1
-static int ext4_da_get_block_write(struct inode *inode, sector_t iblock,
+
+/*
+ * This function is used as a standard get_block_t callback function
+ * when there is no desire to allocate any blocks.  It is used as a
+ * callback function for block_prepare_write(), nobh_writepage(), and
+ * block_write_full_page().  These functions should only try to map a
+ * single block at a time.
+ *
+ * Since this function doesn't do block allocations even if the caller
+ * requests it by passing in create=1, it is critically important that
+ * any caller check that the buffer heads returned by this function
+ * are either all already mapped or marked for delayed allocation
+ * before calling nobh_writepage() or block_write_full_page().
+ * Otherwise, b_blocknr could be left uninitialized, and the page write
+ * functions will be taken by surprise.
+ */
+static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
                                   struct buffer_head *bh_result, int create)
 {
-       int ret;
+       int ret = 0;
        unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
-       loff_t disksize = EXT4_I(inode)->i_disksize;
-       handle_t *handle = NULL;
 
-       handle = ext4_journal_current_handle();
-       BUG_ON(!handle);
-       ret = ext4_get_blocks_wrap(handle, inode, iblock, max_blocks,
-                       bh_result, create, 0, EXT4_DELALLOC_RSVED);
-       if (ret > 0) {
+       BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
 
+       /*
+        * we don't want to do block allocation in writepage
+        * so call ext4_get_blocks() with create = 0
+        */
+       ret = ext4_get_blocks(NULL, inode, iblock, max_blocks, bh_result, 0);
+       if (ret > 0) {
                bh_result->b_size = (ret << inode->i_blkbits);
-
-               if (ext4_should_order_data(inode)) {
-                       int retval;
-                       retval = ext4_jbd2_file_inode(handle, inode);
-                       if (retval)
-                               /*
-                                * Failed to add inode for ordered
-                                * mode. Don't update file size
-                                */
-                               return retval;
-               }
-
-               /*
-                * Update on-disk size along with block allocation
-                * we don't use 'extend_disksize' as size may change
-                * within already allocated block -bzzz
-                */
-               disksize = ((loff_t) iblock + ret) << inode->i_blkbits;
-               if (disksize > i_size_read(inode))
-                       disksize = i_size_read(inode);
-               if (disksize > EXT4_I(inode)->i_disksize) {
-                       ext4_update_i_disksize(inode, disksize);
-                       ret = ext4_mark_inode_dirty(handle, inode);
-                       return ret;
-               }
                ret = 0;
        }
        return ret;
 }
 
-static int ext4_bh_unmapped_or_delay(handle_t *handle, struct buffer_head *bh)
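+/*
+ * walk_page_buffers() callbacks used by __ext4_journalled_writepage()
+ * to pin (bget_one) and unpin (bput_one) each buffer head on the page,
+ * so that the buffers cannot go away once the page is unlocked.
+ */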
+static int bget_one(handle_t *handle, struct buffer_head *bh)
 {
-       /*
-        * unmapped buffer is possible for holes.
-        * delay buffer is possible with delayed allocation
-        */
-       return ((!buffer_mapped(bh) || buffer_delay(bh)) && buffer_dirty(bh));
+       get_bh(bh);
+       return 0;
 }
 
-static int ext4_normal_get_block_write(struct inode *inode, sector_t iblock,
-                                  struct buffer_head *bh_result, int create)
+static int bput_one(handle_t *handle, struct buffer_head *bh)
+{
+       put_bh(bh);
+       return 0;
+}
+
+static int __ext4_journalled_writepage(struct page *page,
+                                      struct writeback_control *wbc,
+                                      unsigned int len)
 {
+       struct address_space *mapping = page->mapping;
+       struct inode *inode = mapping->host;
+       struct buffer_head *page_bufs;
+       handle_t *handle = NULL;
        int ret = 0;
-       unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
+       int err;
 
-       /*
-        * we don't want to do block allocation in writepage
-        * so call get_block_wrap with create = 0
-        */
-       ret = ext4_get_blocks_wrap(NULL, inode, iblock, max_blocks,
-                                  bh_result, 0, 0, 0);
-       if (ret > 0) {
-               bh_result->b_size = (ret << inode->i_blkbits);
-               ret = 0;
+       page_bufs = page_buffers(page);
+       BUG_ON(!page_bufs);
+       walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
+       /* As soon as we unlock the page, it can go away, but we have
+        * references to buffers so we are safe */
+       unlock_page(page);
+
+       handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               goto out;
        }
+
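+       /*
+        * Journal the data buffers: first get write access to each
+        * buffer, then mark it dirty in the journal via write_end_fn.
+        */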
+       ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
+                               do_journal_get_write_access);
+
+       err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
+                               write_end_fn);
+       if (ret == 0)
+               ret = err;
+       err = ext4_journal_stop(handle);
+       if (!ret)
+               ret = err;
+
+       walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
+       EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
+out:
        return ret;
 }
 
 /*
- * get called vi ext4_da_writepages after taking page lock (have journal handle)
- * get called via journal_submit_inode_data_buffers (no journal handle)
- * get called via shrink_page_list via pdflush (no journal handle)
- * or grab_page_cache when doing write_begin (have journal handle)
+ * Note that we don't need to start a transaction unless we're journaling data
+ * because we should have holes filled from ext4_page_mkwrite(). We don't even
+ * need to add the inode to the transaction's list in ordered mode, because if
+ * we are writing back data added by write(), the inode is already there, and
+ * if we are writing back data modified via mmap(), no one guarantees in which
+ * transaction the data will hit the disk. If we are journaling data, we cannot
+ * start a transaction directly because a transaction start ranks above the
+ * page lock, so we have to do some magic.
+ *
+ * This function can get called via...
+ *   - ext4_da_writepages after taking page lock (have journal handle)
+ *   - journal_submit_inode_data_buffers (no journal handle)
+ *   - shrink_page_list via pdflush (no journal handle)
+ *   - grab_page_cache when doing write_begin (have journal handle)
+ *
+ * We don't do any block allocation in this function. If we have a page with
+ * multiple blocks, we need to write those buffer_heads that are mapped. This
+ * is important for mmap-based writes. So if we do, with blocksize 1K,
+ * truncate(f, 1024);
+ * a = mmap(f, 0, 4096);
+ * a[0] = 'a';
+ * truncate(f, 4096);
+ * we have in the page the first buffer_head mapped via the page_mkwrite
+ * callback, but the other buffer_heads would be unmapped but dirty (dirtied
+ * via do_wp_page). So writepage should write the first block. If we modify
+ * the mmap area beyond 1024 we will again get a page fault and the
+ * page_mkwrite callback will do the block allocation and mark the
+ * buffer_heads mapped.
+ *
+ * We redirty the page if it has any buffer_heads that are either delayed
+ * or unwritten.
+ *
+ * We can get called recursively, as shown below.
+ *
+ *     ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
+ *             ext4_writepage()
+ *
+ * But since we don't do any block allocation we should not deadlock.
+ * The page also has its dirty flag cleared, so we don't take the page
+ * lock recursively.
  */
-static int ext4_da_writepage(struct page *page,
-                               struct writeback_control *wbc)
+static int ext4_writepage(struct page *page,
+                         struct writeback_control *wbc)
 {
        int ret = 0;
        loff_t size;
@@ -2363,9 +2689,7 @@ static int ext4_da_writepage(struct page *page,
        struct buffer_head *page_bufs;
        struct inode *inode = page->mapping->host;
 
-       trace_mark(ext4_da_writepage,
-                  "dev %s ino %lu page_index %lu",
-                  inode->i_sb->s_id, inode->i_ino, page->index);
+       trace_ext4_writepage(inode, page);
        size = i_size_read(inode);
        if (page->index == size >> PAGE_CACHE_SHIFT)
                len = size & ~PAGE_CACHE_MASK;
@@ -2375,7 +2699,7 @@ static int ext4_da_writepage(struct page *page,
        if (page_has_buffers(page)) {
                page_bufs = page_buffers(page);
                if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
-                                       ext4_bh_unmapped_or_delay)) {
+                                       ext4_bh_delay_or_unwritten)) {
                        /*
                         * We don't want to do block allocation,
                         * so redirty the page and return.
@@ -2402,13 +2726,13 @@ static int ext4_da_writepage(struct page *page,
                 * all are mapped and non delay. We don't want to
                 * do block allocation here.
                 */
-               ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
-                                               ext4_normal_get_block_write);
+               ret = block_prepare_write(page, 0, len,
+                                         noalloc_get_block_write);
                if (!ret) {
                        page_bufs = page_buffers(page);
                        /* check whether all are mapped and non delay */
                        if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
-                                               ext4_bh_unmapped_or_delay)) {
+                                               ext4_bh_delay_or_unwritten)) {
                                redirty_page_for_writepage(wbc, page);
                                unlock_page(page);
                                return 0;
@@ -2424,15 +2748,23 @@ static int ext4_da_writepage(struct page *page,
                        return 0;
                }
                /* now mark the buffer_heads as dirty and uptodate */
-               block_commit_write(page, 0, PAGE_CACHE_SIZE);
+               block_commit_write(page, 0, len);
+       }
+
+       if (PageChecked(page) && ext4_should_journal_data(inode)) {
+               /*
+                * It's mmapped pagecache.  Add buffers and journal it.  There
+                * doesn't seem much point in redirtying the page here.
+                */
+               ClearPageChecked(page);
+               return __ext4_journalled_writepage(page, wbc, len);
        }
 
        if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
-               ret = nobh_writepage(page, ext4_normal_get_block_write, wbc);
+               ret = nobh_writepage(page, noalloc_get_block_write, wbc);
        else
-               ret = block_write_full_page(page,
-                                               ext4_normal_get_block_write,
-                                               wbc);
+               ret = block_write_full_page(page, noalloc_get_block_write,
+                                           wbc);
 
        return ret;
 }
@@ -2473,23 +2805,14 @@ static int ext4_da_writepages(struct address_space *mapping,
        int no_nrwrite_index_update;
        int pages_written = 0;
        long pages_skipped;
+       unsigned int max_pages;
        int range_cyclic, cycled = 1, io_done = 0;
-       int needed_blocks, ret = 0, nr_to_writebump = 0;
+       int needed_blocks, ret = 0;
+       long desired_nr_to_write, nr_to_writebump = 0;
+       loff_t range_start = wbc->range_start;
        struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
 
-       trace_mark(ext4_da_writepages,
-                  "dev %s ino %lu nr_t_write %ld "
-                  "pages_skipped %ld range_start %llu "
-                  "range_end %llu nonblocking %d "
-                  "for_kupdate %d for_reclaim %d "
-                  "for_writepages %d range_cyclic %d",
-                  inode->i_sb->s_id, inode->i_ino,
-                  wbc->nr_to_write, wbc->pages_skipped,
-                  (unsigned long long) wbc->range_start,
-                  (unsigned long long) wbc->range_end,
-                  wbc->nonblocking, wbc->for_kupdate,
-                  wbc->for_reclaim, wbc->for_writepages,
-                  wbc->range_cyclic);
+       trace_ext4_da_writepages(inode, wbc);
 
        /*
         * No pages to write? This is mainly a kludge to avoid starting
@@ -2503,25 +2826,15 @@ static int ext4_da_writepages(struct address_space *mapping,
         * If the filesystem has aborted, it is read-only, so return
         * right away instead of dumping stack traces later on that
         * will obscure the real source of the problem.  We test
-        * EXT4_MOUNT_ABORT instead of sb->s_flag's MS_RDONLY because
+        * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
         * the latter could be true if the filesystem is mounted
         * read-only, and in that case, ext4_da_writepages should
         * *never* be called, so if that ever happens, we would want
         * the stack trace.
         */
-       if (unlikely(sbi->s_mount_opt & EXT4_MOUNT_ABORT))
+       if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
                return -EROFS;
 
-       /*
-        * Make sure nr_to_write is >= sbi->s_mb_stream_request
-        * This make sure small files blocks are allocated in
-        * single attempt. This ensure that small files
-        * get less fragmented.
-        */
-       if (wbc->nr_to_write < sbi->s_mb_stream_request) {
-               nr_to_writebump = sbi->s_mb_stream_request - wbc->nr_to_write;
-               wbc->nr_to_write = sbi->s_mb_stream_request;
-       }
        if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                range_whole = 1;
 
@@ -2536,6 +2849,36 @@ static int ext4_da_writepages(struct address_space *mapping,
        } else
                index = wbc->range_start >> PAGE_CACHE_SHIFT;
 
+       /*
+        * This works around two forms of stupidity.  The first is in
+        * the writeback code, which caps the maximum number of pages
+        * written to be 1024 pages.  This is wrong on multiple
+        * levels; different architectures have a different page size,
+        * which changes the maximum amount of data which gets
+        * written.  Secondly, 4 megabytes is way too small.  XFS
+        * forces this value to be 16 megabytes by multiplying
+        * nr_to_write parameter by four, and then relies on its
+        * allocator to allocate larger extents to make them
+        * contiguous.  Unfortunately this brings us to the second
+        * stupidity, which is that ext4's mballoc code only allocates
+        * at most 2048 blocks.  So we force contiguous writes up to
+        * the number of dirty blocks in the inode, or
+        * sbi->s_max_writeback_mb_bump, whichever is smaller.
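+        *
+        * For example, with 4 KiB pages (PAGE_CACHE_SHIFT == 12) and a
+        * bump of 128 MB, max_pages works out to 128 << 8 = 32768 pages.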
+        */
+       max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
+       if (!range_cyclic && range_whole)
+               desired_nr_to_write = wbc->nr_to_write * 8;
+       else
+               desired_nr_to_write = ext4_num_dirty_pages(inode, index,
+                                                          max_pages);
+       if (desired_nr_to_write > max_pages)
+               desired_nr_to_write = max_pages;
+
+       if (wbc->nr_to_write < desired_nr_to_write) {
+               nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
+               wbc->nr_to_write = desired_nr_to_write;
+       }
+
        mpd.wbc = wbc;
        mpd.inode = mapping->host;
 
@@ -2563,14 +2906,44 @@ retry:
                handle = ext4_journal_start(inode, needed_blocks);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
-                       printk(KERN_CRIT "%s: jbd2_start: "
+                       ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
                               "%ld pages, ino %lu; err %d\n", __func__,
                                wbc->nr_to_write, inode->i_ino, ret);
-                       dump_stack();
                        goto out_writepages;
                }
-               mpd.get_block = ext4_da_get_block_write;
-               ret = mpage_da_writepages(mapping, wbc, &mpd);
+
+               /*
+                * Now call __mpage_da_writepage to find the next
+                * contiguous region of logical blocks that need
+                * blocks to be allocated by ext4.  We don't actually
+                * submit the blocks for I/O here, even though
+                * write_cache_pages thinks it will, and will set the
+                * pages as clean for write before calling
+                * __mpage_da_writepage().
+                */
+               mpd.b_size = 0;
+               mpd.b_state = 0;
+               mpd.b_blocknr = 0;
+               mpd.first_page = 0;
+               mpd.next_page = 0;
+               mpd.io_done = 0;
+               mpd.pages_written = 0;
+               mpd.retval = 0;
+               ret = write_cache_pages(mapping, wbc, __mpage_da_writepage,
+                                       &mpd);
+               /*
+                * If we have a contiguous extent of pages and we
+                * haven't done the I/O yet, map the blocks and submit
+                * them for I/O.
+                */
+               if (!mpd.io_done && mpd.next_page != mpd.first_page) {
+                       if (mpage_da_map_blocks(&mpd) == 0)
+                               mpage_da_submit_io(&mpd);
+                       mpd.io_done = 1;
+                       ret = MPAGE_DA_EXTENT_TAIL;
+               }
+               trace_ext4_da_write_pages(inode, &mpd);
+               wbc->nr_to_write -= mpd.pages_written;
 
                ext4_journal_stop(handle);
 
@@ -2607,9 +2980,10 @@ retry:
                goto retry;
        }
        if (pages_skipped != wbc->pages_skipped)
-               printk(KERN_EMERG "This should not happen leaving %s "
-                               "with nr_to_write = %ld ret = %d\n",
-                               __func__, wbc->nr_to_write, ret);
+               ext4_msg(inode->i_sb, KERN_CRIT,
+                        "This should not happen leaving %s "
+                        "with nr_to_write = %ld ret = %d\n",
+                        __func__, wbc->nr_to_write, ret);
 
        /* Update index */
        index += pages_written;
@@ -2624,15 +2998,10 @@ retry:
 out_writepages:
        if (!no_nrwrite_index_update)
                wbc->no_nrwrite_index_update = 0;
-       wbc->nr_to_write -= nr_to_writebump;
-       trace_mark(ext4_da_writepage_result,
-                  "dev %s ino %lu ret %d pages_written %d "
-                  "pages_skipped %ld congestion %d "
-                  "more_io %d no_nrwrite_index_update %d",
-                  inode->i_sb->s_id, inode->i_ino, ret,
-                  pages_written, wbc->pages_skipped,
-                  wbc->encountered_congestion, wbc->more_io,
-                  wbc->no_nrwrite_index_update);
+       if (wbc->nr_to_write > nr_to_writebump)
+               wbc->nr_to_write -= nr_to_writebump;
+       wbc->range_start = range_start;
+       trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
        return ret;
 }
 
@@ -2664,8 +3033,8 @@ static int ext4_nonda_switch(struct super_block *sb)
 }
 
 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
-                               loff_t pos, unsigned len, unsigned flags,
-                               struct page **pagep, void **fsdata)
+                              loff_t pos, unsigned len, unsigned flags,
+                              struct page **pagep, void **fsdata)
 {
        int ret, retries = 0;
        struct page *page;
@@ -2684,11 +3053,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
                                        len, flags, pagep, fsdata);
        }
        *fsdata = (void *)0;
-
-       trace_mark(ext4_da_write_begin,
-                  "dev %s ino %lu pos %llu len %u flags %u",
-                  inode->i_sb->s_id, inode->i_ino,
-                  (unsigned long long) pos, len, flags);
+       trace_ext4_da_write_begin(inode, pos, len, flags);
 retry:
        /*
         * With delayed allocation, we don't log the i_disksize update
@@ -2714,7 +3079,7 @@ retry:
        *pagep = page;
 
        ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
-                                                       ext4_da_get_block_prep);
+                               ext4_da_get_block_prep);
        if (ret < 0) {
                unlock_page(page);
                ext4_journal_stop(handle);
@@ -2725,7 +3090,7 @@ retry:
                 * i_size_read because we hold i_mutex.
                 */
                if (pos + len > inode->i_size)
-                       vmtruncate(inode, inode->i_size);
+                       ext4_truncate(inode);
        }
 
        if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
@@ -2739,7 +3104,7 @@ out:
  * when write to the end of file but not require block allocation
  */
 static int ext4_da_should_update_i_disksize(struct page *page,
-                                        unsigned long offset)
+                                           unsigned long offset)
 {
        struct buffer_head *bh;
        struct inode *inode = page->mapping->host;
@@ -2752,15 +3117,15 @@ static int ext4_da_should_update_i_disksize(struct page *page,
        for (i = 0; i < idx; i++)
                bh = bh->b_this_page;
 
-       if (!buffer_mapped(bh) || (buffer_delay(bh)))
+       if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
                return 0;
        return 1;
 }
 
 static int ext4_da_write_end(struct file *file,
-                               struct address_space *mapping,
-                               loff_t pos, unsigned len, unsigned copied,
-                               struct page *page, void *fsdata)
+                            struct address_space *mapping,
+                            loff_t pos, unsigned len, unsigned copied,
+                            struct page *page, void *fsdata)
 {
        struct inode *inode = mapping->host;
        int ret = 0, ret2;
@@ -2781,10 +3146,7 @@ static int ext4_da_write_end(struct file *file,
                }
        }
 
-       trace_mark(ext4_da_write_end,
-                  "dev %s ino %lu pos %llu len %u copied %u",
-                  inode->i_sb->s_id, inode->i_ino,
-                  (unsigned long long) pos, len, copied);
+       trace_ext4_da_write_end(inode, pos, len, copied);
        start = pos & (PAGE_CACHE_SIZE - 1);
        end = start + copied - 1;
 
@@ -2846,290 +3208,111 @@ out:
        return;
 }
 
-
-/*
- * bmap() is special.  It gets used by applications such as lilo and by
- * the swapper to find the on-disk block of a specific piece of data.
- *
- * Naturally, this is dangerous if the block concerned is still in the
- * journal.  If somebody makes a swapfile on an ext4 data-journaling
- * filesystem and enables swap, then they may get a nasty shock when the
- * data getting swapped to that swapfile suddenly gets overwritten by
- * the original zero's written out previously to the journal and
- * awaiting writeback in the kernel's buffer cache.
- *
- * So, if we see any bmap calls here on a modified, data-journaled file,
- * take extra steps to flush any blocks which might be in the cache.
- */
-static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
-{
-       struct inode *inode = mapping->host;
-       journal_t *journal;
-       int err;
-
-       if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
-                       test_opt(inode->i_sb, DELALLOC)) {
-               /*
-                * With delalloc we want to sync the file
-                * so that we can make sure we allocate
-                * blocks for file
-                */
-               filemap_write_and_wait(mapping);
-       }
-
-       if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
-               /*
-                * This is a REALLY heavyweight approach, but the use of
-                * bmap on dirty files is expected to be extremely rare:
-                * only if we run lilo or swapon on a freshly made file
-                * do we expect this to happen.
-                *
-                * (bmap requires CAP_SYS_RAWIO so this does not
-                * represent an unprivileged user DOS attack --- we'd be
-                * in trouble if mortal users could trigger this path at
-                * will.)
-                *
-                * NB. EXT4_STATE_JDATA is not set on files other than
-                * regular files.  If somebody wants to bmap a directory
-                * or symlink and gets confused because the buffer
-                * hasn't yet been flushed to disk, they deserve
-                * everything they get.
-                */
-
-               EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
-               journal = EXT4_JOURNAL(inode);
-               jbd2_journal_lock_updates(journal);
-               err = jbd2_journal_flush(journal);
-               jbd2_journal_unlock_updates(journal);
-
-               if (err)
-                       return 0;
-       }
-
-       return generic_block_bmap(mapping, block, ext4_get_block);
-}
-
-static int bget_one(handle_t *handle, struct buffer_head *bh)
-{
-       get_bh(bh);
-       return 0;
-}
-
-static int bput_one(handle_t *handle, struct buffer_head *bh)
-{
-       put_bh(bh);
-       return 0;
-}
-
 /*
- * Note that we don't need to start a transaction unless we're journaling data
- * because we should have holes filled from ext4_page_mkwrite(). We even don't
- * need to file the inode to the transaction's list in ordered mode because if
- * we are writing back data added by write(), the inode is already there and if
- * we are writing back data modified via mmap(), noone guarantees in which
- * transaction the data will hit the disk. In case we are journaling data, we
- * cannot start transaction directly because transaction start ranks above page
- * lock so we have to do some magic.
- *
- * In all journaling modes block_write_full_page() will start the I/O.
- *
- * Problem:
- *
- *     ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
- *             ext4_writepage()
- *
- * Similar for:
- *
- *     ext4_file_write() -> generic_file_write() -> __alloc_pages() -> ...
- *
- * Same applies to ext4_get_block().  We will deadlock on various things like
- * lock_journal and i_data_sem
- *
- * Setting PF_MEMALLOC here doesn't work - too many internal memory
- * allocations fail.
- *
- * 16May01: If we're reentered then journal_current_handle() will be
- *         non-zero. We simply *return*.
- *
- * 1 July 2001: @@@ FIXME:
- *   In journalled data mode, a data buffer may be metadata against the
- *   current transaction.  But the same file is part of a shared mapping
- *   and someone does a writepage() on it.
- *
- *   We will move the buffer onto the async_data list, but *after* it has
- *   been dirtied. So there's a small window where we have dirty data on
- *   BJ_Metadata.
- *
- *   Note that this only applies to the last partial page in the file.  The
- *   bit which block_write_full_page() uses prepare/commit for.  (That's
- *   broken code anyway: it's wrong for msync()).
- *
- *   It's a rare case: affects the final partial page, for journalled data
- *   where the file is subject to bith write() and writepage() in the same
- *   transction.  To fix it we'll need a custom block_write_full_page().
- *   We'll probably need that anyway for journalling writepage() output.
- *
- * We don't honour synchronous mounts for writepage().  That would be
- * disastrous.  Any write() or metadata operation will sync the fs for
- * us.
- *
+ * Force all delayed allocation blocks to be allocated for a given inode.
  */
-static int __ext4_normal_writepage(struct page *page,
-                               struct writeback_control *wbc)
-{
-       struct inode *inode = page->mapping->host;
-
-       if (test_opt(inode->i_sb, NOBH))
-               return nobh_writepage(page,
-                                       ext4_normal_get_block_write, wbc);
-       else
-               return block_write_full_page(page,
-                                               ext4_normal_get_block_write,
-                                               wbc);
-}
-
-static int ext4_normal_writepage(struct page *page,
-                               struct writeback_control *wbc)
+int ext4_alloc_da_blocks(struct inode *inode)
 {
-       struct inode *inode = page->mapping->host;
-       loff_t size = i_size_read(inode);
-       loff_t len;
-
-       trace_mark(ext4_normal_writepage,
-                  "dev %s ino %lu page_index %lu",
-                  inode->i_sb->s_id, inode->i_ino, page->index);
-       J_ASSERT(PageLocked(page));
-       if (page->index == size >> PAGE_CACHE_SHIFT)
-               len = size & ~PAGE_CACHE_MASK;
-       else
-               len = PAGE_CACHE_SIZE;
-
-       if (page_has_buffers(page)) {
-               /* if page has buffers it should all be mapped
-                * and allocated. If there are not buffers attached
-                * to the page we know the page is dirty but it lost
-                * buffers. That means that at some moment in time
-                * after write_begin() / write_end() has been called
-                * all buffers have been clean and thus they must have been
-                * written at least once. So they are all mapped and we can
-                * happily proceed with mapping them and writing the page.
-                */
-               BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
-                                       ext4_bh_unmapped_or_delay));
-       }
-
-       if (!ext4_journal_current_handle())
-               return __ext4_normal_writepage(page, wbc);
-
-       redirty_page_for_writepage(wbc, page);
-       unlock_page(page);
-       return 0;
-}
+       trace_ext4_alloc_da_blocks(inode);
 
-static int __ext4_journalled_writepage(struct page *page,
-                               struct writeback_control *wbc)
-{
-       struct address_space *mapping = page->mapping;
-       struct inode *inode = mapping->host;
-       struct buffer_head *page_bufs;
-       handle_t *handle = NULL;
-       int ret = 0;
-       int err;
-
-       ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
-                                       ext4_normal_get_block_write);
-       if (ret != 0)
-               goto out_unlock;
-
-       page_bufs = page_buffers(page);
-       walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE, NULL,
-                                                               bget_one);
-       /* As soon as we unlock the page, it can go away, but we have
-        * references to buffers so we are safe */
-       unlock_page(page);
-
-       handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
-       if (IS_ERR(handle)) {
-               ret = PTR_ERR(handle);
-               goto out;
-       }
-
-       ret = walk_page_buffers(handle, page_bufs, 0,
-                       PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
-
-       err = walk_page_buffers(handle, page_bufs, 0,
-                               PAGE_CACHE_SIZE, NULL, write_end_fn);
-       if (ret == 0)
-               ret = err;
-       err = ext4_journal_stop(handle);
-       if (!ret)
-               ret = err;
-
-       walk_page_buffers(handle, page_bufs, 0,
-                               PAGE_CACHE_SIZE, NULL, bput_one);
-       EXT4_I(inode)->i_state |= EXT4_STATE_JDATA;
-       goto out;
+       if (!EXT4_I(inode)->i_reserved_data_blocks &&
+           !EXT4_I(inode)->i_reserved_meta_blocks)
+               return 0;
 
-out_unlock:
-       unlock_page(page);
-out:
-       return ret;
+       /*
+        * We do something simple for now.  The filemap_flush() will
+        * also start triggering a write of the data blocks, which is
+        * not strictly speaking necessary (and for users of
+        * laptop_mode, not even desirable).  However, to do otherwise
+        * would require replicating code paths in:
+        *
+        * ext4_da_writepages() ->
+        *    write_cache_pages() ---> (via passed in callback function)
+        *        __mpage_da_writepage() -->
+        *           mpage_add_bh_to_extent()
+        *           mpage_da_map_blocks()
+        *
+        * The problem is that write_cache_pages(), located in
+        * mm/page-writeback.c, marks pages clean in preparation for
+        * doing I/O, which is not desirable if we're not planning on
+        * doing I/O at all.
+        *
+        * We could call write_cache_pages(), and then redirty all of
+        * the pages by calling redirty_page_for_writepage() but that
+        * would be ugly in the extreme.  So instead we would need to
+        * replicate parts of the code in the above functions,
+        * simplifying them because we wouldn't actually intend to
+        * write out the pages, but rather only collect contiguous
+        * logical block extents, call the multi-block allocator, and
+        * then update the buffer heads with the block allocations.
+        *
+        * For now, though, we'll cheat by calling filemap_flush(),
+        * which will map the blocks, and start the I/O, but not
+        * actually wait for the I/O to complete.
+        */
+       return filemap_flush(inode->i_mapping);
 }
 
-static int ext4_journalled_writepage(struct page *page,
-                               struct writeback_control *wbc)
+/*
+ * bmap() is special.  It gets used by applications such as lilo and by
+ * the swapper to find the on-disk block of a specific piece of data.
+ *
+ * Naturally, this is dangerous if the block concerned is still in the
+ * journal.  If somebody makes a swapfile on an ext4 data-journaling
+ * filesystem and enables swap, then they may get a nasty shock when the
+ * data getting swapped to that swapfile suddenly gets overwritten by
+ * the original zeros written out previously to the journal and
+ * awaiting writeback in the kernel's buffer cache.
+ *
+ * So, if we see any bmap calls here on a modified, data-journaled file,
+ * take extra steps to flush any blocks which might be in the cache.
+ */
+static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
 {
-       struct inode *inode = page->mapping->host;
-       loff_t size = i_size_read(inode);
-       loff_t len;
-
-       trace_mark(ext4_journalled_writepage,
-                  "dev %s ino %lu page_index %lu",
-                  inode->i_sb->s_id, inode->i_ino, page->index);
-       J_ASSERT(PageLocked(page));
-       if (page->index == size >> PAGE_CACHE_SHIFT)
-               len = size & ~PAGE_CACHE_MASK;
-       else
-               len = PAGE_CACHE_SIZE;
+       struct inode *inode = mapping->host;
+       journal_t *journal;
+       int err;
 
-       if (page_has_buffers(page)) {
-               /* if page has buffers it should all be mapped
-                * and allocated. If there are not buffers attached
-                * to the page we know the page is dirty but it lost
-                * buffers. That means that at some moment in time
-                * after write_begin() / write_end() has been called
-                * all buffers have been clean and thus they must have been
-                * written at least once. So they are all mapped and we can
-                * happily proceed with mapping them and writing the page.
+       if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
+                       test_opt(inode->i_sb, DELALLOC)) {
+               /*
+                * With delalloc we want to sync the file
+                * so that we can make sure we allocate
+                * blocks for the file
                 */
-               BUG_ON(walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
-                                       ext4_bh_unmapped_or_delay));
+               filemap_write_and_wait(mapping);
        }
 
-       if (ext4_journal_current_handle())
-               goto no_write;
-
-       if (PageChecked(page)) {
-               /*
-                * It's mmapped pagecache.  Add buffers and journal it.  There
-                * doesn't seem much point in redirtying the page here.
-                */
-               ClearPageChecked(page);
-               return __ext4_journalled_writepage(page, wbc);
-       } else {
+       if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
                /*
-                * It may be a page full of checkpoint-mode buffers.  We don't
-                * really know unless we go poke around in the buffer_heads.
-                * But block_write_full_page will do the right thing.
+                * This is a REALLY heavyweight approach, but the use of
+                * bmap on dirty files is expected to be extremely rare:
+                * only if we run lilo or swapon on a freshly made file
+                * do we expect this to happen.
+                *
+                * (bmap requires CAP_SYS_RAWIO so this does not
+                * represent an unprivileged user DOS attack --- we'd be
+                * in trouble if mortal users could trigger this path at
+                * will.)
+                *
+                * NB. EXT4_STATE_JDATA is not set on files other than
+                * regular files.  If somebody wants to bmap a directory
+                * or symlink and gets confused because the buffer
+                * hasn't yet been flushed to disk, they deserve
+                * everything they get.
                 */
-               return block_write_full_page(page,
-                                               ext4_normal_get_block_write,
-                                               wbc);
+
+               EXT4_I(inode)->i_state &= ~EXT4_STATE_JDATA;
+               journal = EXT4_JOURNAL(inode);
+               jbd2_journal_lock_updates(journal);
+               err = jbd2_journal_flush(journal);
+               jbd2_journal_unlock_updates(journal);
+
+               if (err)
+                       return 0;
        }
-no_write:
-       redirty_page_for_writepage(wbc, page);
-       unlock_page(page);
-       return 0;
+
+       return generic_block_bmap(mapping, block, ext4_get_block);
 }
 
 static int ext4_readpage(struct file *file, struct page *page)
@@ -3174,6 +3357,8 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
 }
 
 /*
+ * O_DIRECT for ext3 (or indirect map) based files
+ *
  * If the O_DIRECT write will extend the file then add this inode to the
  * orphan list.  So recovery will truncate it back to the original size
  * if the machine crashes during the write.
@@ -3182,9 +3367,9 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
  * crashes then stale disk data _may_ be exposed inside the file. But current
  * VFS code falls back into buffered path in that case so we are safe.
  */
-static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
-                       const struct iovec *iov, loff_t offset,
-                       unsigned long nr_segs)
+static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
+                             const struct iovec *iov, loff_t offset,
+                             unsigned long nr_segs)
 {
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
@@ -3193,6 +3378,7 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
        ssize_t ret;
        int orphan = 0;
        size_t count = iov_length(iov, nr_segs);
+       int retries = 0;
 
        if (rw == WRITE) {
                loff_t final_size = offset + count;
@@ -3215,9 +3401,12 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
                }
        }
 
+retry:
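+       /*
+        * On a transient ENOSPC, retry: ext4_should_retry_alloc()
+        * forces a journal commit, which can free recently-released
+        * blocks.
+        */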
        ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
                                 offset, nr_segs,
                                 ext4_get_block, NULL);
+       if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+               goto retry;
 
        if (orphan) {
                int err;
@@ -3256,6 +3445,359 @@ out:
        return ret;
 }
 
+/* Maximum number of blocks we map for direct IO at once. */
+
+static int ext4_get_block_dio_write(struct inode *inode, sector_t iblock,
+                  struct buffer_head *bh_result, int create)
+{
+       handle_t *handle = NULL;
+       int ret = 0;
+       unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
+       int dio_credits;
+
+       ext4_debug("ext4_get_block_dio_write: inode %lu, create flag %d\n",
+                  inode->i_ino, create);
+       /*
+        * The DIO VFS code passes create = 0 for writes to the middle
+        * of the file.  It does this to avoid block allocation for
+        * holes, so that stale data is not exposed to a parallel
+        * buffered read (which does not hold the i_mutex lock) while
+        * the direct IO write has not completed.  A DIO request on a
+        * hole therefore ends up falling back to buffered IO.
+        *
+        * For an ext4 extent-based file, since we support fallocate and
+        * newly allocated extents are marked uninitialized, we can
+        * fallocate blocks for holes; a parallel buffered read will
+        * then zero out the page when it reads a hole while the
+        * parallel DIO write to that hole has not yet completed.
+        *
+        * When we get here, we know it's a direct IO write to the
+        * middle of the file (< i_size), so it's safe to override the
+        * create flag from the VFS.
+        */
+       create = EXT4_GET_BLOCKS_DIO_CREATE_EXT;
+
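+       /*
+        * Cap the mapping at DIO_MAX_BLOCKS so that the journal credits
+        * needed by a single handle stay bounded; a larger DIO simply
+        * comes back here for the remaining blocks.
+        */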
+       if (max_blocks > DIO_MAX_BLOCKS)
+               max_blocks = DIO_MAX_BLOCKS;
+       dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
+       handle = ext4_journal_start(inode, dio_credits);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               goto out;
+       }
+       ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
+                             create);
+       if (ret > 0) {
+               bh_result->b_size = (ret << inode->i_blkbits);
+               ret = 0;
+       }
+       ext4_journal_stop(handle);
+out:
+       return ret;
+}
+
+static void ext4_free_io_end(ext4_io_end_t *io)
+{
+       BUG_ON(!io);
+       iput(io->inode);
+       kfree(io);
+}
+
+static void dump_aio_dio_list(struct inode *inode)
+{
+#ifdef EXT4_DEBUG
+       struct list_head *cur, *before, *after;
+       ext4_io_end_t *io, *io0, *io1;
+
+       if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)) {
+               ext4_debug("inode %lu aio dio list is empty\n", inode->i_ino);
+               return;
+       }
+
+       ext4_debug("Dump inode %lu aio_dio_completed_IO list \n", inode->i_ino);
+       list_for_each_entry(io, &EXT4_I(inode)->i_aio_dio_complete_list, list){
+               cur = &io->list;
+               before = cur->prev;
+               io0 = container_of(before, ext4_io_end_t, list);
+               after = cur->next;
+               io1 = container_of(after, ext4_io_end_t, list);
+
+               ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
+                           io, inode->i_ino, io0, io1);
+       }
+#endif
+}
+
+/*
+ * check a range of space and convert unwritten extents to written.
+ */
+static int ext4_end_aio_dio_nolock(ext4_io_end_t *io)
+{
+       struct inode *inode = io->inode;
+       loff_t offset = io->offset;
+       size_t size = io->size;
+       int ret = 0;
+
+       ext4_debug("end_aio_dio_onlock: io 0x%p from inode %lu,list->next 0x%p,"
+                  "list->prev 0x%p\n",
+                  io, inode->i_ino, io->list.next, io->list.prev);
+
+       if (list_empty(&io->list))
+               return ret;
+
+       if (io->flag != DIO_AIO_UNWRITTEN)
+               return ret;
+
+       if (offset + size <= i_size_read(inode))
+               ret = ext4_convert_unwritten_extents(inode, offset, size);
+
+       if (ret < 0) {
+               printk(KERN_EMERG "%s: failed to convert unwritten"
+                       "extents to written extents, error is %d"
+                       " io is still on inode %lu aio dio list\n",
+                       __func__, ret, inode->i_ino);
+               return ret;
+       }
+
+       /* clear the DIO AIO unwritten flag */
+       io->flag = 0;
+       return ret;
+}
+
+/*
+ * work on completed aio dio IO, to convert unwritten extents to extents
+ */
+static void ext4_end_aio_dio_work(struct work_struct *work)
+{
+       ext4_io_end_t *io  = container_of(work, ext4_io_end_t, work);
+       struct inode *inode = io->inode;
+       int ret = 0;
+
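+       /*
+        * i_mutex serializes us with flush_aio_dio_completed_IO(),
+        * which runs from the fsync path with i_mutex held.
+        */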
+       mutex_lock(&inode->i_mutex);
+       ret = ext4_end_aio_dio_nolock(io);
+       if (ret >= 0) {
+               if (!list_empty(&io->list))
+                       list_del_init(&io->list);
+               ext4_free_io_end(io);
+       }
+       mutex_unlock(&inode->i_mutex);
+}
+
+/*
+ * This function is called from ext4_sync_file().
+ *
+ * When an AIO DIO IO is completed, the work to convert unwritten
+ * extents to written is queued on a workqueue but may not get
+ * immediately scheduled. When fsync is called, we need to ensure the
+ * conversion is complete before fsync returns.
+ * The inode keeps track of a list of completed AIOs from the DIO path
+ * that might need the conversion. This function walks through
+ * the list and converts the related unwritten extents to written.
+ */
+int flush_aio_dio_completed_IO(struct inode *inode)
+{
+       ext4_io_end_t *io;
+       int ret = 0;
+       int ret2 = 0;
+
+       if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list))
+               return ret;
+
+       dump_aio_dio_list(inode);
+       while (!list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)) {
+               io = list_entry(EXT4_I(inode)->i_aio_dio_complete_list.next,
+                               ext4_io_end_t, list);
+               /*
+                * Calling ext4_end_aio_dio_nolock() to convert completed
+                * IO to written.
+                *
+                * When ext4_sync_file() is called, run_queue() may already
+                * be about to flush the work corresponding to this io
+                * structure, and it will be upset if it finds that the io
+                * structure related to the work to be scheduled has been
+                * freed.
+                *
+                * Thus we need to keep the io structure valid here after
+                * the conversion has finished. The io structure has a flag
+                * to avoid double conversion, from both fsync and the
+                * background workqueue.
+                */
+               ret = ext4_end_aio_dio_nolock(io);
+               if (ret < 0)
+                       ret2 = ret;
+               else
+                       list_del_init(&io->list);
+       }
+       return (ret2 < 0) ? ret2 : 0;
+}
+
+
+static ext4_io_end_t *ext4_init_io_end(struct inode *inode)
+{
+       ext4_io_end_t *io = NULL;
+
+       io = kmalloc(sizeof(*io), GFP_NOFS);
+
+       if (io) {
+               igrab(inode);
+               io->inode = inode;
+               io->flag = 0;
+               io->offset = 0;
+               io->size = 0;
+               io->error = 0;
+               INIT_WORK(&io->work, ext4_end_aio_dio_work);
+               INIT_LIST_HEAD(&io->list);
+       }
+
+       return io;
+}
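+
+/*
+ * For reference, the matching teardown.  ext4_free_io_end() is defined
+ * elsewhere in this patch; the body below is an assumption about its
+ * shape, shown only to make the igrab()/iput() pairing explicit:
+ *
+ *	static void ext4_free_io_end(ext4_io_end_t *io)
+ *	{
+ *		if (io) {
+ *			iput(io->inode);
+ *			kfree(io);
+ *		}
+ *	}
+ */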
+
+static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
+                           ssize_t size, void *private)
+{
+       ext4_io_end_t *io_end = iocb->private;
+       struct workqueue_struct *wq;
+
+       /* if not async direct IO or dio with 0 bytes write, just return */
+       if (!io_end || !size)
+               return;
+
+       ext_debug("ext4_end_io_dio(): io_end 0x%p "
+                 "for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
+                 iocb->private, io_end->inode->i_ino, iocb, offset,
+                 size);
+
+       /* if not aio dio with unwritten extents, just free io and return */
+       if (io_end->flag != DIO_AIO_UNWRITTEN) {
+               ext4_free_io_end(io_end);
+               iocb->private = NULL;
+               return;
+       }
+
+       io_end->offset = offset;
+       io_end->size = size;
+       wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
+
+       /*
+        * Add the io_end to the per-inode completed aio dio list before
+        * queueing the work, so the work handler cannot free the io_end
+        * while it is still being linked.
+        */
+       list_add_tail(&io_end->list,
+                &EXT4_I(io_end->inode)->i_aio_dio_complete_list);
+
+       /* queue the work to convert unwritten extents to written */
+       queue_work(wq, &io_end->work);
+       iocb->private = NULL;
+}
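+
+/*
+ * dio_unwritten_wq is assumed to be created per-superblock on the
+ * super.c side of this patch (a sketch; the exact workqueue name and
+ * error label below are assumptions):
+ *
+ *	sbi->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten");
+ *	if (!sbi->dio_unwritten_wq)
+ *		goto failed_mount_wq;
+ */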
+
+/*
+ * For ext4 extent files, ext4 will do direct-io writes to holes,
+ * preallocated extents, and writes that extend the file, with no need
+ * to fall back to buffered IO.
+ *
+ * For holes, we fallocate those blocks, marking them as uninitialized.
+ * If those blocks were preallocated, we make sure they are split, but
+ * still keep the range to write as uninitialized.
+ *
+ * The unwritten extents will be converted to written when DIO is
+ * completed. For async direct IO, since the IO may still be pending
+ * when we return, we set up an end_io callback function, which will do
+ * the conversion when the async direct IO completes.
+ *
+ * If the O_DIRECT write will extend the file then add this inode to the
+ * orphan list.  So recovery will truncate it back to the original size
+ * if the machine crashes during the write.
+ */
+static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
+                             const struct iovec *iov, loff_t offset,
+                             unsigned long nr_segs)
+{
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file->f_mapping->host;
+       ssize_t ret;
+       size_t count = iov_length(iov, nr_segs);
+       loff_t final_size = offset + count;
+
+       if (rw == WRITE && final_size <= inode->i_size) {
+               /*
+                * We could do a direct write to holes and fallocated
+                * extents.
+                *
+                * Blocks allocated to fill the hole are marked as
+                * uninitialized to prevent a parallel buffered read
+                * from exposing the stale data before DIO completes
+                * the data IO.
+                *
+                * As for previously fallocated extents, ext4's
+                * get_block will simply mark the buffer mapped but
+                * still keep the extents uninitialized.
+                *
+                * For the non-AIO case, we will convert those unwritten
+                * extents to written after returning from
+                * blockdev_direct_IO.
+                *
+                * For async DIO, the conversion needs to be deferred
+                * until the IO is completed. The ext4 end_io callback
+                * function will be called to take care of the
+                * conversion work. Here, for the async case, we
+                * allocate an io_end structure to hook onto the iocb.
+                */
+               iocb->private = NULL;
+               EXT4_I(inode)->cur_aio_dio = NULL;
+               if (!is_sync_kiocb(iocb)) {
+                       iocb->private = ext4_init_io_end(inode);
+                       if (!iocb->private)
+                               return -ENOMEM;
+                       /*
+                        * We save the io structure for the current
+                        * async direct IO, so that later
+                        * ext4_get_blocks() can flag the io structure
+                        * if there are unwritten extents that need to
+                        * be converted when the IO completes.
+                        */
+                       EXT4_I(inode)->cur_aio_dio = iocb->private;
+               }
+
+               ret = blockdev_direct_IO(rw, iocb, inode,
+                                        inode->i_sb->s_bdev, iov,
+                                        offset, nr_segs,
+                                        ext4_get_block_dio_write,
+                                        ext4_end_io_dio);
+               if (iocb->private)
+                       EXT4_I(inode)->cur_aio_dio = NULL;
+               /*
+                * The io_end structure takes a reference to the inode;
+                * that structure needs to be destroyed and the inode
+                * reference dropped when the IO is complete, even for
+                * a 0 byte write or on failure.
+                *
+                * In the successful AIO DIO case, the io_end structure
+                * will be destroyed and the inode reference dropped
+                * after the end_io callback function is called.
+                *
+                * In the 0 byte write or error case, since the VFS
+                * direct IO code won't invoke the end_io callback
+                * function, we need to free the io_end structure here.
+                */
+               if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
+                       ext4_free_io_end(iocb->private);
+                       iocb->private = NULL;
+               } else if (ret > 0) {
+                       /*
+                        * For the non-AIO case, since the IO is already
+                        * completed, we can do the conversion right
+                        * here.
+                        */
+                       ret = ext4_convert_unwritten_extents(inode,
+                                                            offset, ret);
+               }
+               return ret;
+       }
+
+       /* for writes past the end of the file, we fall back to the old way */
+       return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
+}
+
+static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
+                             const struct iovec *iov, loff_t offset,
+                             unsigned long nr_segs)
+{
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file->f_mapping->host;
+
+       if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
+               return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
+
+       return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
+}
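+
+/*
+ * For context, a minimal userspace sketch of the I/O that exercises
+ * the async branch above (plain libaio usage; nothing here is an ext4
+ * internal, and buf must be block-aligned for O_DIRECT):
+ *
+ *	int fd = open("file", O_RDWR | O_DIRECT);
+ *	struct iocb cb, *cbs[1] = { &cb };
+ *	io_prep_pwrite(&cb, fd, buf, len, off);
+ *	io_submit(ctx, 1, cbs);
+ */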
+
 /*
  * Pages can be marked dirty completely asynchronously from ext4's journalling
  * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
@@ -3278,7 +3820,7 @@ static int ext4_journalled_set_page_dirty(struct page *page)
 static const struct address_space_operations ext4_ordered_aops = {
        .readpage               = ext4_readpage,
        .readpages              = ext4_readpages,
-       .writepage              = ext4_normal_writepage,
+       .writepage              = ext4_writepage,
        .sync_page              = block_sync_page,
        .write_begin            = ext4_write_begin,
        .write_end              = ext4_ordered_write_end,
@@ -3288,12 +3830,13 @@ static const struct address_space_operations ext4_ordered_aops = {
        .direct_IO              = ext4_direct_IO,
        .migratepage            = buffer_migrate_page,
        .is_partially_uptodate  = block_is_partially_uptodate,
+       .error_remove_page      = generic_error_remove_page,
 };
 
 static const struct address_space_operations ext4_writeback_aops = {
        .readpage               = ext4_readpage,
        .readpages              = ext4_readpages,
-       .writepage              = ext4_normal_writepage,
+       .writepage              = ext4_writepage,
        .sync_page              = block_sync_page,
        .write_begin            = ext4_write_begin,
        .write_end              = ext4_writeback_write_end,
@@ -3303,12 +3846,13 @@ static const struct address_space_operations ext4_writeback_aops = {
        .direct_IO              = ext4_direct_IO,
        .migratepage            = buffer_migrate_page,
        .is_partially_uptodate  = block_is_partially_uptodate,
+       .error_remove_page      = generic_error_remove_page,
 };
 
 static const struct address_space_operations ext4_journalled_aops = {
        .readpage               = ext4_readpage,
        .readpages              = ext4_readpages,
-       .writepage              = ext4_journalled_writepage,
+       .writepage              = ext4_writepage,
        .sync_page              = block_sync_page,
        .write_begin            = ext4_write_begin,
        .write_end              = ext4_journalled_write_end,
@@ -3317,12 +3861,13 @@ static const struct address_space_operations ext4_journalled_aops = {
        .invalidatepage         = ext4_invalidatepage,
        .releasepage            = ext4_releasepage,
        .is_partially_uptodate  = block_is_partially_uptodate,
+       .error_remove_page      = generic_error_remove_page,
 };
 
 static const struct address_space_operations ext4_da_aops = {
        .readpage               = ext4_readpage,
        .readpages              = ext4_readpages,
-       .writepage              = ext4_da_writepage,
+       .writepage              = ext4_writepage,
        .writepages             = ext4_da_writepages,
        .sync_page              = block_sync_page,
        .write_begin            = ext4_da_write_begin,
@@ -3333,6 +3878,7 @@ static const struct address_space_operations ext4_da_aops = {
        .direct_IO              = ext4_direct_IO,
        .migratepage            = buffer_migrate_page,
        .is_partially_uptodate  = block_is_partially_uptodate,
+       .error_remove_page      = generic_error_remove_page,
 };
 
 void ext4_set_aops(struct inode *inode)
@@ -3369,7 +3915,8 @@ int ext4_block_truncate_page(handle_t *handle,
        struct page *page;
        int err = 0;
 
-       page = grab_cache_page(mapping, from >> PAGE_CACHE_SHIFT);
+       page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
+                                  mapping_gfp_mask(mapping) & ~__GFP_FS);
        if (!page)
                return -EINVAL;
 
@@ -3504,13 +4051,14 @@ static inline int all_zeroes(__le32 *p, __le32 *q)
  *                     (no partially truncated stuff there).  */
 
 static Indirect *ext4_find_shared(struct inode *inode, int depth,
-                       ext4_lblk_t offsets[4], Indirect chain[4], __le32 *top)
+                                 ext4_lblk_t offsets[4], Indirect chain[4],
+                                 __le32 *top)
 {
        Indirect *partial, *p;
        int k, err;
 
        *top = 0;
-       /* Make k index the deepest non-null offest + 1 */
+       /* Make k index the deepest non-null offset + 1 */
        for (k = depth; k > 1 && !offsets[k-1]; k--)
                ;
        partial = ext4_get_branch(inode, k, offsets, chain, &err);
@@ -3560,8 +4108,10 @@ no_top:
  * than `count' because there can be holes in there.
  */
 static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
-               struct buffer_head *bh, ext4_fsblk_t block_to_free,
-               unsigned long count, __le32 *first, __le32 *last)
+                             struct buffer_head *bh,
+                             ext4_fsblk_t block_to_free,
+                             unsigned long count, __le32 *first,
+                             __le32 *last)
 {
        __le32 *p;
        if (try_to_extend_transaction(handle, inode)) {
@@ -3570,7 +4120,8 @@ static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
                        ext4_handle_dirty_metadata(handle, inode, bh);
                }
                ext4_mark_inode_dirty(handle, inode);
-               ext4_journal_test_restart(handle, inode);
+               ext4_truncate_restart_trans(handle, inode,
+                                           blocks_for_truncate(inode));
                if (bh) {
                        BUFFER_TRACE(bh, "retaking write access");
                        ext4_journal_get_write_access(handle, bh);
@@ -3578,10 +4129,11 @@ static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
        }
 
        /*
-        * Any buffers which are on the journal will be in memory. We find
-        * them on the hash table so jbd2_journal_revoke() will run jbd2_journal_forget()
-        * on them.  We've already detached each block from the file, so
-        * bforget() in jbd2_journal_forget() should be safe.
+        * Any buffers which are on the journal will be in memory. We
+        * find them on the hash table so jbd2_journal_revoke() will
+        * run jbd2_journal_forget() on them.  We've already detached
+        * each block from the file, so bforget() in
+        * jbd2_journal_forget() should be safe.
         *
         * AKPM: turn on bforget in jbd2_journal_forget()!!!
         */
@@ -3780,7 +4332,8 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
                                return;
                        if (try_to_extend_transaction(handle, inode)) {
                                ext4_mark_inode_dirty(handle, inode);
-                               ext4_journal_test_restart(handle, inode);
+                               ext4_truncate_restart_trans(handle, inode,
+                                           blocks_for_truncate(inode));
                        }
 
                        ext4_free_blocks(handle, inode, nr, 1, 1);
@@ -3868,6 +4421,9 @@ void ext4_truncate(struct inode *inode)
        if (!ext4_can_truncate(inode))
                return;
 
+       if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
+               ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE;
+
        if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
                ext4_ext_truncate(inode);
                return;
@@ -3949,7 +4505,7 @@ void ext4_truncate(struct inode *inode)
                                   (__le32*)partial->bh->b_data+addr_per_block,
                                   (chain+n-1) - partial);
                BUFFER_TRACE(partial->bh, "call brelse");
-               brelse (partial->bh);
+               brelse(partial->bh);
                partial--;
        }
 do_indirects:
@@ -4110,12 +4666,7 @@ make_io:
                        unsigned num;
 
                        table = ext4_inode_table(sb, gdp);
-                       /* Make sure s_inode_readahead_blks is a power of 2 */
-                       while (EXT4_SB(sb)->s_inode_readahead_blks &
-                              (EXT4_SB(sb)->s_inode_readahead_blks-1))
-                               EXT4_SB(sb)->s_inode_readahead_blks = 
-                                  (EXT4_SB(sb)->s_inode_readahead_blks &
-                                   (EXT4_SB(sb)->s_inode_readahead_blks-1));
+                       /* s_inode_readahead_blks is always a power of 2 */
                        b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
                        if (table > b)
                                b = table;
@@ -4195,8 +4746,9 @@ void ext4_get_inode_flags(struct ext4_inode_info *ei)
        if (flags & S_DIRSYNC)
                ei->i_flags |= EXT4_DIRSYNC_FL;
 }
+
 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
-                                       struct ext4_inode_info *ei)
+                                 struct ext4_inode_info *ei)
 {
        blkcnt_t i_blocks;
        struct inode *inode = &(ei->vfs_inode);
@@ -4235,10 +4787,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                return inode;
 
        ei = EXT4_I(inode);
-#ifdef CONFIG_EXT4_FS_POSIX_ACL
-       ei->i_acl = EXT4_ACL_NOT_CACHED;
-       ei->i_default_acl = EXT4_ACL_NOT_CACHED;
-#endif
 
        ret = __ext4_get_inode_loc(inode, &iloc, 0);
        if (ret < 0)
@@ -4278,15 +4826,14 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
        ei->i_flags = le32_to_cpu(raw_inode->i_flags);
        inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
        ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
-       if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
-           cpu_to_le32(EXT4_OS_HURD)) {
+       if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
                ei->i_file_acl |=
                        ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
-       }
        inode->i_size = ext4_isize(raw_inode);
        ei->i_disksize = inode->i_size;
        inode->i_generation = le32_to_cpu(raw_inode->i_generation);
        ei->i_block_group = iloc.block_group;
+       ei->i_last_alloc_group = ~0;
        /*
         * NOTE! The in-memory inode i_data array is in little-endian order
         * even on big-endian machines: we do NOT byteswap the block numbers!
@@ -4312,7 +4859,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                                        EXT4_GOOD_OLD_INODE_SIZE +
                                        ei->i_extra_isize;
                        if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
-                                ei->i_state |= EXT4_STATE_XATTR;
+                               ei->i_state |= EXT4_STATE_XATTR;
                }
        } else
                ei->i_extra_isize = 0;
@@ -4329,6 +4876,34 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                        (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
        }
 
+       ret = 0;
+       if (ei->i_file_acl &&
+           ((ei->i_file_acl <
+             (le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) +
+              EXT4_SB(sb)->s_gdb_count)) ||
+            (ei->i_file_acl >= ext4_blocks_count(EXT4_SB(sb)->s_es)))) {
+               ext4_error(sb, __func__,
+                          "bad extended attribute block %llu in inode #%lu",
+                          ei->i_file_acl, inode->i_ino);
+               ret = -EIO;
+               goto bad_inode;
+       } else if (ei->i_flags & EXT4_EXTENTS_FL) {
+               if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+                   (S_ISLNK(inode->i_mode) &&
+                    !ext4_inode_is_fast_symlink(inode)))
+                       /* Validate extent which is part of inode */
+                       ret = ext4_ext_check_inode(inode);
+       } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
+                  (S_ISLNK(inode->i_mode) &&
+                   !ext4_inode_is_fast_symlink(inode))) {
+               /* Validate block references which are part of inode */
+               ret = ext4_check_inode_blockref(inode);
+       }
+       if (ret) {
+               brelse(bh);
+               goto bad_inode;
+       }
+
        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &ext4_file_inode_operations;
                inode->i_fop = &ext4_file_operations;
@@ -4345,7 +4920,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                        inode->i_op = &ext4_symlink_inode_operations;
                        ext4_set_aops(inode);
                }
-       } else {
+       } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
+             S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
                inode->i_op = &ext4_special_inode_operations;
                if (raw_inode->i_block[0])
                        init_special_inode(inode, inode->i_mode,
@@ -4353,6 +4929,13 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                else
                        init_special_inode(inode, inode->i_mode,
                           new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
+       } else {
+               brelse(bh);
+               ret = -EIO;
+               ext4_error(inode->i_sb, __func__,
+                          "bogus i_mode (%o) for inode=%lu",
+                          inode->i_mode, inode->i_ino);
+               goto bad_inode;
        }
        brelse(iloc.bh);
        ext4_set_inode_flags(inode);
@@ -4460,8 +5043,7 @@ static int ext4_do_update_inode(handle_t *handle,
        if (ext4_inode_blocks_set(handle, raw_inode, ei))
                goto out_brelse;
        raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
-       /* clear the migrate flag in the raw_inode */
-       raw_inode->i_flags = cpu_to_le32(ei->i_flags & ~EXT4_EXT_MIGRATE);
+       raw_inode->i_flags = cpu_to_le32(ei->i_flags);
        if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
            cpu_to_le32(EXT4_OS_HURD))
                raw_inode->i_file_acl_high =
@@ -4502,8 +5084,9 @@ static int ext4_do_update_inode(handle_t *handle,
                                cpu_to_le32(new_encode_dev(inode->i_rdev));
                        raw_inode->i_block[2] = 0;
                }
-       } else for (block = 0; block < EXT4_N_BLOCKS; block++)
-               raw_inode->i_block[block] = ei->i_data[block];
+       } else
+               for (block = 0; block < EXT4_N_BLOCKS; block++)
+                       raw_inode->i_block[block] = ei->i_data[block];
 
        raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
        if (ei->i_extra_isize) {
@@ -4562,34 +5145,36 @@ out_brelse:
  */
 int ext4_write_inode(struct inode *inode, int wait)
 {
+       int err;
+
        if (current->flags & PF_MEMALLOC)
                return 0;
 
-       if (ext4_journal_current_handle()) {
-               jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
-               dump_stack();
-               return -EIO;
-       }
-
-       if (!wait)
-               return 0;
+       if (EXT4_SB(inode->i_sb)->s_journal) {
+               if (ext4_journal_current_handle()) {
+                       jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
+                       dump_stack();
+                       return -EIO;
+               }
 
-       return ext4_force_commit(inode->i_sb);
-}
+               if (!wait)
+                       return 0;
 
-int __ext4_write_dirty_metadata(struct inode *inode, struct buffer_head *bh)
-{
-       int err = 0;
+               err = ext4_force_commit(inode->i_sb);
+       } else {
+               struct ext4_iloc iloc;
 
-       mark_buffer_dirty(bh);
-       if (inode && inode_needs_sync(inode)) {
-               sync_dirty_buffer(bh);
-               if (buffer_req(bh) && !buffer_uptodate(bh)) {
+               err = ext4_get_inode_loc(inode, &iloc);
+               if (err)
+                       return err;
+               if (wait)
+                       sync_dirty_buffer(iloc.bh);
+               if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
                        ext4_error(inode->i_sb, __func__,
                                   "IO error syncing inode, "
                                   "inode=%lu, block=%llu",
                                   inode->i_ino,
-                                  (unsigned long long)bh->b_blocknr);
+                                  (unsigned long long)iloc.bh->b_blocknr);
                        err = -EIO;
                }
        }
@@ -4785,14 +5370,15 @@ static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
  * worst case, the index blocks spread over different block groups
  *
  * If datablocks are discontiguous, they are possible to spread over
- * different block groups too. If they are contiugous, with flexbg,
+ * different block groups too. If they are contiguous, with flexbg,
  * they could still cross a block group boundary.
  *
  * Also account for superblock, inode, quota and xattr blocks
  */
 int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
 {
-       int groups, gdpblocks;
+       ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
+       int gdpblocks;
        int idxblocks;
        int ret = 0;
 
@@ -4819,8 +5405,8 @@ int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
                groups += nrblocks;
 
        gdpblocks = groups;
-       if (groups > EXT4_SB(inode->i_sb)->s_groups_count)
-               groups = EXT4_SB(inode->i_sb)->s_groups_count;
+       if (groups > ngroups)
+               groups = ngroups;
        if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
                gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
 
@@ -4860,7 +5446,7 @@ int ext4_writepage_trans_blocks(struct inode *inode)
  * Calculate the journal credits for a chunk of data modification.
  *
  * This is called from DIO, fallocate or whoever calls
- * ext4_get_blocks_wrap() to map/allocate a chunk of contigous disk blocks.
+ * ext4_get_blocks() to map/allocate a chunk of contiguous disk blocks.
  *
  * journal buffers for data blocks are not included here, as DIO
  * and fallocate do not need to journal data buffers.
@@ -4875,7 +5461,7 @@ int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
  * Given this, we know that the caller already has write access to iloc->bh.
  */
 int ext4_mark_iloc_dirty(handle_t *handle,
-               struct inode *inode, struct ext4_iloc *iloc)
+                        struct inode *inode, struct ext4_iloc *iloc)
 {
        int err = 0;
 
@@ -5030,27 +5616,14 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
  */
 void ext4_dirty_inode(struct inode *inode)
 {
-       handle_t *current_handle = ext4_journal_current_handle();
        handle_t *handle;
 
-       if (!ext4_handle_valid(current_handle)) {
-               ext4_mark_inode_dirty(current_handle, inode);
-               return;
-       }
-
        handle = ext4_journal_start(inode, 2);
        if (IS_ERR(handle))
                goto out;
-       if (current_handle &&
-               current_handle->h_transaction != handle->h_transaction) {
-               /* This task has a transaction open against a different fs */
-               printk(KERN_EMERG "%s: transactions do not match!\n",
-                      __func__);
-       } else {
-               jbd_debug(5, "marking dirty.  outer handle=%p\n",
-                               current_handle);
-               ext4_mark_inode_dirty(handle, inode);
-       }
+
+       ext4_mark_inode_dirty(handle, inode);
+
        ext4_journal_stop(handle);
 out:
        return;
@@ -5146,8 +5719,9 @@ static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
        return !buffer_mapped(bh);
 }
 
-int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page)
+int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
+       struct page *page = vmf->page;
        loff_t size;
        unsigned long len;
        int ret = -EINVAL;
@@ -5176,12 +5750,21 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page)
        else
                len = PAGE_CACHE_SIZE;
 
+       lock_page(page);
+       /*
+        * Return if we have all the buffers mapped. This avoids the
+        * need to call write_begin/write_end, which does a
+        * journal_start/journal_stop that can block and take a long
+        * time.
+        */
        if (page_has_buffers(page)) {
-               /* return if we have all the buffers mapped */
                if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
-                                      ext4_bh_unmapped))
+                                       ext4_bh_unmapped)) {
+                       unlock_page(page);
                        goto out_unlock;
+               }
        }
+       unlock_page(page);
        /*
         * OK, we need to fill the hole... Do write_begin write_end
         * to do block allocation/reservation.We are not holding
@@ -5199,6 +5782,8 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page)
                goto out_unlock;
        ret = 0;
 out_unlock:
+       if (ret)
+               ret = VM_FAULT_SIGBUS;
        up_read(&inode->i_alloc_sem);
        return ret;
 }
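+
+/*
+ * With the new ->page_mkwrite() prototype, the mmap wiring in
+ * fs/ext4/file.c is assumed to stay as below (a sketch; this is not
+ * shown in the hunk above):
+ *
+ *	static struct vm_operations_struct ext4_file_vm_ops = {
+ *		.fault		= filemap_fault,
+ *		.page_mkwrite	= ext4_page_mkwrite,
+ *	};
+ */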