ext4: return correct wbc.nr_to_write in ext4_da_writepages
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 9887a0c..3e3b454 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -37,6 +37,7 @@
 #include <linux/namei.h>
 #include <linux/uio.h>
 #include <linux/bio.h>
+#include <linux/workqueue.h>
 
 #include "ext4_jbd2.h"
 #include "xattr.h"
@@ -70,58 +71,6 @@ static int ext4_inode_is_fast_symlink(struct inode *inode)
 }
 
 /*
- * The ext4 forget function must perform a revoke if we are freeing data
- * which has been journaled.  Metadata (eg. indirect blocks) must be
- * revoked in all cases.
- *
- * "bh" may be NULL: a metadata block may have been freed from memory
- * but there may still be a record of it in the journal, and that record
- * still needs to be revoked.
- *
- * If the handle isn't valid we're not journaling, but we still need to
- * call into ext4_journal_revoke() to put the buffer head.
- */
-int ext4_forget(handle_t *handle, int is_metadata, struct inode *inode,
-               struct buffer_head *bh, ext4_fsblk_t blocknr)
-{
-       int err;
-
-       might_sleep();
-
-       BUFFER_TRACE(bh, "enter");
-
-       jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
-                 "data mode %x\n",
-                 bh, is_metadata, inode->i_mode,
-                 test_opt(inode->i_sb, DATA_FLAGS));
-
-       /* Never use the revoke function if we are doing full data
-        * journaling: there is no need to, and a V1 superblock won't
-        * support it.  Otherwise, only skip the revoke on un-journaled
-        * data blocks. */
-
-       if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA ||
-           (!is_metadata && !ext4_should_journal_data(inode))) {
-               if (bh) {
-                       BUFFER_TRACE(bh, "call jbd2_journal_forget");
-                       return ext4_journal_forget(handle, bh);
-               }
-               return 0;
-       }
-
-       /*
-        * data!=journal && (is_metadata || should_journal_data(inode))
-        */
-       BUFFER_TRACE(bh, "call ext4_journal_revoke");
-       err = ext4_journal_revoke(handle, blocknr, bh);
-       if (err)
-               ext4_abort(inode->i_sb, __func__,
-                          "error %d when attempting revoke", err);
-       BUFFER_TRACE(bh, "exit");
-       return err;
-}
-
-/*
  * Work out how many blocks we need to proceed with the next chunk of a
  * truncate transaction.
  */
@@ -192,7 +141,7 @@ static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
  * so before we call here everything must be consistently dirtied against
  * this transaction.
  */
- int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
+int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
                                 int nblocks)
 {
        int ret;
@@ -208,6 +157,7 @@ static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
        up_write(&EXT4_I(inode)->i_data_sem);
        ret = ext4_journal_restart(handle, blocks_for_truncate(inode));
        down_write(&EXT4_I(inode)->i_data_sem);
+       ext4_discard_preallocations(inode);
 
        return ret;
 }
@@ -719,7 +669,7 @@ allocated:
        return ret;
 failed_out:
        for (i = 0; i < index; i++)
-               ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
+               ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);
        return ret;
 }
 
@@ -815,14 +765,20 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
        return err;
 failed:
        /* Allocation failed, free what we already allocated */
+       ext4_free_blocks(handle, inode, 0, new_blocks[0], 1, 0);
        for (i = 1; i <= n ; i++) {
-               BUFFER_TRACE(branch[i].bh, "call jbd2_journal_forget");
-               ext4_journal_forget(handle, branch[i].bh);
+               /* 
+                * branch[i].bh is newly allocated, so there is no
+                * need to revoke the block, which is why we don't
+                * need to set EXT4_FREE_BLOCKS_METADATA.
+                */
+               ext4_free_blocks(handle, inode, 0, new_blocks[i], 1,
+                                EXT4_FREE_BLOCKS_FORGET);
        }
-       for (i = 0; i < indirect_blks; i++)
-               ext4_free_blocks(handle, inode, new_blocks[i], 1, 0);
+       for (i = n+1; i < indirect_blks; i++)
+               ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0);
 
-       ext4_free_blocks(handle, inode, new_blocks[i], num, 0);
+       ext4_free_blocks(handle, inode, 0, new_blocks[i], num, 0);
 
        return err;
 }
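The calls above, and the truncate-path hunks later in this patch, use the reworked ext4_free_blocks() interface, which folds the old ext4_forget() logic (removed at the top of this patch) into the free path. A sketch of the signature and flags, inferred from these call sites; the authoritative definitions live in fs/ext4/ext4.h:

        void ext4_free_blocks(handle_t *handle, struct inode *inode,
                              struct buffer_head *bh, ext4_fsblk_t block,
                              unsigned long count, int flags);

        /*
         * flags is a bitmask:
         *   EXT4_FREE_BLOCKS_METADATA - the blocks are metadata; revoke
         *       them in the journal so a replay cannot resurrect their
         *       stale contents.
         *   EXT4_FREE_BLOCKS_FORGET   - call ext4_forget() on the buffer
         *       heads backing the freed blocks.
         */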
@@ -901,12 +857,16 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
 
 err_out:
        for (i = 1; i <= num; i++) {
-               BUFFER_TRACE(where[i].bh, "call jbd2_journal_forget");
-               ext4_journal_forget(handle, where[i].bh);
-               ext4_free_blocks(handle, inode,
-                                       le32_to_cpu(where[i-1].key), 1, 0);
+               /* 
+                * where[i].bh is newly allocated, so there is no
+                * need to revoke the block, which is why we don't
+                * need to set EXT4_FREE_BLOCKS_METADATA.
+                */
+               ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
+                                EXT4_FREE_BLOCKS_FORGET);
        }
-       ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks, 0);
+       ext4_free_blocks(handle, inode, 0, le32_to_cpu(where[num].key),
+                        blks, 0);
 
        return err;
 }
@@ -1019,10 +979,12 @@ static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
        if (!err)
                err = ext4_splice_branch(handle, inode, iblock,
                                         partial, indirect_blks, count);
-       else
+       if (err)
                goto cleanup;
 
        set_buffer_new(bh_result);
+
+       ext4_update_inode_fsync_trans(handle, inode, 1);
 got_it:
        map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
        if (count > blocks_to_boundary)
@@ -1041,17 +1003,12 @@ out:
        return err;
 }
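The ext4_update_inode_fsync_trans() call above records which transaction a subsequent fsync or fdatasync must wait on. A sketch of the helper as one would expect it in fs/ext4/ext4.h, assumed from how it is called here and in ext4_do_update_inode() below:

        static inline void ext4_update_inode_fsync_trans(handle_t *handle,
                                                         struct inode *inode,
                                                         int datasync)
        {
                struct ext4_inode_info *ei = EXT4_I(inode);

                /* no-journal handles carry no transaction to record */
                if (ext4_handle_valid(handle)) {
                        ei->i_sync_tid = handle->h_transaction->t_tid;
                        if (datasync)
                                ei->i_datasync_tid =
                                        handle->h_transaction->t_tid;
                }
        }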
 
-qsize_t ext4_get_reserved_space(struct inode *inode)
+#ifdef CONFIG_QUOTA
+qsize_t *ext4_get_reserved_space(struct inode *inode)
 {
-       unsigned long long total;
-
-       spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
-       total = EXT4_I(inode)->i_reserved_data_blocks +
-               EXT4_I(inode)->i_reserved_meta_blocks;
-       spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
-
-       return total;
+       return &EXT4_I(inode)->i_reserved_quota;
 }
+#endif
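The function now returns a pointer so the generic quota code can manage the inode's reserved space itself. Presumably it is wired into ext4's dquot_operations in fs/ext4/super.c along these lines (a sketch; that file is not part of this diff):

        static const struct dquot_operations ext4_quota_operations = {
                /* ... */
                .get_reserved_space = ext4_get_reserved_space,
                /* ... */
        };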
 /*
  * Calculate the number of metadata blocks that need to be reserved
  * to allocate @blocks for a non-extent-based file
@@ -1089,7 +1046,7 @@ static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
 static void ext4_da_update_reserve_space(struct inode *inode, int used)
 {
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-       int total, mdb, mdb_free;
+       int total, mdb, mdb_free, mdb_claim = 0;
 
        spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
        /* recalculate the number of metablocks still need to be reserved */
@@ -1102,7 +1059,9 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
 
        if (mdb_free) {
                /* Account for allocated meta_blocks */
-               mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks;
+               mdb_claim = EXT4_I(inode)->i_allocated_meta_blocks;
+               BUG_ON(mdb_free < mdb_claim);
+               mdb_free -= mdb_claim;
 
                /* update fs dirty blocks counter */
                percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
@@ -1113,8 +1072,11 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
        /* update per-inode reservations */
        BUG_ON(used  > EXT4_I(inode)->i_reserved_data_blocks);
        EXT4_I(inode)->i_reserved_data_blocks -= used;
+       percpu_counter_sub(&sbi->s_dirtyblocks_counter, used + mdb_claim);
        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 
+       vfs_dq_claim_block(inode, used + mdb_claim);
+
        /*
         * free those over-booking quota for metadata blocks
         */
@@ -1145,6 +1107,64 @@ static int check_block_validity(struct inode *inode, const char *msg,
 }
 
 /*
+ * Return the number of contiguous dirty pages in a given inode
+ * starting at page frame idx.
+ */
+static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
+                                   unsigned int max_pages)
+{
+       struct address_space *mapping = inode->i_mapping;
+       pgoff_t index;
+       struct pagevec pvec;
+       pgoff_t num = 0;
+       int i, nr_pages, done = 0;
+
+       if (max_pages == 0)
+               return 0;
+       pagevec_init(&pvec, 0);
+       while (!done) {
+               index = idx;
+               nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+                                             PAGECACHE_TAG_DIRTY,
+                                             (pgoff_t)PAGEVEC_SIZE);
+               if (nr_pages == 0)
+                       break;
+               for (i = 0; i < nr_pages; i++) {
+                       struct page *page = pvec.pages[i];
+                       struct buffer_head *bh, *head;
+
+                       lock_page(page);
+                       if (unlikely(page->mapping != mapping) ||
+                           !PageDirty(page) ||
+                           PageWriteback(page) ||
+                           page->index != idx) {
+                               done = 1;
+                               unlock_page(page);
+                               break;
+                       }
+                       if (page_has_buffers(page)) {
+                               bh = head = page_buffers(page);
+                               do {
+                                       if (!buffer_delay(bh) &&
+                                           !buffer_unwritten(bh))
+                                               done = 1;
+                                       bh = bh->b_this_page;
+                               } while (!done && (bh != head));
+                       }
+                       unlock_page(page);
+                       if (done)
+                               break;
+                       idx++;
+                       num++;
+                       if (num >= max_pages)
+                               break;
+               }
+               pagevec_release(&pvec);
+       }
+       return num;
+}
+
+/*
  * The ext4_get_blocks() function tries to look up the requested blocks,
  * and returns if the blocks are already mapped.
  *
@@ -1175,6 +1195,9 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
        clear_buffer_mapped(bh);
        clear_buffer_unwritten(bh);
 
+       ext_debug("ext4_get_blocks(): inode %lu, flag %d, max_blocks %u, "
+                 "logical block %lu\n", inode->i_ino, flags, max_blocks,
+                 (unsigned long)block);
        /*
         * Try to see if we can get the block without requesting a new
         * file system block.
@@ -1471,6 +1494,16 @@ static int do_journal_get_write_access(handle_t *handle,
        return ext4_journal_get_write_access(handle, bh);
 }
 
+/*
+ * Truncate blocks that were not used by write. We have to truncate the
+ * pagecache as well so that corresponding buffers get properly unmapped.
+ */
+static void ext4_truncate_failed_write(struct inode *inode)
+{
+       truncate_inode_pages(inode->i_mapping, inode->i_size);
+       ext4_truncate(inode);
+}
+
 static int ext4_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
@@ -1536,7 +1569,7 @@ retry:
 
                ext4_journal_stop(handle);
                if (pos + len > inode->i_size) {
-                       ext4_truncate(inode);
+                       ext4_truncate_failed_write(inode);
                        /*
                         * If truncate failed early the inode might
                         * still be on the orphan list; we need to
@@ -1646,7 +1679,7 @@ static int ext4_ordered_write_end(struct file *file,
                ret = ret2;
 
        if (pos + len > inode->i_size) {
-               ext4_truncate(inode);
+               ext4_truncate_failed_write(inode);
                /*
                 * If truncate failed early the inode might still be
                 * on the orphan list; we need to make sure the inode
@@ -1688,7 +1721,7 @@ static int ext4_writeback_write_end(struct file *file,
                ret = ret2;
 
        if (pos + len > inode->i_size) {
-               ext4_truncate(inode);
+               ext4_truncate_failed_write(inode);
                /*
                 * If truncate failed early the inode might still be
                 * on the orphan list; we need to make sure the inode
@@ -1751,7 +1784,7 @@ static int ext4_journalled_write_end(struct file *file,
        if (!ret)
                ret = ret2;
        if (pos + len > inode->i_size) {
-               ext4_truncate(inode);
+               ext4_truncate_failed_write(inode);
                /*
                 * If truncate failed early the inode might still be
                 * on the orphan list; we need to make sure the inode
@@ -1783,30 +1816,29 @@ repeat:
 
        md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
        total = md_needed + nrblocks;
+       spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 
        /*
         * Make quota reservation here to prevent quota overflow
         * later. Real quota accounting is done at pages writeout
         * time.
         */
-       if (vfs_dq_reserve_block(inode, total)) {
-               spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+       if (vfs_dq_reserve_block(inode, total))
                return -EDQUOT;
-       }
 
        if (ext4_claim_free_blocks(sbi, total)) {
-               spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+               vfs_dq_release_reservation_block(inode, total);
                if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
                        yield();
                        goto repeat;
                }
-               vfs_dq_release_reservation_block(inode, total);
                return -ENOSPC;
        }
+       spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
        EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
-       EXT4_I(inode)->i_reserved_meta_blocks = mdblocks;
-
+       EXT4_I(inode)->i_reserved_meta_blocks += md_needed;
        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+
        return 0;       /* success */
 }
 
@@ -2092,18 +2124,18 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
 static void ext4_print_free_blocks(struct inode *inode)
 {
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-       printk(KERN_EMERG "Total free blocks count %lld\n",
-                       ext4_count_free_blocks(inode->i_sb));
-       printk(KERN_EMERG "Free/Dirty block details\n");
-       printk(KERN_EMERG "free_blocks=%lld\n",
-                       (long long)percpu_counter_sum(&sbi->s_freeblocks_counter));
-       printk(KERN_EMERG "dirty_blocks=%lld\n",
-                       (long long)percpu_counter_sum(&sbi->s_dirtyblocks_counter));
-       printk(KERN_EMERG "Block reservation details\n");
-       printk(KERN_EMERG "i_reserved_data_blocks=%u\n",
-                       EXT4_I(inode)->i_reserved_data_blocks);
-       printk(KERN_EMERG "i_reserved_meta_blocks=%u\n",
-                       EXT4_I(inode)->i_reserved_meta_blocks);
+       printk(KERN_CRIT "Total free blocks count %lld\n",
+              ext4_count_free_blocks(inode->i_sb));
+       printk(KERN_CRIT "Free/Dirty block details\n");
+       printk(KERN_CRIT "free_blocks=%lld\n",
+              (long long) percpu_counter_sum(&sbi->s_freeblocks_counter));
+       printk(KERN_CRIT "dirty_blocks=%lld\n",
+              (long long) percpu_counter_sum(&sbi->s_dirtyblocks_counter));
+       printk(KERN_CRIT "Block reservation details\n");
+       printk(KERN_CRIT "i_reserved_data_blocks=%u\n",
+              EXT4_I(inode)->i_reserved_data_blocks);
+       printk(KERN_CRIT "i_reserved_meta_blocks=%u\n",
+              EXT4_I(inode)->i_reserved_meta_blocks);
        return;
 }
 
@@ -2189,14 +2221,14 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
                 * writepage and writepages will again try to write
                 * the same.
                 */
-               printk(KERN_EMERG "%s block allocation failed for inode %lu "
-                                 "at logical offset %llu with max blocks "
-                                 "%zd with error %d\n",
-                                 __func__, mpd->inode->i_ino,
-                                 (unsigned long long)next,
-                                 mpd->b_size >> mpd->inode->i_blkbits, err);
-               printk(KERN_EMERG "This should not happen.!! "
-                                       "Data will be lost\n");
+               ext4_msg(mpd->inode->i_sb, KERN_CRIT,
+                        "delayed block allocation failed for inode %lu at "
+                        "logical offset %llu with max blocks %zd with "
+                        "error %d\n", mpd->inode->i_ino,
+                        (unsigned long long) next,
+                        mpd->b_size >> mpd->inode->i_blkbits, err);
+               printk(KERN_CRIT "This should not happen!!  "
+                      "Data will be lost\n");
                if (err == -ENOSPC) {
                        ext4_print_free_blocks(mpd->inode);
                }
@@ -2337,7 +2369,7 @@ static int __mpage_da_writepage(struct page *page,
                /*
                 * Rest of the pages in the page_vec:
                 * redirty them and skip them. We will
-                * try to to write them again after
+                * try to write them again after
                 * starting a new transaction
                 */
                redirty_page_for_writepage(wbc, page);
@@ -2537,7 +2569,6 @@ static int bput_one(handle_t *handle, struct buffer_head *bh)
 }
 
 static int __ext4_journalled_writepage(struct page *page,
-                                      struct writeback_control *wbc,
                                       unsigned int len)
 {
        struct address_space *mapping = page->mapping;
@@ -2695,7 +2726,7 @@ static int ext4_writepage(struct page *page,
                 * doesn't seem much point in redirtying the page here.
                 */
                ClearPageChecked(page);
-               return __ext4_journalled_writepage(page, wbc, len);
+               return __ext4_journalled_writepage(page, len);
        }
 
        if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
@@ -2725,7 +2756,7 @@ static int ext4_da_writepages_trans_blocks(struct inode *inode)
         * number of contiguous block. So we will limit
         * number of contiguous block to a sane value
         */
-       if (!(inode->i_flags & EXT4_EXTENTS_FL) &&
+       if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) &&
            (max_blocks > EXT4_MAX_TRANS_DATA))
                max_blocks = EXT4_MAX_TRANS_DATA;
 
@@ -2743,8 +2774,10 @@ static int ext4_da_writepages(struct address_space *mapping,
        int no_nrwrite_index_update;
        int pages_written = 0;
        long pages_skipped;
+       unsigned int max_pages;
        int range_cyclic, cycled = 1, io_done = 0;
-       int needed_blocks, ret = 0, nr_to_writebump = 0;
+       int needed_blocks, ret = 0;
+       long desired_nr_to_write, nr_to_writebump = 0;
        loff_t range_start = wbc->range_start;
        struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
 
@@ -2771,16 +2804,6 @@ static int ext4_da_writepages(struct address_space *mapping,
        if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
                return -EROFS;
 
-       /*
-        * Make sure nr_to_write is >= sbi->s_mb_stream_request
-        * This make sure small files blocks are allocated in
-        * single attempt. This ensure that small files
-        * get less fragmented.
-        */
-       if (wbc->nr_to_write < sbi->s_mb_stream_request) {
-               nr_to_writebump = sbi->s_mb_stream_request - wbc->nr_to_write;
-               wbc->nr_to_write = sbi->s_mb_stream_request;
-       }
        if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                range_whole = 1;
 
@@ -2795,6 +2818,36 @@ static int ext4_da_writepages(struct address_space *mapping,
        } else
                index = wbc->range_start >> PAGE_CACHE_SHIFT;
 
+       /*
+        * This works around two forms of stupidity.  The first is in
+        * the writeback code, which caps the maximum number of pages
+        * written to be 1024 pages.  This is wrong on multiple
+        * levels; different architectues have a different page size,
+        * which changes the maximum amount of data which gets
+        * written.  Secondly, 4 megabytes is way too small.  XFS
+        * forces this value to be 16 megabytes by multiplying
+        * the nr_to_write parameter by four, and then relies on its
+        * allocator to allocate larger extents to make them
+        * contiguous.  Unfortunately this brings us to the second
+        * stupidity, which is that ext4's mballoc code only allocates
+        * at most 2048 blocks.  So we force contiguous writes up to
+        * the number of dirty blocks in the inode, or
+        * sbi->s_max_writeback_mb_bump, whichever is smaller.
+        */
+       max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
+       if (!range_cyclic && range_whole)
+               desired_nr_to_write = wbc->nr_to_write * 8;
+       else
+               desired_nr_to_write = ext4_num_dirty_pages(inode, index,
+                                                          max_pages);
+       if (desired_nr_to_write > max_pages)
+               desired_nr_to_write = max_pages;
+
+       if (wbc->nr_to_write < desired_nr_to_write) {
+               nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
+               wbc->nr_to_write = desired_nr_to_write;
+       }
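A worked example of the bump, assuming 4 KiB pages (PAGE_CACHE_SHIFT == 12) and an s_max_writeback_mb_bump of 128:

        max_pages = 128 << (20 - 12) = 32768 pages (128 MB)

If the flusher thread passes in wbc->nr_to_write = 1024 while the inode has 5000 contiguous dirty pages, desired_nr_to_write becomes 5000, nr_to_writebump becomes 3976, and wbc->nr_to_write is raised to 5000. Per the commit title, the bump is subtracted back out on the way out of ext4_da_writepages() so the caller sees an accurate residual nr_to_write.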
+
        mpd.wbc = wbc;
        mpd.inode = mapping->host;
 
@@ -2822,10 +2875,9 @@ retry:
                handle = ext4_journal_start(inode, needed_blocks);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
-                       printk(KERN_CRIT "%s: jbd2_start: "
+                       ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
                               "%ld pages, ino %lu; err %d\n", __func__,
                                wbc->nr_to_write, inode->i_ino, ret);
-                       dump_stack();
                        goto out_writepages;
                }
 
@@ -2849,7 +2901,7 @@ retry:
                ret = write_cache_pages(mapping, wbc, __mpage_da_writepage,
                                        &mpd);
                /*
-                * If we have a contigous extent of pages and we
+                * If we have a contiguous extent of pages and we
                 * haven't done the I/O yet, map the blocks and submit
                 * them for I/O.
                 */
@@ -2897,9 +2949,10 @@ retry:
                goto retry;
        }
        if (pages_skipped != wbc->pages_skipped)
-               printk(KERN_EMERG "This should not happen leaving %s "
-                               "with nr_to_write = %ld ret = %d\n",
-                               __func__, wbc->nr_to_write, ret);
+               ext4_msg(inode->i_sb, KERN_CRIT,
+                        "This should not happen leaving %s "
+                        "with nr_to_write = %ld ret = %d\n",
+                        __func__, wbc->nr_to_write, ret);
 
        /* Update index */
        index += pages_written;
@@ -2939,11 +2992,18 @@ static int ext4_nonda_switch(struct super_block *sb)
        if (2 * free_blocks < 3 * dirty_blocks ||
                free_blocks < (dirty_blocks + EXT4_FREEBLOCKS_WATERMARK)) {
                /*
-                * free block count is less that 150% of dirty blocks
-                * or free blocks is less that watermark
+                * free block count is less than 150% of dirty blocks
+                * or free blocks is less than watermark
                 */
                return 1;
        }
+       /*
+        * Even if we don't switch but are nearing capacity,
+        * start pushing delalloc when 1/2 of free blocks are dirty.
+        */
+       if (free_blocks < 2 * dirty_blocks)
+               writeback_inodes_sb_if_idle(sb);
+
        return 0;
 }
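A numeric example of the two heuristics above, ignoring the watermark term: with free_blocks = 100 and dirty_blocks = 80, 2 * 100 < 3 * 80 holds (free is below 150% of dirty), so we switch to non-delalloc; with dirty_blocks = 60 neither switch test fires, but 100 < 2 * 60 does, so writeback_inodes_sb_if_idle() kicks background writeback while delalloc stays enabled.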
 
@@ -3005,7 +3065,7 @@ retry:
                 * i_size_read because we hold i_mutex.
                 */
                if (pos + len > inode->i_size)
-                       ext4_truncate(inode);
+                       ext4_truncate_failed_write(inode);
        }
 
        if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
@@ -3272,6 +3332,8 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
 }
 
 /*
+ * O_DIRECT for ext3 (or indirect map) based files
+ *
  * If the O_DIRECT write will extend the file then add this inode to the
  * orphan list.  So recovery will truncate it back to the original size
  * if the machine crashes during the write.
@@ -3280,7 +3342,7 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
  * crashes then stale disk data _may_ be exposed inside the file. But current
  * VFS code falls back into buffered path in that case so we are safe.
  */
-static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
+static ssize_t ext4_ind_direct_IO(int rw, struct kiocb *iocb,
                              const struct iovec *iov, loff_t offset,
                              unsigned long nr_segs)
 {
@@ -3291,6 +3353,7 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
        ssize_t ret;
        int orphan = 0;
        size_t count = iov_length(iov, nr_segs);
+       int retries = 0;
 
        if (rw == WRITE) {
                loff_t final_size = offset + count;
@@ -3313,9 +3376,12 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
                }
        }
 
+retry:
        ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
                                 offset, nr_segs,
                                 ext4_get_block, NULL);
+       if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
+               goto retry;
 
        if (orphan) {
                int err;
@@ -3354,6 +3420,364 @@ out:
        return ret;
 }
 
+static int ext4_get_block_dio_write(struct inode *inode, sector_t iblock,
+                  struct buffer_head *bh_result, int create)
+{
+       handle_t *handle = NULL;
+       int ret = 0;
+       unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
+       int dio_credits;
+
+       ext4_debug("ext4_get_block_dio_write: inode %lu, create flag %d\n",
+                  inode->i_ino, create);
+       /*
+        * DIO VFS code passes create = 0 flag for write to
+        * the middle of file. It does this to avoid block
+        * allocation for holes, to prevent expose stale data
+        * out when there is parallel buffered read (which does
+        * not hold the i_mutex lock) while direct IO write has
+        * not completed. DIO request on holes finally falls back
+        * to buffered IO for this reason.
+        *
+        * For an ext4 extent-based file, since we support fallocate,
+        * newly allocated extents are marked uninitialized, so we can
+        * fallocate blocks for holes; a parallel buffered read will
+        * then zero out the page when it reads a hole for which the
+        * parallel DIO write has not yet completed.
+        *
+        * When we get here, we know it's a direct IO write to the
+        * middle of the file (< i_size), so it's safe to override
+        * the create flag from the VFS.
+        */
+       create = EXT4_GET_BLOCKS_DIO_CREATE_EXT;
+
+       if (max_blocks > DIO_MAX_BLOCKS)
+               max_blocks = DIO_MAX_BLOCKS;
+       dio_credits = ext4_chunk_trans_blocks(inode, max_blocks);
+       handle = ext4_journal_start(inode, dio_credits);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               goto out;
+       }
+       ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
+                             create);
+       if (ret > 0) {
+               bh_result->b_size = (ret << inode->i_blkbits);
+               ret = 0;
+       }
+       ext4_journal_stop(handle);
+out:
+       return ret;
+}
+
+static void ext4_free_io_end(ext4_io_end_t *io)
+{
+       BUG_ON(!io);
+       iput(io->inode);
+       kfree(io);
+}
+
+static void dump_aio_dio_list(struct inode *inode)
+{
+#ifdef EXT4_DEBUG
+       struct list_head *cur, *before, *after;
+       ext4_io_end_t *io, *io0, *io1;
+
+       if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)) {
+               ext4_debug("inode %lu aio dio list is empty\n", inode->i_ino);
+               return;
+       }
+
+       ext4_debug("Dump inode %lu aio_dio_completed_IO list\n", inode->i_ino);
+       list_for_each_entry(io, &EXT4_I(inode)->i_aio_dio_complete_list, list) {
+               cur = &io->list;
+               before = cur->prev;
+               io0 = container_of(before, ext4_io_end_t, list);
+               after = cur->next;
+               io1 = container_of(after, ext4_io_end_t, list);
+
+               ext4_debug("io 0x%p from inode %lu, prev 0x%p, next 0x%p\n",
+                           io, inode->i_ino, io0, io1);
+       }
+#endif
+}
+
+
+/*
+ * check a range of space and convert unwritten extents to written.
+ */
+static int ext4_end_aio_dio_nolock(ext4_io_end_t *io)
+{
+       struct inode *inode = io->inode;
+       loff_t offset = io->offset;
+       size_t size = io->size;
+       int ret = 0;
+
+       ext4_debug("ext4_end_aio_dio_nolock: io 0x%p from inode %lu, "
+                  "list->next 0x%p, list->prev 0x%p\n",
+                  io, inode->i_ino, io->list.next, io->list.prev);
+
+       if (list_empty(&io->list))
+               return ret;
+
+       if (io->flag != DIO_AIO_UNWRITTEN)
+               return ret;
+
+       if (offset + size <= i_size_read(inode))
+               ret = ext4_convert_unwritten_extents(inode, offset, size);
+
+       if (ret < 0) {
+               printk(KERN_EMERG "%s: failed to convert unwritten "
+                       "extents to written extents, error is %d; "
+                       "io is still on inode %lu aio dio list\n",
+                       __func__, ret, inode->i_ino);
+               return ret;
+       }
+
+       /* clear the DIO AIO unwritten flag */
+       io->flag = 0;
+       return ret;
+}
+
+/*
+ * work on completed aio dio IO, to convert unwritten extents to written
+ */
+static void ext4_end_aio_dio_work(struct work_struct *work)
+{
+       ext4_io_end_t *io  = container_of(work, ext4_io_end_t, work);
+       struct inode *inode = io->inode;
+       int ret = 0;
+
+       mutex_lock(&inode->i_mutex);
+       ret = ext4_end_aio_dio_nolock(io);
+       if (ret >= 0) {
+               if (!list_empty(&io->list))
+                       list_del_init(&io->list);
+               ext4_free_io_end(io);
+       }
+       mutex_unlock(&inode->i_mutex);
+}
+
+/*
+ * This function is called from ext4_sync_file().
+ *
+ * When AIO DIO IO is completed, the work to convert unwritten
+ * extents to written is queued on a workqueue but may not get
+ * immediately scheduled. When fsync is called, we need to ensure
+ * the conversion is complete before fsync returns.
+ * The inode keeps track of a list of completed AIO from the DIO path
+ * that might need the conversion. This function walks through
+ * the list and converts the related unwritten extents to written.
+ */
+int flush_aio_dio_completed_IO(struct inode *inode)
+{
+       ext4_io_end_t *io;
+       int ret = 0;
+       int ret2 = 0;
+
+       if (list_empty(&EXT4_I(inode)->i_aio_dio_complete_list))
+               return ret;
+
+       dump_aio_dio_list(inode);
+       while (!list_empty(&EXT4_I(inode)->i_aio_dio_complete_list)) {
+               io = list_entry(EXT4_I(inode)->i_aio_dio_complete_list.next,
+                               ext4_io_end_t, list);
+               /*
+                * Calling ext4_end_aio_dio_nolock() to convert completed
+                * IO to written.
+                *
+                * When ext4_sync_file() is called, run_queue() may already
+                * be about to flush the work corresponding to this io
+                * structure. It will be upset if it finds that the io
+                * structure related to the work being scheduled has been
+                * freed.
+                *
+                * Thus we need to keep the io structure valid here after
+                * the conversion has finished. The io structure has a flag
+                * to avoid double conversion, from both fsync and the
+                * background workqueue.
+                */
+               ret = ext4_end_aio_dio_nolock(io);
+               if (ret < 0)
+                       ret2 = ret;
+               else
+                       list_del_init(&io->list);
+       }
+       return (ret2 < 0) ? ret2 : 0;
+}
+
+static ext4_io_end_t *ext4_init_io_end(struct inode *inode)
+{
+       ext4_io_end_t *io = NULL;
+
+       io = kmalloc(sizeof(*io), GFP_NOFS);
+
+       if (io) {
+               igrab(inode);
+               io->inode = inode;
+               io->flag = 0;
+               io->offset = 0;
+               io->size = 0;
+               io->error = 0;
+               INIT_WORK(&io->work, ext4_end_aio_dio_work);
+               INIT_LIST_HEAD(&io->list);
+       }
+
+       return io;
+}
+
+static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
+                           ssize_t size, void *private)
+{
+       ext4_io_end_t *io_end = iocb->private;
+       struct workqueue_struct *wq;
+
+       /* if not async direct IO or a dio with a 0-byte write, just return */
+       if (!io_end || !size)
+               return;
+
+       ext_debug("ext4_end_io_dio(): io_end 0x%p "
+                 "for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
+                 iocb->private, io_end->inode->i_ino, iocb, offset,
+                 size);
+
+       /* if not aio dio with unwritten extents, just free io and return */
+       if (io_end->flag != DIO_AIO_UNWRITTEN) {
+               ext4_free_io_end(io_end);
+               iocb->private = NULL;
+               return;
+       }
+
+       io_end->offset = offset;
+       io_end->size = size;
+       wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
+
+       /* queue the work to convert unwritten extents to written */
+       queue_work(wq, &io_end->work);
+
+       /* Add the io_end to per-inode completed aio dio list */
+       list_add_tail(&io_end->list,
+                &EXT4_I(io_end->inode)->i_aio_dio_complete_list);
+       iocb->private = NULL;
+}
+
+/*
+ * For ext4 extent files, ext4 will do direct-io writes to holes,
+ * preallocated extents, and writes that extend the file, with no need
+ * to fall back to buffered IO.
+ *
+ * For holes, we fallocate those blocks and mark them as uninitialized.
+ * If those blocks were preallocated, we make sure they are split, but
+ * still keep the range to write as uninitialized.
+ *
+ * The unwritten extents will be converted to written when DIO is completed.
+ * For async direct IO, since the IO may still be pending when we return, we
+ * set up an end_io callback function, which will do the conversion
+ * when the async direct IO has completed.
+ *
+ * If the O_DIRECT write will extend the file then add this inode to the
+ * orphan list.  So recovery will truncate it back to the original size
+ * if the machine crashes during the write.
+ *
+ */
+static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
+                             const struct iovec *iov, loff_t offset,
+                             unsigned long nr_segs)
+{
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file->f_mapping->host;
+       ssize_t ret;
+       size_t count = iov_length(iov, nr_segs);
+
+       loff_t final_size = offset + count;
+
+       if (rw == WRITE && final_size <= inode->i_size) {
+               /*
+                * We could direct write to holes and fallocate.
+                *
+                * Allocated blocks to fill the hole are marked as uninitialized
+                * to prevent a parallel buffered read from exposing
+                * stale data before DIO completes the data IO.
+                *
+                * For previously fallocated extents, ext4's get_block
+                * will simply mark the buffer mapped but still keep
+                * the extents uninitialized.
+                *
+                * For the non-AIO case, we convert those unwritten
+                * extents to written after returning from
+                * blockdev_direct_IO.
+                *
+                * For async DIO, the conversion needs to be deferred
+                * until the IO is completed. The ext4 end_io callback
+                * function will be called to take care of the
+                * conversion work. Here, for the async case, we
+                * allocate an io_end structure to hook to the iocb.
+                */
+               iocb->private = NULL;
+               EXT4_I(inode)->cur_aio_dio = NULL;
+               if (!is_sync_kiocb(iocb)) {
+                       iocb->private = ext4_init_io_end(inode);
+                       if (!iocb->private)
+                               return -ENOMEM;
+                       /*
+                        * We save the io structure for the current async
+                        * direct IO, so that a later ext4_get_blocks() can
+                        * flag in the io structure whether there are
+                        * unwritten extents that need to be converted
+                        * when the IO is completed.
+                        */
+                       EXT4_I(inode)->cur_aio_dio = iocb->private;
+               }
+
+               ret = blockdev_direct_IO(rw, iocb, inode,
+                                        inode->i_sb->s_bdev, iov,
+                                        offset, nr_segs,
+                                        ext4_get_block_dio_write,
+                                        ext4_end_io_dio);
+               if (iocb->private)
+                       EXT4_I(inode)->cur_aio_dio = NULL;
+               /*
+                * The io_end structure takes a reference to the inode;
+                * that structure needs to be destroyed and the
+                * reference to the inode needs to be dropped when the
+                * IO is complete, even for a 0-byte or failed write.
+                *
+                * In the successful AIO DIO case, the io_end structure
+                * will be destroyed and the reference to the inode will
+                * be dropped after the end_io callback function is
+                * called.
+                *
+                * In the 0-byte write or error case, since the VFS
+                * direct IO won't invoke the end_io callback function,
+                * we need to free the end_io structure here.
+                */
+               if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
+                       ext4_free_io_end(iocb->private);
+                       iocb->private = NULL;
+               } else if (ret > 0 && (EXT4_I(inode)->i_state &
+                                      EXT4_STATE_DIO_UNWRITTEN)) {
+                       int err;
+                       /*
+                        * For the non-AIO case, since the IO has
+                        * already completed, we can do the conversion
+                        * right here.
+                        */
+                       err = ext4_convert_unwritten_extents(inode,
+                                                            offset, ret);
+                       if (err < 0)
+                               ret = err;
+                       EXT4_I(inode)->i_state &= ~EXT4_STATE_DIO_UNWRITTEN;
+               }
+               return ret;
+       }
+
+       /* for a write to the end of the file, we fall back to the old way */
+       return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
+}
+
+static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
+                             const struct iovec *iov, loff_t offset,
+                             unsigned long nr_segs)
+{
+       struct file *file = iocb->ki_filp;
+       struct inode *inode = file->f_mapping->host;
+
+       if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
+               return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
+
+       return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
+}
+
 /*
  * Pages can be marked dirty completely asynchronously from ext4's journalling
  * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
@@ -3386,6 +3810,7 @@ static const struct address_space_operations ext4_ordered_aops = {
        .direct_IO              = ext4_direct_IO,
        .migratepage            = buffer_migrate_page,
        .is_partially_uptodate  = block_is_partially_uptodate,
+       .error_remove_page      = generic_error_remove_page,
 };
 
 static const struct address_space_operations ext4_writeback_aops = {
@@ -3401,6 +3826,7 @@ static const struct address_space_operations ext4_writeback_aops = {
        .direct_IO              = ext4_direct_IO,
        .migratepage            = buffer_migrate_page,
        .is_partially_uptodate  = block_is_partially_uptodate,
+       .error_remove_page      = generic_error_remove_page,
 };
 
 static const struct address_space_operations ext4_journalled_aops = {
@@ -3415,6 +3841,7 @@ static const struct address_space_operations ext4_journalled_aops = {
        .invalidatepage         = ext4_invalidatepage,
        .releasepage            = ext4_releasepage,
        .is_partially_uptodate  = block_is_partially_uptodate,
+       .error_remove_page      = generic_error_remove_page,
 };
 
 static const struct address_space_operations ext4_da_aops = {
@@ -3431,6 +3858,7 @@ static const struct address_space_operations ext4_da_aops = {
        .direct_IO              = ext4_direct_IO,
        .migratepage            = buffer_migrate_page,
        .is_partially_uptodate  = block_is_partially_uptodate,
+       .error_remove_page      = generic_error_remove_page,
 };
 
 void ext4_set_aops(struct inode *inode)
@@ -3610,7 +4038,7 @@ static Indirect *ext4_find_shared(struct inode *inode, int depth,
        int k, err;
 
        *top = 0;
-       /* Make k index the deepest non-null offest + 1 */
+       /* Make k index the deepest non-null offset + 1 */
        for (k = depth; k > 1 && !offsets[k-1]; k--)
                ;
        partial = ext4_get_branch(inode, k, offsets, chain, &err);
@@ -3666,6 +4094,11 @@ static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
                              __le32 *last)
 {
        __le32 *p;
+       int     flags = EXT4_FREE_BLOCKS_FORGET;
+
+       if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
+               flags |= EXT4_FREE_BLOCKS_METADATA;
+
        if (try_to_extend_transaction(handle, inode)) {
                if (bh) {
                        BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
@@ -3680,27 +4113,10 @@ static void ext4_clear_blocks(handle_t *handle, struct inode *inode,
                }
        }
 
-       /*
-        * Any buffers which are on the journal will be in memory. We
-        * find them on the hash table so jbd2_journal_revoke() will
-        * run jbd2_journal_forget() on them.  We've already detached
-        * each block from the file, so bforget() in
-        * jbd2_journal_forget() should be safe.
-        *
-        * AKPM: turn on bforget in jbd2_journal_forget()!!!
-        */
-       for (p = first; p < last; p++) {
-               u32 nr = le32_to_cpu(*p);
-               if (nr) {
-                       struct buffer_head *tbh;
-
-                       *p = 0;
-                       tbh = sb_find_get_block(inode->i_sb, nr);
-                       ext4_forget(handle, 0, inode, tbh, nr);
-               }
-       }
+       for (p = first; p < last; p++)
+               *p = 0;
 
-       ext4_free_blocks(handle, inode, block_to_free, count, 0);
+       ext4_free_blocks(handle, inode, 0, block_to_free, count, flags);
 }
 
 /**
@@ -3888,7 +4304,8 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
                                            blocks_for_truncate(inode));
                        }
 
-                       ext4_free_blocks(handle, inode, nr, 1, 1);
+                       ext4_free_blocks(handle, inode, 0, nr, 1,
+                                        EXT4_FREE_BLOCKS_METADATA);
 
                        if (parent_bh) {
                                /*
@@ -3973,8 +4390,7 @@ void ext4_truncate(struct inode *inode)
        if (!ext4_can_truncate(inode))
                return;
 
-       if (ei->i_disksize && inode->i_size == 0 &&
-           !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
+       if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
                ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE;
 
        if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
@@ -4328,8 +4744,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
        struct ext4_iloc iloc;
        struct ext4_inode *raw_inode;
        struct ext4_inode_info *ei;
-       struct buffer_head *bh;
        struct inode *inode;
+       journal_t *journal = EXT4_SB(sb)->s_journal;
        long ret;
        int block;
 
@@ -4340,11 +4756,11 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                return inode;
 
        ei = EXT4_I(inode);
+       iloc.bh = NULL;
 
        ret = __ext4_get_inode_loc(inode, &iloc, 0);
        if (ret < 0)
                goto bad_inode;
-       bh = iloc.bh;
        raw_inode = ext4_raw_inode(&iloc);
        inode->i_mode = le16_to_cpu(raw_inode->i_mode);
        inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
@@ -4367,7 +4783,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                if (inode->i_mode == 0 ||
                    !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
                        /* this inode is deleted */
-                       brelse(bh);
                        ret = -ESTALE;
                        goto bad_inode;
                }
@@ -4384,6 +4799,9 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                        ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
        inode->i_size = ext4_isize(raw_inode);
        ei->i_disksize = inode->i_size;
+#ifdef CONFIG_QUOTA
+       ei->i_reserved_quota = 0;
+#endif
        inode->i_generation = le32_to_cpu(raw_inode->i_generation);
        ei->i_block_group = iloc.block_group;
        ei->i_last_alloc_group = ~0;
@@ -4395,11 +4813,35 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                ei->i_data[block] = raw_inode->i_block[block];
        INIT_LIST_HEAD(&ei->i_orphan);
 
+       /*
+        * Set transaction id's of transactions that have to be committed
+        * to finish f[data]sync. We set them to currently running transaction
+        * as we cannot be sure that the inode or some of its metadata isn't
+        * part of the transaction - the inode could have been reclaimed and
+        * now it is reread from disk.
+        */
+       if (journal) {
+               transaction_t *transaction;
+               tid_t tid;
+
+               spin_lock(&journal->j_state_lock);
+               if (journal->j_running_transaction)
+                       transaction = journal->j_running_transaction;
+               else
+                       transaction = journal->j_committing_transaction;
+               if (transaction)
+                       tid = transaction->t_tid;
+               else
+                       tid = journal->j_commit_sequence;
+               spin_unlock(&journal->j_state_lock);
+               ei->i_sync_tid = tid;
+               ei->i_datasync_tid = tid;
+       }
+
        if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
                ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
                if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
                    EXT4_INODE_SIZE(inode->i_sb)) {
-                       brelse(bh);
                        ret = -EIO;
                        goto bad_inode;
                }
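For context on how these tids are consumed: ext4_sync_file() presumably picks the recorded tid and waits for that transaction to commit, along these lines (a sketch; the actual code lives in fs/ext4/fsync.c, outside this diff):

        tid_t commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid;

        /* start the commit if needed, then wait for it to finish */
        if (jbd2_log_start_commit(journal, commit_tid))
                jbd2_log_wait_commit(journal, commit_tid);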
@@ -4431,10 +4873,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
 
        ret = 0;
        if (ei->i_file_acl &&
-           ((ei->i_file_acl <
-             (le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) +
-              EXT4_SB(sb)->s_gdb_count)) ||
-            (ei->i_file_acl >= ext4_blocks_count(EXT4_SB(sb)->s_es)))) {
+           !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
                ext4_error(sb, __func__,
                           "bad extended attribute block %llu in inode #%lu",
                           ei->i_file_acl, inode->i_ino);
@@ -4452,10 +4891,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                /* Validate block references which are part of inode */
                ret = ext4_check_inode_blockref(inode);
        }
-       if (ret) {
-               brelse(bh);
+       if (ret)
                goto bad_inode;
-       }
 
        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &ext4_file_inode_operations;
@@ -4483,7 +4920,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                        init_special_inode(inode, inode->i_mode,
                           new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
        } else {
-               brelse(bh);
                ret = -EIO;
                ext4_error(inode->i_sb, __func__,
                           "bogus i_mode (%o) for inode=%lu",
@@ -4496,6 +4932,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
        return inode;
 
 bad_inode:
+       brelse(iloc.bh);
        iget_failed(inode);
        return ERR_PTR(ret);
 }
@@ -4548,8 +4985,7 @@ static int ext4_inode_blocks_set(handle_t *handle,
  */
 static int ext4_do_update_inode(handle_t *handle,
                                struct inode *inode,
-                               struct ext4_iloc *iloc,
-                               int do_sync)
+                               struct ext4_iloc *iloc)
 {
        struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
        struct ext4_inode_info *ei = EXT4_I(inode);
@@ -4650,24 +5086,13 @@ static int ext4_do_update_inode(handle_t *handle,
                raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
        }
 
-       /*
-        * If we're not using a journal and we were called from
-        * ext4_write_inode() to sync the inode (making do_sync true),
-        * we can just use sync_dirty_buffer() directly to do our dirty
-        * work.  Testing s_journal here is a bit redundant but it's
-        * worth it to avoid potential future trouble.
-        */
-       if (EXT4_SB(inode->i_sb)->s_journal == NULL && do_sync) {
-               BUFFER_TRACE(bh, "call sync_dirty_buffer");
-               sync_dirty_buffer(bh);
-       } else {
-               BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
-               rc = ext4_handle_dirty_metadata(handle, inode, bh);
-               if (!err)
-                       err = rc;
-       }
+       BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
+       rc = ext4_handle_dirty_metadata(handle, inode, bh);
+       if (!err)
+               err = rc;
        ei->i_state &= ~EXT4_STATE_NEW;
 
+       ext4_update_inode_fsync_trans(handle, inode, 0);
 out_brelse:
        brelse(bh);
        ext4_std_error(inode->i_sb, err);
@@ -4733,8 +5158,16 @@ int ext4_write_inode(struct inode *inode, int wait)
                err = ext4_get_inode_loc(inode, &iloc);
                if (err)
                        return err;
-               err = ext4_do_update_inode(EXT4_NOJOURNAL_HANDLE,
-                                          inode, &iloc, wait);
+               if (wait)
+                       sync_dirty_buffer(iloc.bh);
+               if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
+                       ext4_error(inode->i_sb, __func__,
+                                  "IO error syncing inode, "
+                                  "inode=%lu, block=%llu",
+                                  inode->i_ino,
+                                  (unsigned long long)iloc.bh->b_blocknr);
+                       err = -EIO;
+               }
        }
        return err;
 }
@@ -4779,8 +5212,8 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
 
                /* (user+group)*(old+new) structure, inode write (sb,
                 * inode block, ? - but truncate inode update has it) */
-               handle = ext4_journal_start(inode, 2*(EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)+
-                                       EXT4_QUOTA_DEL_BLOCKS(inode->i_sb))+3);
+               handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
+                                       EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3);
                if (IS_ERR(handle)) {
                        error = PTR_ERR(handle);
                        goto err_out;
@@ -4928,7 +5361,7 @@ static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
  * worst case, the index blocks spread over different block groups
  *
  * If datablocks are discontiguous, they are possible to spread over
- * different block groups too. If they are contiugous, with flexbg,
+ * different block groups too. If they are contiguous, with flexbg,
  * they could still cross a block group boundary.
  *
  * Also account for superblock, inode, quota and xattr blocks
@@ -5004,7 +5437,7 @@ int ext4_writepage_trans_blocks(struct inode *inode)
  * Calculate the journal credits for a chunk of data modification.
  *
  * This is called from DIO, fallocate, or whoever is calling
- * ext4_get_blocks() to map/allocate a chunk of contigous disk blocks.
+ * ext4_get_blocks() to map/allocate a chunk of contiguous disk blocks.
  *
  * journal buffers for data blocks are not included here, as DIO
  * and fallocate do not need to journal data buffers.
@@ -5030,7 +5463,7 @@ int ext4_mark_iloc_dirty(handle_t *handle,
        get_bh(iloc->bh);
 
        /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
-       err = ext4_do_update_inode(handle, inode, iloc, 0);
+       err = ext4_do_update_inode(handle, inode, iloc);
        put_bh(iloc->bh);
        return err;
 }
@@ -5174,27 +5607,14 @@ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
  */
 void ext4_dirty_inode(struct inode *inode)
 {
-       handle_t *current_handle = ext4_journal_current_handle();
        handle_t *handle;
 
-       if (!ext4_handle_valid(current_handle)) {
-               ext4_mark_inode_dirty(current_handle, inode);
-               return;
-       }
-
        handle = ext4_journal_start(inode, 2);
        if (IS_ERR(handle))
                goto out;
-       if (current_handle &&
-               current_handle->h_transaction != handle->h_transaction) {
-               /* This task has a transaction open against a different fs */
-               printk(KERN_EMERG "%s: transactions do not match!\n",
-                      __func__);
-       } else {
-               jbd_debug(5, "marking dirty.  outer handle=%p\n",
-                               current_handle);
-               ext4_mark_inode_dirty(handle, inode);
-       }
+
+       ext4_mark_inode_dirty(handle, inode);
+
        ext4_journal_stop(handle);
 out:
        return;