dquot: move dquot initialization responsibility into the filesystem
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index ea58886..d7962b0 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
  *  Copyright (C) 1991, 1992  Linus Torvalds
  *
  *  Goal-directed block allocation by Stephen Tweedie
- *     (sct@redhat.com), 1993, 1998
+ *     (sct@redhat.com), 1993, 1998
  *  Big-endian to little-endian byte-swapping/bitmaps by
  *        David S. Miller (davem@caip.rutgers.edu), 1995
  *  64-bit file support on 64-bit platforms by Jakub Jelinek
- *     (jj@sunsite.ms.mff.cuni.cz)
+ *     (jj@sunsite.ms.mff.cuni.cz)
  *
  *  Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
  */
@@ -27,7 +27,6 @@
 #include <linux/time.h>
 #include <linux/ext3_jbd.h>
 #include <linux/jbd.h>
-#include <linux/smp_lock.h>
 #include <linux/highuid.h>
 #include <linux/pagemap.h>
 #include <linux/quotaops.h>
@@ -36,6 +35,9 @@
 #include <linux/writeback.h>
 #include <linux/mpage.h>
 #include <linux/uio.h>
+#include <linux/bio.h>
+#include <linux/fiemap.h>
+#include <linux/namei.h>
 #include "xattr.h"
 #include "acl.h"
 
@@ -44,27 +46,25 @@ static int ext3_writepage_trans_blocks(struct inode *inode);
 /*
  * Test whether an inode is a fast symlink.
  */
-static inline int ext3_inode_is_fast_symlink(struct inode *inode)
+static int ext3_inode_is_fast_symlink(struct inode *inode)
 {
        int ea_blocks = EXT3_I(inode)->i_file_acl ?
                (inode->i_sb->s_blocksize >> 9) : 0;
 
-       return (S_ISLNK(inode->i_mode) &&
-               inode->i_blocks - ea_blocks == 0);
+       return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
 }
 
-/* The ext3 forget function must perform a revoke if we are freeing data
+/*
+ * The ext3 forget function must perform a revoke if we are freeing data
  * which has been journaled.  Metadata (eg. indirect blocks) must be
- * revoked in all cases. 
+ * revoked in all cases.
  *
  * "bh" may be NULL: a metadata block may have been freed from memory
  * but there may still be a record of it in the journal, and that record
  * still needs to be revoked.
  */
-
-int ext3_forget(handle_t *handle, int is_metadata,
-                      struct inode *inode, struct buffer_head *bh,
-                      int blocknr)
+int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
+                       struct buffer_head *bh, ext3_fsblk_t blocknr)
 {
        int err;
 
@@ -97,18 +97,17 @@ int ext3_forget(handle_t *handle, int is_metadata,
        BUFFER_TRACE(bh, "call ext3_journal_revoke");
        err = ext3_journal_revoke(handle, blocknr, bh);
        if (err)
-               ext3_abort(inode->i_sb, __FUNCTION__,
+               ext3_abort(inode->i_sb, __func__,
                           "error %d when attempting revoke", err);
        BUFFER_TRACE(bh, "exit");
        return err;
 }
 
 /*
- * Work out how many blocks we need to progress with the next chunk of a
+ * Work out how many blocks we need to proceed with the next chunk of a
  * truncate transaction.
  */
-
-static unsigned long blocks_for_truncate(struct inode *inode) 
+static unsigned long blocks_for_truncate(struct inode *inode)
 {
        unsigned long needed;
 
@@ -125,13 +124,13 @@ static unsigned long blocks_for_truncate(struct inode *inode)
 
        /* But we need to bound the transaction so we don't overflow the
         * journal. */
-       if (needed > EXT3_MAX_TRANS_DATA) 
+       if (needed > EXT3_MAX_TRANS_DATA)
                needed = EXT3_MAX_TRANS_DATA;
 
-       return EXT3_DATA_TRANS_BLOCKS + needed;
+       return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
 }
 
-/* 
+/*
  * Truncate transactions can be complex and absolutely huge.  So we need to
  * be able to restart the transaction at a conventient checkpoint to make
  * sure we don't overflow the journal.
@@ -139,10 +138,9 @@ static unsigned long blocks_for_truncate(struct inode *inode)
  * start_transaction gets us a new handle for a truncate transaction,
  * and extend_transaction tries to extend the existing one a bit.  If
  * extend fails, we need to propagate the failure up and restart the
- * transaction in the top-level truncate loop. --sct 
+ * transaction in the top-level truncate loop. --sct
  */
-
-static handle_t *start_transaction(struct inode *inode) 
+static handle_t *start_transaction(struct inode *inode)
 {
        handle_t *result;
 
@@ -174,10 +172,21 @@ static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
  * so before we call here everything must be consistently dirtied against
  * this transaction.
  */
-static int ext3_journal_test_restart(handle_t *handle, struct inode *inode)
+static int truncate_restart_transaction(handle_t *handle, struct inode *inode)
 {
+       int ret;
+
        jbd_debug(2, "restarting handle %p\n", handle);
-       return ext3_journal_restart(handle, blocks_for_truncate(inode));
+       /*
+        * Drop truncate_mutex to avoid deadlock with ext3_get_blocks_handle
+        * At this moment, get_block can be called only for blocks inside
+        * i_size since page cache has been already dropped and writes are
+        * blocked by i_mutex. So we can safely drop the truncate_mutex.
+        */
+       mutex_unlock(&EXT3_I(inode)->truncate_mutex);
+       ret = ext3_journal_restart(handle, blocks_for_truncate(inode));
+       mutex_lock(&EXT3_I(inode)->truncate_mutex);
+       return ret;
 }
 
 /*
@@ -187,14 +196,21 @@ void ext3_delete_inode (struct inode * inode)
 {
        handle_t *handle;
 
+       if (!is_bad_inode(inode))
+               vfs_dq_init(inode);
+
+       truncate_inode_pages(&inode->i_data, 0);
+
        if (is_bad_inode(inode))
                goto no_delete;
 
        handle = start_transaction(inode);
        if (IS_ERR(handle)) {
-               /* If we're going to skip the normal cleanup, we still
-                * need to make sure that the in-core orphan linked list
-                * is properly cleaned up. */
+               /*
+                * If we're going to skip the normal cleanup, we still need to
+                * make sure that the in-core orphan linked list is properly
+                * cleaned up.
+                */
                ext3_orphan_del(NULL, inode);
                goto no_delete;
        }
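
The two calls added above are the ext3 side of this patch's subject: quota initialization moves out of the VFS and into the filesystem, so ext3_delete_inode() must call vfs_dq_init() itself (and also takes over truncating the page cache) before any blocks are freed and charged against the owner's dquots. Since the function is spread across several hunks here, the following stand-alone sketch (function name hypothetical; every callee appears elsewhere in this file) shows the resulting shape in one place:

/* Sketch only -- a consolidation of the hunks above, not a quote of the file. */
static void example_delete_inode(struct inode *inode)
{
	handle_t *handle;

	if (!is_bad_inode(inode))
		vfs_dq_init(inode);		/* previously the VFS did this for us */

	truncate_inode_pages(&inode->i_data, 0);

	if (is_bad_inode(inode))
		goto no_delete;

	handle = start_transaction(inode);
	if (IS_ERR(handle)) {
		/* keep the in-core orphan list consistent even when bailing out */
		ext3_orphan_del(NULL, inode);
		goto no_delete;
	}
	/* ... truncate data blocks, drop the on-disk orphan entry, free the inode ... */
	return;
no_delete:
	clear_inode(inode);		/* we must guarantee clearing of inode */
}
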
@@ -215,12 +231,12 @@ void ext3_delete_inode (struct inode * inode)
        ext3_orphan_del(handle, inode);
        EXT3_I(inode)->i_dtime  = get_seconds();
 
-       /* 
+       /*
         * One subtle ordering requirement: if anything has gone wrong
         * (transaction abort, IO errors, whatever), then we can still
         * do these next steps (the fs will already have been marked as
         * having errors), but we can't free the inode if the mark_dirty
-        * fails.  
+        * fails.
         */
        if (ext3_mark_inode_dirty(handle, inode))
                /* If that failed, just do the required in-core inode clear. */
@@ -233,16 +249,6 @@ no_delete:
        clear_inode(inode);     /* We must guarantee clearing of inode... */
 }
 
-static int ext3_alloc_block (handle_t *handle,
-                       struct inode * inode, unsigned long goal, int *err)
-{
-       unsigned long result;
-
-       result = ext3_new_block(handle, inode, goal, err);
-       return result;
-}
-
-
 typedef struct {
        __le32  *p;
        __le32  key;
@@ -255,7 +261,7 @@ static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
        p->bh = bh;
 }
 
-static inline int verify_chain(Indirect *from, Indirect *to)
+static int verify_chain(Indirect *from, Indirect *to)
 {
        while (from <= to && from->key == *from->p)
                from++;
@@ -325,10 +331,10 @@ static int ext3_block_to_path(struct inode *inode,
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else {
-               ext3_warning (inode->i_sb, "ext3_block_to_path", "block > big");
+               ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big");
        }
        if (boundary)
-               *boundary = (i_block & (ptrs - 1)) == (final - 1);
+               *boundary = final - 1 - (i_block & (ptrs - 1));
        return n;
 }
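
One behavioural change in this hunk is easy to miss: *boundary used to be a 0/1 flag meaning "the wanted slot is the last one in its indirect block", and now carries how many more slots remain before that boundary. The multi-block mapping code added later in this patch uses that count to bound how many contiguous blocks it may map in one call. A small userspace illustration, assuming a 1KB block size (256 pointers per indirect block) and a wanted slot at offset 250:

#include <stdio.h>

int main(void)
{
	unsigned int ptrs = 256, final = ptrs;
	long offset = 250;	/* offset of the wanted slot inside the indirect block */

	int old_boundary = (offset & (ptrs - 1)) == (final - 1);	/* 0: flag only  */
	int new_boundary = final - 1 - (offset & (ptrs - 1));		/* 5: slots left */

	printf("old=%d new=%d\n", old_boundary, new_boundary);
	return 0;
}
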
 
@@ -402,13 +408,13 @@ no_block:
  *     @inode: owner
  *     @ind: descriptor of indirect block.
  *
- *     This function returns the prefered place for block allocation.
+ *     This function returns the preferred place for block allocation.
  *     It is used when heuristic for sequential allocation fails.
  *     Rules are:
  *       + if there is a block to the left of our position - allocate near it.
  *       + if pointer will live in indirect block - allocate near that block.
  *       + if pointer will live in inode - allocate in the same
- *         cylinder group. 
+ *         cylinder group.
  *
  * In the latter case we colour the starting block by the callers PID to
  * prevent it from clashing with concurrent allocations for a different inode
@@ -417,51 +423,50 @@ no_block:
  *
  *     Caller must make sure that @ind is valid and will stay that way.
  */
-
-static unsigned long ext3_find_near(struct inode *inode, Indirect *ind)
+static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind)
 {
        struct ext3_inode_info *ei = EXT3_I(inode);
        __le32 *start = ind->bh ? (__le32*) ind->bh->b_data : ei->i_data;
        __le32 *p;
-       unsigned long bg_start;
-       unsigned long colour;
+       ext3_fsblk_t bg_start;
+       ext3_grpblk_t colour;
 
        /* Try to find previous block */
-       for (p = ind->p - 1; p >= start; p--)
+       for (p = ind->p - 1; p >= start; p--) {
                if (*p)
                        return le32_to_cpu(*p);
+       }
 
        /* No such thing, so let's try location of indirect block */
        if (ind->bh)
                return ind->bh->b_blocknr;
 
        /*
-        * It is going to be refered from inode itself? OK, just put it into
-        * the same cylinder group then.
+        * It is going to be referred to from the inode itself? OK, just put it
+        * into the same cylinder group then.
         */
-       bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) +
-               le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block);
+       bg_start = ext3_group_first_block_no(inode->i_sb, ei->i_block_group);
        colour = (current->pid % 16) *
                        (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour;
 }
 
 /**
- *     ext3_find_goal - find a prefered place for allocation.
+ *     ext3_find_goal - find a preferred place for allocation.
  *     @inode: owner
  *     @block:  block we want
- *     @chain:  chain of indirect blocks
  *     @partial: pointer to the last triple within a chain
- *     @goal:  place to store the result.
  *
- *     Normally this function find the prefered place for block allocation,
- *     stores it in *@goal and returns zero.
+ *     Normally this function find the preferred place for block allocation,
+ *     returns it.
  */
 
-static unsigned long ext3_find_goal(struct inode *inode, long block,
-               Indirect chain[4], Indirect *partial)
+static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
+                                  Indirect *partial)
 {
-       struct ext3_block_alloc_info *block_i =  EXT3_I(inode)->i_block_alloc_info;
+       struct ext3_block_alloc_info *block_i;
+
+       block_i =  EXT3_I(inode)->i_block_alloc_info;
 
        /*
         * try the heuristic for sequential allocation,
@@ -476,20 +481,120 @@ static unsigned long ext3_find_goal(struct inode *inode, long block,
 }
 
 /**
+ *     ext3_blks_to_allocate: Look up the block map and count the number
+ *     of direct blocks need to be allocated for the given branch.
+ *
+ *     @branch: chain of indirect blocks
+ *     @k: number of blocks need for indirect blocks
+ *     @blks: number of data blocks to be mapped.
+ *     @blocks_to_boundary:  the offset in the indirect block
+ *
+ *     return the total number of blocks to be allocate, including the
+ *     direct and indirect blocks.
+ */
+static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
+               int blocks_to_boundary)
+{
+       unsigned long count = 0;
+
+       /*
+        * Simple case, [t,d]Indirect block(s) has not allocated yet
+        * then it's clear blocks on that path have not allocated
+        */
+       if (k > 0) {
+               /* right now we don't handle cross boundary allocation */
+               if (blks < blocks_to_boundary + 1)
+                       count += blks;
+               else
+                       count += blocks_to_boundary + 1;
+               return count;
+       }
+
+       count++;
+       while (count < blks && count <= blocks_to_boundary &&
+               le32_to_cpu(*(branch[0].p + count)) == 0) {
+               count++;
+       }
+       return count;
+}
+
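
The k == 0 branch of ext3_blks_to_allocate() above scans the already-present indirect block for a run of unallocated (zero) slots, bounded both by the request size and by the distance to the indirect-block boundary. A userspace model of just that loop (function and variable names hypothetical, le32_to_cpu omitted):

#include <stdio.h>

static unsigned long blks_to_allocate(const unsigned int *slots, unsigned long blks,
				      int blocks_to_boundary)
{
	unsigned long count = 1;	/* the wanted slot itself always gets allocated */

	while (count < blks && count <= (unsigned long)blocks_to_boundary &&
	       slots[count] == 0)
		count++;
	return count;
}

int main(void)
{
	/* wanted slot at index 0; the next two slots are holes, the third is mapped */
	unsigned int slots[] = { 0, 0, 0, 1234, 0 };

	printf("%lu\n", blks_to_allocate(slots, 8, 200));	/* prints 3 */
	return 0;
}
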
+/**
+ *     ext3_alloc_blocks: multiple allocate blocks needed for a branch
+ *     @indirect_blks: the number of blocks need to allocate for indirect
+ *                     blocks
+ *
+ *     @new_blocks: on return it will store the new block numbers for
+ *     the indirect blocks(if needed) and the first direct block,
+ *     @blks:  on return it will store the total number of allocated
+ *             direct blocks
+ */
+static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
+                       ext3_fsblk_t goal, int indirect_blks, int blks,
+                       ext3_fsblk_t new_blocks[4], int *err)
+{
+       int target, i;
+       unsigned long count = 0;
+       int index = 0;
+       ext3_fsblk_t current_block = 0;
+       int ret = 0;
+
+       /*
+        * Here we try to allocate the requested multiple blocks at once,
+        * on a best-effort basis.
+        * To build a branch, we should allocate blocks for
+        * the indirect blocks(if not allocated yet), and at least
+        * the first direct block of this branch.  That's the
+        * minimum number of blocks need to allocate(required)
+        */
+       target = blks + indirect_blks;
+
+       while (1) {
+               count = target;
+               /* allocating blocks for indirect blocks and direct blocks */
+               current_block = ext3_new_blocks(handle,inode,goal,&count,err);
+               if (*err)
+                       goto failed_out;
+
+               target -= count;
+               /* allocate blocks for indirect blocks */
+               while (index < indirect_blks && count) {
+                       new_blocks[index++] = current_block++;
+                       count--;
+               }
+
+               if (count > 0)
+                       break;
+       }
+
+       /* save the new block number for the first direct block */
+       new_blocks[index] = current_block;
+
+       /* total number of blocks allocated for direct blocks */
+       ret = count;
+       *err = 0;
+       return ret;
+failed_out:
+       for (i = 0; i <index; i++)
+               ext3_free_blocks(handle, inode, new_blocks[i], 1);
+       return ret;
+}
+
+/**
  *     ext3_alloc_branch - allocate and set up a chain of blocks.
  *     @inode: owner
- *     @num: depth of the chain (number of blocks to allocate)
+ *     @indirect_blks: number of allocated indirect blocks
+ *     @blks: number of allocated direct blocks
  *     @offsets: offsets (in the blocks) to store the pointers to next.
  *     @branch: place to store the chain in.
  *
- *     This function allocates @num blocks, zeroes out all but the last one,
+ *     This function allocates blocks, zeroes out all but the last one,
  *     links them into chain and (if we are synchronous) writes them to disk.
  *     In other words, it prepares a branch that can be spliced onto the
  *     inode. It stores the information about that chain in the branch[], in
  *     the same format as ext3_get_branch() would do. We are calling it after
  *     we had read the existing part of chain and partial points to the last
  *     triple of that (one with zero ->key). Upon the exit we have the same
- *     picture as after the successful ext3_get_block(), excpet that in one
+ *     picture as after the successful ext3_get_block(), except that in one
  *     place chain is disconnected - *branch->p is still zero (we did not
  *     set the last link), but branch->key contains the number that should
  *     be placed into *branch->p to fill that gap.
@@ -499,95 +604,107 @@ static unsigned long ext3_find_goal(struct inode *inode, long block,
  *     ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
  *     as described above and return 0.
  */
-
 static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
-                            int num,
-                            unsigned long goal,
-                            int *offsets,
-                            Indirect *branch)
+                       int indirect_blks, int *blks, ext3_fsblk_t goal,
+                       int *offsets, Indirect *branch)
 {
        int blocksize = inode->i_sb->s_blocksize;
-       int n = 0, keys = 0;
+       int i, n = 0;
        int err = 0;
-       int i;
-       int parent = ext3_alloc_block(handle, inode, goal, &err);
-
-       branch[0].key = cpu_to_le32(parent);
-       if (parent) {
-               for (n = 1; n < num; n++) {
-                       struct buffer_head *bh;
-                       /* Allocate the next block */
-                       int nr = ext3_alloc_block(handle, inode, parent, &err);
-                       if (!nr)
-                               break;
-                       branch[n].key = cpu_to_le32(nr);
-                       keys = n+1;
+       struct buffer_head *bh;
+       int num;
+       ext3_fsblk_t new_blocks[4];
+       ext3_fsblk_t current_block;
 
-                       /*
-                        * Get buffer_head for parent block, zero it out
-                        * and set the pointer to new one, then send
-                        * parent to disk.  
-                        */
-                       bh = sb_getblk(inode->i_sb, parent);
-                       branch[n].bh = bh;
-                       lock_buffer(bh);
-                       BUFFER_TRACE(bh, "call get_create_access");
-                       err = ext3_journal_get_create_access(handle, bh);
-                       if (err) {
-                               unlock_buffer(bh);
-                               brelse(bh);
-                               break;
-                       }
+       num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
+                               *blks, new_blocks, &err);
+       if (err)
+               return err;
 
-                       memset(bh->b_data, 0, blocksize);
-                       branch[n].p = (__le32*) bh->b_data + offsets[n];
-                       *branch[n].p = branch[n].key;
-                       BUFFER_TRACE(bh, "marking uptodate");
-                       set_buffer_uptodate(bh);
+       branch[0].key = cpu_to_le32(new_blocks[0]);
+       /*
+        * metadata blocks and data blocks are allocated.
+        */
+       for (n = 1; n <= indirect_blks;  n++) {
+               /*
+                * Get buffer_head for parent block, zero it out
+                * and set the pointer to new one, then send
+                * parent to disk.
+                */
+               bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
+               branch[n].bh = bh;
+               lock_buffer(bh);
+               BUFFER_TRACE(bh, "call get_create_access");
+               err = ext3_journal_get_create_access(handle, bh);
+               if (err) {
                        unlock_buffer(bh);
+                       brelse(bh);
+                       goto failed;
+               }
 
-                       BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
-                       err = ext3_journal_dirty_metadata(handle, bh);
-                       if (err)
-                               break;
-
-                       parent = nr;
+               memset(bh->b_data, 0, blocksize);
+               branch[n].p = (__le32 *) bh->b_data + offsets[n];
+               branch[n].key = cpu_to_le32(new_blocks[n]);
+               *branch[n].p = branch[n].key;
+               if ( n == indirect_blks) {
+                       current_block = new_blocks[n];
+                       /*
+                        * End of chain, update the last new metablock of
+                        * the chain to point to the new allocated
+                        * data blocks numbers
+                        */
+                       for (i=1; i < num; i++)
+                               *(branch[n].p + i) = cpu_to_le32(++current_block);
                }
-       }
-       if (n == num)
-               return 0;
+               BUFFER_TRACE(bh, "marking uptodate");
+               set_buffer_uptodate(bh);
+               unlock_buffer(bh);
 
+               BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
+               err = ext3_journal_dirty_metadata(handle, bh);
+               if (err)
+                       goto failed;
+       }
+       *blks = num;
+       return err;
+failed:
        /* Allocation failed, free what we already allocated */
-       for (i = 1; i < keys; i++) {
+       for (i = 1; i <= n ; i++) {
                BUFFER_TRACE(branch[i].bh, "call journal_forget");
                ext3_journal_forget(handle, branch[i].bh);
        }
-       for (i = 0; i < keys; i++)
-               ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1);
+       for (i = 0; i <indirect_blks; i++)
+               ext3_free_blocks(handle, inode, new_blocks[i], 1);
+
+       ext3_free_blocks(handle, inode, new_blocks[i], num);
+
        return err;
 }
 
 /**
- *     ext3_splice_branch - splice the allocated branch onto inode.
- *     @inode: owner
- *     @block: (logical) number of block we are adding
- *     @chain: chain of indirect blocks (with a missing link - see
- *             ext3_alloc_branch)
- *     @where: location of missing link
- *     @num:   number of blocks we are adding
- *
- *     This function fills the missing link and does all housekeeping needed in
- *     inode (->i_blocks, etc.). In case of success we end up with the full
- *     chain to new block and return 0.
+ * ext3_splice_branch - splice the allocated branch onto inode.
+ * @inode: owner
+ * @block: (logical) number of block we are adding
+ * @chain: chain of indirect blocks (with a missing link - see
+ *     ext3_alloc_branch)
+ * @where: location of missing link
+ * @num:   number of indirect blocks we are adding
+ * @blks:  number of direct blocks we are adding
+ *
+ * This function fills the missing link and does all housekeeping needed in
+ * inode (->i_blocks, etc.). In case of success we end up with the full
+ * chain to new block and return 0.
  */
-
-static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
-                             Indirect chain[4], Indirect *where, int num)
+static int ext3_splice_branch(handle_t *handle, struct inode *inode,
+                       long block, Indirect *where, int num, int blks)
 {
        int i;
        int err = 0;
-       struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info;
+       struct ext3_block_alloc_info *block_i;
+       ext3_fsblk_t current_block;
+       struct ext3_inode_info *ei = EXT3_I(inode);
 
+       block_i = ei->i_block_alloc_info;
        /*
         * If we're splicing into a [td]indirect block (as opposed to the
         * inode) then we need to get write access to the [td]indirect block
@@ -604,24 +721,37 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
        *where->p = where->key;
 
        /*
+        * Update the host buffer_head or inode to point to more just allocated
+        * direct blocks blocks
+        */
+       if (num == 0 && blks > 1) {
+               current_block = le32_to_cpu(where->key) + 1;
+               for (i = 1; i < blks; i++)
+                       *(where->p + i ) = cpu_to_le32(current_block++);
+       }
+
+       /*
         * update the most recently allocated logical & physical block
         * in i_block_alloc_info, to assist find the proper goal block for next
         * allocation
         */
        if (block_i) {
-               block_i->last_alloc_logical_block = block;
-               block_i->last_alloc_physical_block = le32_to_cpu(where[num-1].key);
+               block_i->last_alloc_logical_block = block + blks - 1;
+               block_i->last_alloc_physical_block =
+                               le32_to_cpu(where[num].key) + blks - 1;
        }
 
        /* We are done with atomic stuff, now do the rest of housekeeping */
 
        inode->i_ctime = CURRENT_TIME_SEC;
        ext3_mark_inode_dirty(handle, inode);
+       /* ext3_mark_inode_dirty already updated i_sync_tid */
+       atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
 
        /* had we spliced it onto indirect block? */
        if (where->bh) {
                /*
-                * akpm: If we spliced it onto an indirect block, we haven't
+                * If we spliced it onto an indirect block, we haven't
                 * altered the inode.  Note however that if it is being spliced
                 * onto an indirect block at the very end of the file (the
                 * file is growing) then we *will* alter the inode to reflect
@@ -631,7 +761,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
                jbd_debug(5, "splicing indirect only\n");
                BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
                err = ext3_journal_dirty_metadata(handle, where->bh);
-               if (err) 
+               if (err)
                        goto err_out;
        } else {
                /*
@@ -643,10 +773,13 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block,
        return err;
 
 err_out:
-       for (i = 1; i < num; i++) {
+       for (i = 1; i <= num; i++) {
                BUFFER_TRACE(where[i].bh, "call journal_forget");
                ext3_journal_forget(handle, where[i].bh);
+               ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1);
        }
+       ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);
+
        return err;
 }
 
@@ -662,26 +795,33 @@ err_out:
  * allocations is needed - we simply release blocks and do not touch anything
  * reachable from inode.
  *
- * akpm: `handle' can be NULL if create == 0.
+ * `handle' can be NULL if create == 0.
  *
  * The BKL may not be held on entry here.  Be sure to take it early.
+ * return > 0, # of blocks mapped or allocated.
+ * return = 0, if plain lookup failed.
+ * return < 0, error case.
  */
-
-static int
-ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
-               struct buffer_head *bh_result, int create, int extend_disksize)
+int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
+               sector_t iblock, unsigned long maxblocks,
+               struct buffer_head *bh_result,
+               int create)
 {
        int err = -EIO;
        int offsets[4];
        Indirect chain[4];
        Indirect *partial;
-       unsigned long goal;
-       int left;
-       int boundary = 0;
-       const int depth = ext3_block_to_path(inode, iblock, offsets, &boundary);
+       ext3_fsblk_t goal;
+       int indirect_blks;
+       int blocks_to_boundary = 0;
+       int depth;
        struct ext3_inode_info *ei = EXT3_I(inode);
+       int count = 0;
+       ext3_fsblk_t first_block = 0;
+
 
        J_ASSERT(handle != NULL || create == 0);
+       depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary);
 
        if (depth == 0)
                goto out;
@@ -690,15 +830,41 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
 
        /* Simplest case - block found, no allocation needed */
        if (!partial) {
+               first_block = le32_to_cpu(chain[depth - 1].key);
                clear_buffer_new(bh_result);
-               goto got_it;
+               count++;
+               /*map more blocks*/
+               while (count < maxblocks && count <= blocks_to_boundary) {
+                       ext3_fsblk_t blk;
+
+                       if (!verify_chain(chain, chain + depth - 1)) {
+                               /*
+                                * Indirect block might be removed by
+                                * truncate while we were reading it.
+                                * Handling of that case: forget what we've
+                                * got now. Flag the err as EAGAIN, so it
+                                * will reread.
+                                */
+                               err = -EAGAIN;
+                               count = 0;
+                               break;
+                       }
+                       blk = le32_to_cpu(*(chain[depth-1].p + count));
+
+                       if (blk == first_block + count)
+                               count++;
+                       else
+                               break;
+               }
+               if (err != -EAGAIN)
+                       goto got_it;
        }
 
        /* Next simple case - plain lookup or failed read of indirect block */
        if (!create || err == -EIO)
                goto cleanup;
 
-       down(&ei->truncate_sem);
+       mutex_lock(&ei->truncate_mutex);
 
        /*
         * If the indirect block is missing while we are reading
@@ -719,7 +885,8 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
                }
                partial = ext3_get_branch(inode, depth, offsets, chain, &err);
                if (!partial) {
-                       up(&ei->truncate_sem);
+                       count++;
+                       mutex_unlock(&ei->truncate_mutex);
                        if (err)
                                goto cleanup;
                        clear_buffer_new(bh_result);
@@ -734,14 +901,21 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
        if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
                ext3_init_block_alloc_info(inode);
 
-       goal = ext3_find_goal(inode, iblock, chain, partial);
+       goal = ext3_find_goal(inode, iblock, partial);
 
-       left = (chain + depth) - partial;
+       /* the number of blocks need to allocate for [d,t]indirect blocks */
+       indirect_blks = (chain + depth) - partial - 1;
 
        /*
+        * Next look up the indirect map to count the totoal number of
+        * direct blocks to allocate for this branch.
+        */
+       count = ext3_blks_to_allocate(partial, indirect_blks,
+                                       maxblocks, blocks_to_boundary);
+       /*
         * Block out ext3_truncate while we alter the tree
         */
-       err = ext3_alloc_branch(handle, inode, left, goal,
+       err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
                                offsets + (partial - chain), partial);
 
        /*
@@ -752,24 +926,18 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock,
         * may need to return -EAGAIN upwards in the worst case.  --sct
         */
        if (!err)
-               err = ext3_splice_branch(handle, inode, iblock, chain,
-                                        partial, left);
-       /*
-        * i_disksize growing is protected by truncate_sem.  Don't forget to
-        * protect it if you're about to implement concurrent
-        * ext3_get_block() -bzzz
-       */
-       if (!err && extend_disksize && inode->i_size > ei->i_disksize)
-               ei->i_disksize = inode->i_size;
-       up(&ei->truncate_sem);
+               err = ext3_splice_branch(handle, inode, iblock,
+                                       partial, indirect_blks, count);
+       mutex_unlock(&ei->truncate_mutex);
        if (err)
                goto cleanup;
 
        set_buffer_new(bh_result);
 got_it:
        map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
-       if (boundary)
+       if (count > blocks_to_boundary)
                set_buffer_boundary(bh_result);
+       err = count;
        /* Clean up and exit */
        partial = chain + depth - 1;    /* the whole chain */
 cleanup:
@@ -783,78 +951,60 @@ out:
        return err;
 }
 
+/* Maximum number of blocks we map for direct IO at once. */
+#define DIO_MAX_BLOCKS 4096
+/*
+ * Number of credits we need for writing DIO_MAX_BLOCKS:
+ * We need sb + group descriptor + bitmap + inode -> 4
+ * For B blocks with A block pointers per block we need:
+ * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
+ * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
+ */
+#define DIO_CREDITS 25
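
The credit count in the comment above can be checked mechanically. This throwaway userspace snippet (not part of the patch) replays the arithmetic for B = 4096 blocks and A = 256 block pointers per block:

#include <stdio.h>

int main(void)
{
	unsigned int A = 256, B = 4096;
	unsigned int fixed    = 4;		/* sb + group descriptor + bitmap + inode */
	unsigned int triple   = 1;		/* one triple indirect block */
	unsigned int doubly   = B / A / A + 2;	/* 0 + 2 = 2   */
	unsigned int indirect = B / A + 2;	/* 16 + 2 = 18 */

	printf("%u\n", fixed + triple + doubly + indirect);	/* prints 25 */
	return 0;
}
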
+
 static int ext3_get_block(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create)
 {
-       handle_t *handle = NULL;
-       int ret;
-
-       if (create) {
-               handle = ext3_journal_current_handle();
-               J_ASSERT(handle != 0);
-       }
-       ret = ext3_get_block_handle(handle, inode, iblock,
-                               bh_result, create, 1);
-       return ret;
-}
-
-#define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32)
-
-static int
-ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock,
-               unsigned long max_blocks, struct buffer_head *bh_result,
-               int create)
-{
-       handle_t *handle = journal_current_handle();
-       int ret = 0;
-
-       if (!handle)
-               goto get_block;         /* A read */
-
-       if (handle->h_transaction->t_state == T_LOCKED) {
-               /*
-                * Huge direct-io writes can hold off commits for long
-                * periods of time.  Let this commit run.
-                */
-               ext3_journal_stop(handle);
-               handle = ext3_journal_start(inode, DIO_CREDITS);
-               if (IS_ERR(handle))
+       handle_t *handle = ext3_journal_current_handle();
+       int ret = 0, started = 0;
+       unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
+
+       if (create && !handle) {        /* Direct IO write... */
+               if (max_blocks > DIO_MAX_BLOCKS)
+                       max_blocks = DIO_MAX_BLOCKS;
+               handle = ext3_journal_start(inode, DIO_CREDITS +
+                               EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));
+               if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
-               goto get_block;
-       }
-
-       if (handle->h_buffer_credits <= EXT3_RESERVE_TRANS_BLOCKS) {
-               /*
-                * Getting low on buffer credits...
-                */
-               ret = ext3_journal_extend(handle, DIO_CREDITS);
-               if (ret > 0) {
-                       /*
-                        * Couldn't extend the transaction.  Start a new one.
-                        */
-                       ret = ext3_journal_restart(handle, DIO_CREDITS);
+                       goto out;
                }
+               started = 1;
        }
 
-get_block:
-       if (ret == 0)
-               ret = ext3_get_block_handle(handle, inode, iblock,
-                                       bh_result, create, 0);
-       bh_result->b_size = (1 << inode->i_blkbits);
+       ret = ext3_get_blocks_handle(handle, inode, iblock,
+                                       max_blocks, bh_result, create);
+       if (ret > 0) {
+               bh_result->b_size = (ret << inode->i_blkbits);
+               ret = 0;
+       }
+       if (started)
+               ext3_journal_stop(handle);
+out:
        return ret;
 }
 
-static int ext3_writepages_get_block(struct inode *inode, sector_t iblock,
-                       struct buffer_head *bh, int create)
+int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
+               u64 start, u64 len)
 {
-       return ext3_direct_io_get_blocks(inode, iblock, 1, bh, create);
+       return generic_block_fiemap(inode, fieinfo, start, len,
+                                   ext3_get_block);
 }
 
 /*
  * `handle' can be NULL if create is zero
  */
-struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
-                               long block, int create, int * errp)
+struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
+                               long block, int create, int *errp)
 {
        struct buffer_head dummy;
        int fatal = 0, err;
@@ -864,25 +1014,41 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
        dummy.b_state = 0;
        dummy.b_blocknr = -1000;
        buffer_trace_init(&dummy.b_history);
-       *errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1);
-       if (!*errp && buffer_mapped(&dummy)) {
+       err = ext3_get_blocks_handle(handle, inode, block, 1,
+                                       &dummy, create);
+       /*
+        * ext3_get_blocks_handle() returns number of blocks
+        * mapped. 0 in case of a HOLE.
+        */
+       if (err > 0) {
+               if (err > 1)
+                       WARN_ON(1);
+               err = 0;
+       }
+       *errp = err;
+       if (!err && buffer_mapped(&dummy)) {
                struct buffer_head *bh;
                bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
+               if (!bh) {
+                       *errp = -EIO;
+                       goto err;
+               }
                if (buffer_new(&dummy)) {
                        J_ASSERT(create != 0);
-                       J_ASSERT(handle != 0);
-
-                       /* Now that we do not always journal data, we
-                          should keep in mind whether this should
-                          always journal the new buffer as metadata.
-                          For now, regular file writes use
-                          ext3_get_block instead, so it's not a
-                          problem. */
+                       J_ASSERT(handle != NULL);
+
+                       /*
+                        * Now that we do not always journal data, we should
+                        * keep in mind whether this should always journal the
+                        * new buffer as metadata.  For now, regular file
+                        * writes use ext3_get_block instead, so it's not a
+                        * problem.
+                        */
                        lock_buffer(bh);
                        BUFFER_TRACE(bh, "call get_create_access");
                        fatal = ext3_journal_get_create_access(handle, bh);
                        if (!fatal && !buffer_uptodate(bh)) {
-                               memset(bh->b_data, 0, inode->i_sb->s_blocksize);
+                               memset(bh->b_data,0,inode->i_sb->s_blocksize);
                                set_buffer_uptodate(bh);
                        }
                        unlock_buffer(bh);
@@ -900,10 +1066,11 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
                }
                return bh;
        }
+err:
        return NULL;
 }
 
-struct buffer_head *ext3_bread(handle_t *handle, struct inode * inode,
+struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
                               int block, int create, int *err)
 {
        struct buffer_head * bh;
@@ -913,7 +1080,7 @@ struct buffer_head *ext3_bread(handle_t *handle, struct inode * inode,
                return bh;
        if (buffer_uptodate(bh))
                return bh;
-       ll_rw_block(READ, 1, &bh);
+       ll_rw_block(READ_META, 1, &bh);
        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
                return bh;
@@ -938,7 +1105,7 @@ static int walk_page_buffers(      handle_t *handle,
 
        for (   bh = head, block_start = 0;
                ret == 0 && (bh != head || !block_start);
-               block_start = block_end, bh = next)
+               block_start = block_end, bh = next)
        {
                next = bh->b_this_page;
                block_end = block_start + blocksize;
@@ -977,63 +1144,116 @@ static int walk_page_buffers(   handle_t *handle,
  * So what we do is to rely on the fact that journal_stop/journal_start
  * will _not_ run commit under these circumstances because handle->h_ref
  * is elevated.  We'll still have enough credits for the tiny quotafile
- * write.  
+ * write.
  */
-
-static int do_journal_get_write_access(handle_t *handle, 
-                                      struct buffer_head *bh)
+static int do_journal_get_write_access(handle_t *handle,
+                                       struct buffer_head *bh)
 {
        if (!buffer_mapped(bh) || buffer_freed(bh))
                return 0;
        return ext3_journal_get_write_access(handle, bh);
 }
 
-static int ext3_prepare_write(struct file *file, struct page *page,
-                             unsigned from, unsigned to)
+/*
+ * Truncate blocks that were not used by write. We have to truncate the
+ * pagecache as well so that corresponding buffers get properly unmapped.
+ */
+static void ext3_truncate_failed_write(struct inode *inode)
 {
-       struct inode *inode = page->mapping->host;
-       int ret, needed_blocks = ext3_writepage_trans_blocks(inode);
+       truncate_inode_pages(inode->i_mapping, inode->i_size);
+       ext3_truncate(inode);
+}
+
+static int ext3_write_begin(struct file *file, struct address_space *mapping,
+                               loff_t pos, unsigned len, unsigned flags,
+                               struct page **pagep, void **fsdata)
+{
+       struct inode *inode = mapping->host;
+       int ret;
        handle_t *handle;
        int retries = 0;
+       struct page *page;
+       pgoff_t index;
+       unsigned from, to;
+       /* Reserve one block more for addition to orphan list in case
+        * we allocate blocks but write fails for some reason */
+       int needed_blocks = ext3_writepage_trans_blocks(inode) + 1;
+
+       index = pos >> PAGE_CACHE_SHIFT;
+       from = pos & (PAGE_CACHE_SIZE - 1);
+       to = from + len;
 
 retry:
+       page = grab_cache_page_write_begin(mapping, index, flags);
+       if (!page)
+               return -ENOMEM;
+       *pagep = page;
+
        handle = ext3_journal_start(inode, needed_blocks);
        if (IS_ERR(handle)) {
+               unlock_page(page);
+               page_cache_release(page);
                ret = PTR_ERR(handle);
                goto out;
        }
-       if (test_opt(inode->i_sb, NOBH))
-               ret = nobh_prepare_write(page, from, to, ext3_get_block);
-       else
-               ret = block_prepare_write(page, from, to, ext3_get_block);
+       ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
+                                                       ext3_get_block);
        if (ret)
-               goto prepare_write_failed;
+               goto write_begin_failed;
 
        if (ext3_should_journal_data(inode)) {
                ret = walk_page_buffers(handle, page_buffers(page),
                                from, to, NULL, do_journal_get_write_access);
        }
-prepare_write_failed:
-       if (ret)
+write_begin_failed:
+       if (ret) {
+               /*
+                * block_write_begin may have instantiated a few blocks
+                * outside i_size.  Trim these off again. Don't need
+                * i_size_read because we hold i_mutex.
+                *
+                * Add inode to orphan list in case we crash before truncate
+                * finishes. Do this only if ext3_can_truncate() agrees so
+                * that orphan processing code is happy.
+                */
+               if (pos + len > inode->i_size && ext3_can_truncate(inode))
+                       ext3_orphan_add(handle, inode);
                ext3_journal_stop(handle);
+               unlock_page(page);
+               page_cache_release(page);
+               if (pos + len > inode->i_size)
+                       ext3_truncate_failed_write(inode);
+       }
        if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
                goto retry;
 out:
        return ret;
 }
 
-int
-ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
+
+int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
 {
        int err = journal_dirty_data(handle, bh);
        if (err)
-               ext3_journal_abort_handle(__FUNCTION__, __FUNCTION__,
-                                               bh, handle,err);
+               ext3_journal_abort_handle(__func__, __func__,
+                                               bh, handle, err);
        return err;
 }
 
-/* For commit_write() in data=journal mode */
-static int commit_write_fn(handle_t *handle, struct buffer_head *bh)
+/* For ordered writepage and write_end functions */
+static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
+{
+       /*
+        * Write could have mapped the buffer but it didn't copy the data in
+        * yet. So avoid filing such buffer into a transaction.
+        */
+       if (buffer_mapped(bh) && buffer_uptodate(bh))
+               return ext3_journal_dirty_data(handle, bh);
+       return 0;
+}
+
+/* For write_end() in data=journal mode */
+static int write_end_fn(handle_t *handle, struct buffer_head *bh)
 {
        if (!buffer_mapped(bh) || buffer_freed(bh))
                return 0;
@@ -1042,99 +1262,145 @@ static int commit_write_fn(handle_t *handle, struct buffer_head *bh)
 }
 
 /*
+ * This is nasty and subtle: ext3_write_begin() could have allocated blocks
+ * for the whole page but later we failed to copy the data in. Update inode
+ * size according to what we managed to copy. The rest is going to be
+ * truncated in write_end function.
+ */
+static void update_file_sizes(struct inode *inode, loff_t pos, unsigned copied)
+{
+       /* What matters to us is i_disksize. We don't write i_size anywhere */
+       if (pos + copied > inode->i_size)
+               i_size_write(inode, pos + copied);
+       if (pos + copied > EXT3_I(inode)->i_disksize) {
+               EXT3_I(inode)->i_disksize = pos + copied;
+               mark_inode_dirty(inode);
+       }
+}
+
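
update_file_sizes() only trusts what was actually copied into the page; blocks that write_begin instantiated beyond that point are handled afterwards by the orphan-add and truncate calls in the write_end paths below. A worked example of the numbers involved (illustration only, values arbitrary):

#include <stdio.h>

int main(void)
{
	long long i_size = 10000, i_disksize = 10000;
	long long pos = 10000;
	unsigned int len = 4096, copied = 1000;	/* short copy: only 1000 of 4096 bytes */

	if (pos + copied > i_size)
		i_size = pos + copied;		/* 11000 */
	if (pos + copied > i_disksize)
		i_disksize = pos + copied;	/* 11000, and the inode is marked dirty */

	/* pos + len == 14096 is still beyond i_size, so the write_end path puts
	 * the inode on the orphan list and truncates blocks past 11000. */
	printf("i_size=%lld i_disksize=%lld\n", i_size, i_disksize);
	return 0;
}
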
+/*
  * We need to pick up the new inode size which generic_commit_write gave us
  * `file' can be NULL - eg, when called from page_symlink().
  *
  * ext3 never places buffers on inode->i_mapping->private_list.  metadata
  * buffers are managed internally.
  */
-
-static int ext3_ordered_commit_write(struct file *file, struct page *page,
-                            unsigned from, unsigned to)
+static int ext3_ordered_write_end(struct file *file,
+                               struct address_space *mapping,
+                               loff_t pos, unsigned len, unsigned copied,
+                               struct page *page, void *fsdata)
 {
        handle_t *handle = ext3_journal_current_handle();
-       struct inode *inode = page->mapping->host;
+       struct inode *inode = file->f_mapping->host;
+       unsigned from, to;
        int ret = 0, ret2;
 
-       ret = walk_page_buffers(handle, page_buffers(page),
-               from, to, NULL, ext3_journal_dirty_data);
+       copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
 
-       if (ret == 0) {
-               /*
-                * generic_commit_write() will run mark_inode_dirty() if i_size
-                * changes.  So let's piggyback the i_disksize mark_inode_dirty
-                * into that.
-                */
-               loff_t new_i_size;
+       from = pos & (PAGE_CACHE_SIZE - 1);
+       to = from + copied;
+       ret = walk_page_buffers(handle, page_buffers(page),
+               from, to, NULL, journal_dirty_data_fn);
 
-               new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
-               if (new_i_size > EXT3_I(inode)->i_disksize)
-                       EXT3_I(inode)->i_disksize = new_i_size;
-               ret = generic_commit_write(file, page, from, to);
-       }
+       if (ret == 0)
+               update_file_sizes(inode, pos, copied);
+       /*
+        * There may be allocated blocks outside of i_size because
+        * we failed to copy some data. Prepare for truncate.
+        */
+       if (pos + len > inode->i_size && ext3_can_truncate(inode))
+               ext3_orphan_add(handle, inode);
        ret2 = ext3_journal_stop(handle);
        if (!ret)
                ret = ret2;
-       return ret;
+       unlock_page(page);
+       page_cache_release(page);
+
+       if (pos + len > inode->i_size)
+               ext3_truncate_failed_write(inode);
+       return ret ? ret : copied;
 }
 
-static int ext3_writeback_commit_write(struct file *file, struct page *page,
-                            unsigned from, unsigned to)
+static int ext3_writeback_write_end(struct file *file,
+                               struct address_space *mapping,
+                               loff_t pos, unsigned len, unsigned copied,
+                               struct page *page, void *fsdata)
 {
        handle_t *handle = ext3_journal_current_handle();
-       struct inode *inode = page->mapping->host;
-       int ret = 0, ret2;
-       loff_t new_i_size;
-
-       new_i_size = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
-       if (new_i_size > EXT3_I(inode)->i_disksize)
-               EXT3_I(inode)->i_disksize = new_i_size;
+       struct inode *inode = file->f_mapping->host;
+       int ret;
 
-       if (test_opt(inode->i_sb, NOBH))
-               ret = nobh_commit_write(file, page, from, to);
-       else
-               ret = generic_commit_write(file, page, from, to);
+       copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+       update_file_sizes(inode, pos, copied);
+       /*
+        * There may be allocated blocks outside of i_size because
+        * we failed to copy some data. Prepare for truncate.
+        */
+       if (pos + len > inode->i_size && ext3_can_truncate(inode))
+               ext3_orphan_add(handle, inode);
+       ret = ext3_journal_stop(handle);
+       unlock_page(page);
+       page_cache_release(page);
 
-       ret2 = ext3_journal_stop(handle);
-       if (!ret)
-               ret = ret2;
-       return ret;
+       if (pos + len > inode->i_size)
+               ext3_truncate_failed_write(inode);
+       return ret ? ret : copied;
 }
 
-static int ext3_journalled_commit_write(struct file *file,
-                       struct page *page, unsigned from, unsigned to)
+static int ext3_journalled_write_end(struct file *file,
+                               struct address_space *mapping,
+                               loff_t pos, unsigned len, unsigned copied,
+                               struct page *page, void *fsdata)
 {
        handle_t *handle = ext3_journal_current_handle();
-       struct inode *inode = page->mapping->host;
+       struct inode *inode = mapping->host;
        int ret = 0, ret2;
        int partial = 0;
-       loff_t pos;
+       unsigned from, to;
 
-       /*
-        * Here we duplicate the generic_commit_write() functionality
-        */
-       pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
+       from = pos & (PAGE_CACHE_SIZE - 1);
+       to = from + len;
+
+       if (copied < len) {
+               if (!PageUptodate(page))
+                       copied = 0;
+               page_zero_new_buffers(page, from + copied, to);
+               to = from + copied;
+       }
 
        ret = walk_page_buffers(handle, page_buffers(page), from,
-                               to, &partial, commit_write_fn);
+                               to, &partial, write_end_fn);
        if (!partial)
                SetPageUptodate(page);
-       if (pos > inode->i_size)
-               i_size_write(inode, pos);
-       EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
+
+       if (pos + copied > inode->i_size)
+               i_size_write(inode, pos + copied);
+       /*
+        * There may be allocated blocks outside of i_size because
+        * we failed to copy some data. Prepare for truncate.
+        */
+       if (pos + len > inode->i_size && ext3_can_truncate(inode))
+               ext3_orphan_add(handle, inode);
+       ext3_set_inode_state(inode, EXT3_STATE_JDATA);
        if (inode->i_size > EXT3_I(inode)->i_disksize) {
                EXT3_I(inode)->i_disksize = inode->i_size;
                ret2 = ext3_mark_inode_dirty(handle, inode);
-               if (!ret) 
+               if (!ret)
                        ret = ret2;
        }
+
        ret2 = ext3_journal_stop(handle);
        if (!ret)
                ret = ret2;
-       return ret;
+       unlock_page(page);
+       page_cache_release(page);
+
+       if (pos + len > inode->i_size)
+               ext3_truncate_failed_write(inode);
+       return ret ? ret : copied;
 }
 
-/* 
+/*
  * bmap() is special.  It gets used by applications such as lilo and by
  * the swapper to find the on-disk block of a specific piece of data.
  *
@@ -1143,10 +1409,10 @@ static int ext3_journalled_commit_write(struct file *file,
  * filesystem and enables swap, then they may get a nasty shock when the
  * data getting swapped to that swapfile suddenly gets overwritten by
  * the original zero's written out previously to the journal and
- * awaiting writeback in the kernel's buffer cache. 
+ * awaiting writeback in the kernel's buffer cache.
  *
  * So, if we see any bmap calls here on a modified, data-journaled file,
- * take extra steps to flush any blocks which might be in the cache. 
+ * take extra steps to flush any blocks which might be in the cache.
  */
 static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
 {
@@ -1154,17 +1420,17 @@ static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
        journal_t *journal;
        int err;
 
-       if (EXT3_I(inode)->i_state & EXT3_STATE_JDATA) {
-               /* 
+       if (ext3_test_inode_state(inode, EXT3_STATE_JDATA)) {
+               /*
                 * This is a REALLY heavyweight approach, but the use of
                 * bmap on dirty files is expected to be extremely rare:
                 * only if we run lilo or swapon on a freshly made file
-                * do we expect this to happen. 
+                * do we expect this to happen.
                 *
                 * (bmap requires CAP_SYS_RAWIO so this does not
                 * represent an unprivileged user DOS attack --- we'd be
                 * in trouble if mortal users could trigger this path at
-                * will.) 
+                * will.)
                 *
                 * NB. EXT3_STATE_JDATA is not set on files other than
                 * regular files.  If somebody wants to bmap a directory
@@ -1173,7 +1439,7 @@ static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
                 * everything they get.
                 */
 
-               EXT3_I(inode)->i_state &= ~EXT3_STATE_JDATA;
+               ext3_clear_inode_state(inode, EXT3_STATE_JDATA);
                journal = EXT3_JOURNAL(inode);
                journal_lock_updates(journal);
                err = journal_flush(journal);
@@ -1198,11 +1464,9 @@ static int bput_one(handle_t *handle, struct buffer_head *bh)
        return 0;
 }
 
-static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
+static int buffer_unmapped(handle_t *handle, struct buffer_head *bh)
 {
-       if (buffer_mapped(bh))
-               return ext3_journal_dirty_data(handle, bh);
-       return 0;
+       return !buffer_mapped(bh);
 }
 
 /*
@@ -1224,7 +1488,7 @@ static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
  *     ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ...
  *
  * Same applies to ext3_get_block().  We will deadlock on various things like
- * lock_journal and i_truncate_sem.
+ * lock_journal and i_truncate_mutex.
  *
  * Setting PF_MEMALLOC here doesn't work - too many internal memory
  * allocations fail.
@@ -1258,7 +1522,7 @@ static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
  * we don't need to open a transaction here.
  */
 static int ext3_ordered_writepage(struct page *page,
-                       struct writeback_control *wbc)
+                               struct writeback_control *wbc)
 {
        struct inode *inode = page->mapping->host;
        struct buffer_head *page_bufs;
@@ -1267,6 +1531,7 @@ static int ext3_ordered_writepage(struct page *page,
        int err;
 
        J_ASSERT(PageLocked(page));
+       WARN_ON_ONCE(IS_RDONLY(inode));
 
        /*
         * We give up here if we're reentered, because it might be for a
@@ -1275,6 +1540,19 @@ static int ext3_ordered_writepage(struct page *page,
        if (ext3_journal_current_handle())
                goto out_fail;
 
+       if (!page_has_buffers(page)) {
+               create_empty_buffers(page, inode->i_sb->s_blocksize,
+                               (1 << BH_Dirty)|(1 << BH_Uptodate));
+               page_bufs = page_buffers(page);
+       } else {
+               page_bufs = page_buffers(page);
+               if (!walk_page_buffers(NULL, page_bufs, 0, PAGE_CACHE_SIZE,
+                                      NULL, buffer_unmapped)) {
+                       /* Provide NULL get_block() to catch bugs if buffers
+                        * weren't really mapped */
+                       return block_write_full_page(page, NULL, wbc);
+               }
+       }
        handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
 
        if (IS_ERR(handle)) {
@@ -1282,11 +1560,6 @@ static int ext3_ordered_writepage(struct page *page,
                goto out_fail;
        }
 
-       if (!page_has_buffers(page)) {
-               create_empty_buffers(page, inode->i_sb->s_blocksize,
-                               (1 << BH_Dirty)|(1 << BH_Uptodate));
-       }
-       page_bufs = page_buffers(page);
        walk_page_buffers(handle, page_bufs, 0,
                        PAGE_CACHE_SIZE, NULL, bget_one);
 
@@ -1300,7 +1573,7 @@ static int ext3_ordered_writepage(struct page *page,
         */
 
        /*
-        * And attach them to the current transaction.  But only if 
+        * And attach them to the current transaction.  But only if
         * block_write_full_page() succeeded.  Otherwise they are unmapped,
         * and generally junk.
         */
@@ -1323,45 +1596,6 @@ out_fail:
        return ret;
 }
 
-static int
-ext3_writeback_writepage_helper(struct page *page,
-                               struct writeback_control *wbc)
-{
-       return block_write_full_page(page, ext3_get_block, wbc);
-}
-
-static int
-ext3_writeback_writepages(struct address_space *mapping,
-                               struct writeback_control *wbc)
-{
-       struct inode *inode = mapping->host;
-       handle_t *handle = NULL;
-       int err, ret = 0;
-
-       if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
-               return ret;
-
-       handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
-       if (IS_ERR(handle)) {
-               ret = PTR_ERR(handle);
-               return ret;
-       }
-
-        ret = __mpage_writepages(mapping, wbc, ext3_writepages_get_block,
-                                       ext3_writeback_writepage_helper);
-
-       /*
-        * Need to reaquire the handle since ext3_writepages_get_block()
-        * can restart the handle
-        */
-       handle = journal_current_handle();
-
-       err = ext3_journal_stop(handle);
-       if (!ret)
-               ret = err;
-       return ret;
-}
-
 static int ext3_writeback_writepage(struct page *page,
                                struct writeback_control *wbc)
 {
@@ -1370,16 +1604,28 @@ static int ext3_writeback_writepage(struct page *page,
        int ret = 0;
        int err;
 
+       J_ASSERT(PageLocked(page));
+       WARN_ON_ONCE(IS_RDONLY(inode));
+
        if (ext3_journal_current_handle())
                goto out_fail;
 
+       if (page_has_buffers(page)) {
+               if (!walk_page_buffers(NULL, page_buffers(page), 0,
+                                     PAGE_CACHE_SIZE, NULL, buffer_unmapped)) {
+                       /* Provide NULL get_block() to catch bugs if buffers
+                        * weren't really mapped */
+                       return block_write_full_page(page, NULL, wbc);
+               }
+       }
+
        handle = ext3_journal_start(inode, ext3_writepage_trans_blocks(inode));
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto out_fail;
        }
 
-       if (test_opt(inode->i_sb, NOBH))
+       if (test_opt(inode->i_sb, NOBH) && ext3_should_writeback_data(inode))
                ret = nobh_writepage(page, ext3_get_block, wbc);
        else
                ret = block_write_full_page(page, ext3_get_block, wbc);
@@ -1403,6 +1649,9 @@ static int ext3_journalled_writepage(struct page *page,
        int ret = 0;
        int err;
 
+       J_ASSERT(PageLocked(page));
+       WARN_ON_ONCE(IS_RDONLY(inode));
+
        if (ext3_journal_current_handle())
                goto no_write;
 
@@ -1420,16 +1669,18 @@ static int ext3_journalled_writepage(struct page *page,
                ClearPageChecked(page);
                ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
                                        ext3_get_block);
-               if (ret != 0)
+               if (ret != 0) {
+                       ext3_journal_stop(handle);
                        goto out_unlock;
+               }
                ret = walk_page_buffers(handle, page_buffers(page), 0,
                        PAGE_CACHE_SIZE, NULL, do_journal_get_write_access);
 
                err = walk_page_buffers(handle, page_buffers(page), 0,
-                               PAGE_CACHE_SIZE, NULL, commit_write_fn);
+                               PAGE_CACHE_SIZE, NULL, write_end_fn);
                if (ret == 0)
                        ret = err;
-               EXT3_I(inode)->i_state |= EXT3_STATE_JDATA;
+               ext3_set_inode_state(inode, EXT3_STATE_JDATA);
                unlock_page(page);
        } else {
                /*
@@ -1464,7 +1715,7 @@ ext3_readpages(struct file *file, struct address_space *mapping,
        return mpage_readpages(mapping, pages, nr_pages, ext3_get_block);
 }
 
-static int ext3_invalidatepage(struct page *page, unsigned long offset)
+static void ext3_invalidatepage(struct page *page, unsigned long offset)
 {
        journal_t *journal = EXT3_JOURNAL(page->mapping->host);
 
@@ -1474,10 +1725,10 @@ static int ext3_invalidatepage(struct page *page, unsigned long offset)
        if (offset == 0)
                ClearPageChecked(page);
 
-       return journal_invalidatepage(journal, page, offset);
+       journal_invalidatepage(journal, page, offset);
 }
 
-static int ext3_releasepage(struct page *page, int wait)
+static int ext3_releasepage(struct page *page, gfp_t wait)
 {
        journal_t *journal = EXT3_JOURNAL(page->mapping->host);
 
@@ -1493,7 +1744,8 @@ static int ext3_releasepage(struct page *page, int wait)
  * if the machine crashes during the write.
  *
  * If the O_DIRECT write is instantiating holes inside i_size and the machine
- * crashes then stale disk data _may_ be exposed inside the file.
+ * crashes then stale disk data _may_ be exposed inside the file. But current
+ * VFS code falls back to the buffered path in that case, so we are safe.
  */
 static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
                        const struct iovec *iov, loff_t offset,
@@ -1502,45 +1754,56 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
        struct file *file = iocb->ki_filp;
        struct inode *inode = file->f_mapping->host;
        struct ext3_inode_info *ei = EXT3_I(inode);
-       handle_t *handle = NULL;
+       handle_t *handle;
        ssize_t ret;
        int orphan = 0;
        size_t count = iov_length(iov, nr_segs);
+       int retries = 0;
 
        if (rw == WRITE) {
                loff_t final_size = offset + count;
 
-               handle = ext3_journal_start(inode, DIO_CREDITS);
-               if (IS_ERR(handle)) {
-                       ret = PTR_ERR(handle);
-                       goto out;
-               }
                if (final_size > inode->i_size) {
+                       /* Credits for sb + inode write */
+                       handle = ext3_journal_start(inode, 2);
+                       if (IS_ERR(handle)) {
+                               ret = PTR_ERR(handle);
+                               goto out;
+                       }
                        ret = ext3_orphan_add(handle, inode);
-                       if (ret)
-                               goto out_stop;
+                       if (ret) {
+                               ext3_journal_stop(handle);
+                               goto out;
+                       }
                        orphan = 1;
                        ei->i_disksize = inode->i_size;
+                       ext3_journal_stop(handle);
                }
        }
 
-       ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, 
+retry:
+       ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
                                 offset, nr_segs,
-                                ext3_direct_io_get_blocks, NULL);
-
-       /*
-        * Reacquire the handle: ext3_direct_io_get_block() can restart the
-        * transaction
-        */
-       handle = journal_current_handle();
+                                ext3_get_block, NULL);
+       if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
+               goto retry;
 
-out_stop:
-       if (handle) {
+       if (orphan) {
                int err;
 
-               if (orphan && inode->i_nlink)
+               /* Credits for sb + inode write */
+               handle = ext3_journal_start(inode, 2);
+               if (IS_ERR(handle)) {
+                       /* This is really bad luck. We've written the data
+                        * but cannot extend i_size. Truncate allocated blocks
+                        * and pretend the write failed... */
+                       ext3_truncate(inode);
+                       ret = PTR_ERR(handle);
+                       goto out;
+               }
+               if (inode->i_nlink)
                        ext3_orphan_del(handle, inode);
-               if (orphan && ret > 0) {
+               if (ret > 0) {
                        loff_t end = offset + ret;
                        if (end > inode->i_size) {
                                ei->i_disksize = end;
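[Editor's note: taken together, the rewritten direct-IO path brackets an i_size-extending write with the orphan-list protocol. A condensed, hedged sketch of that shape (error handling trimmed, i_disksize bookkeeping omitted, helper name extending_write_sketch() hypothetical):]

/*
 * Hedged sketch of the orphan protocol around an i_size-extending write:
 * 1) before the write, log the inode on the orphan list so a crash mid-write
 *    leaves blocks that recovery will truncate away;
 * 2) after the write, update i_size and take the inode back off the orphan
 *    list in a second, small transaction.
 */
static ssize_t extending_write_sketch(struct inode *inode, loff_t offset,
                                      ssize_t (*do_write)(struct inode *))
{
        handle_t *handle;
        ssize_t written;

        handle = ext3_journal_start(inode, 2);  /* sb + inode block */
        if (IS_ERR(handle))
                return PTR_ERR(handle);
        ext3_orphan_add(handle, inode);
        ext3_journal_stop(handle);

        written = do_write(inode);              /* the actual data I/O */

        handle = ext3_journal_start(inode, 2);
        if (IS_ERR(handle))
                return PTR_ERR(handle);
        if (inode->i_nlink)
                ext3_orphan_del(handle, inode);
        if (written > 0 && offset + written > inode->i_size) {
                i_size_write(inode, offset + written);
                ext3_mark_inode_dirty(handle, inode);
        }
        ext3_journal_stop(handle);

        return written;
}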
@@ -1582,44 +1845,51 @@ static int ext3_journalled_set_page_dirty(struct page *page)
        return __set_page_dirty_nobuffers(page);
 }
 
-static struct address_space_operations ext3_ordered_aops = {
-       .readpage       = ext3_readpage,
-       .readpages      = ext3_readpages,
-       .writepage      = ext3_ordered_writepage,
-       .sync_page      = block_sync_page,
-       .prepare_write  = ext3_prepare_write,
-       .commit_write   = ext3_ordered_commit_write,
-       .bmap           = ext3_bmap,
-       .invalidatepage = ext3_invalidatepage,
-       .releasepage    = ext3_releasepage,
-       .direct_IO      = ext3_direct_IO,
+static const struct address_space_operations ext3_ordered_aops = {
+       .readpage               = ext3_readpage,
+       .readpages              = ext3_readpages,
+       .writepage              = ext3_ordered_writepage,
+       .sync_page              = block_sync_page,
+       .write_begin            = ext3_write_begin,
+       .write_end              = ext3_ordered_write_end,
+       .bmap                   = ext3_bmap,
+       .invalidatepage         = ext3_invalidatepage,
+       .releasepage            = ext3_releasepage,
+       .direct_IO              = ext3_direct_IO,
+       .migratepage            = buffer_migrate_page,
+       .is_partially_uptodate  = block_is_partially_uptodate,
+       .error_remove_page      = generic_error_remove_page,
 };
 
-static struct address_space_operations ext3_writeback_aops = {
-       .readpage       = ext3_readpage,
-       .readpages      = ext3_readpages,
-       .writepage      = ext3_writeback_writepage,
-       .writepages     = ext3_writeback_writepages,
-       .sync_page      = block_sync_page,
-       .prepare_write  = ext3_prepare_write,
-       .commit_write   = ext3_writeback_commit_write,
-       .bmap           = ext3_bmap,
-       .invalidatepage = ext3_invalidatepage,
-       .releasepage    = ext3_releasepage,
-       .direct_IO      = ext3_direct_IO,
+static const struct address_space_operations ext3_writeback_aops = {
+       .readpage               = ext3_readpage,
+       .readpages              = ext3_readpages,
+       .writepage              = ext3_writeback_writepage,
+       .sync_page              = block_sync_page,
+       .write_begin            = ext3_write_begin,
+       .write_end              = ext3_writeback_write_end,
+       .bmap                   = ext3_bmap,
+       .invalidatepage         = ext3_invalidatepage,
+       .releasepage            = ext3_releasepage,
+       .direct_IO              = ext3_direct_IO,
+       .migratepage            = buffer_migrate_page,
+       .is_partially_uptodate  = block_is_partially_uptodate,
+       .error_remove_page      = generic_error_remove_page,
 };
 
-static struct address_space_operations ext3_journalled_aops = {
-       .readpage       = ext3_readpage,
-       .readpages      = ext3_readpages,
-       .writepage      = ext3_journalled_writepage,
-       .sync_page      = block_sync_page,
-       .prepare_write  = ext3_prepare_write,
-       .commit_write   = ext3_journalled_commit_write,
-       .set_page_dirty = ext3_journalled_set_page_dirty,
-       .bmap           = ext3_bmap,
-       .invalidatepage = ext3_invalidatepage,
-       .releasepage    = ext3_releasepage,
+static const struct address_space_operations ext3_journalled_aops = {
+       .readpage               = ext3_readpage,
+       .readpages              = ext3_readpages,
+       .writepage              = ext3_journalled_writepage,
+       .sync_page              = block_sync_page,
+       .write_begin            = ext3_write_begin,
+       .write_end              = ext3_journalled_write_end,
+       .set_page_dirty         = ext3_journalled_set_page_dirty,
+       .bmap                   = ext3_bmap,
+       .invalidatepage         = ext3_invalidatepage,
+       .releasepage            = ext3_releasepage,
+       .is_partially_uptodate  = block_is_partially_uptodate,
+       .error_remove_page      = generic_error_remove_page,
 };
 
 void ext3_set_aops(struct inode *inode)
@@ -1641,13 +1911,12 @@ void ext3_set_aops(struct inode *inode)
 static int ext3_block_truncate_page(handle_t *handle, struct page *page,
                struct address_space *mapping, loff_t from)
 {
-       unsigned long index = from >> PAGE_CACHE_SHIFT;
+       ext3_fsblk_t index = from >> PAGE_CACHE_SHIFT;
        unsigned offset = from & (PAGE_CACHE_SIZE-1);
        unsigned blocksize, iblock, length, pos;
        struct inode *inode = mapping->host;
        struct buffer_head *bh;
        int err = 0;
-       void *kaddr;
 
        blocksize = inode->i_sb->s_blocksize;
        length = blocksize - (offset & (blocksize - 1));
@@ -1657,15 +1926,11 @@ static int ext3_block_truncate_page(handle_t *handle, struct page *page,
         * For "nobh" option,  we can only work if we don't need to
         * read-in the page - otherwise we create buffers to do the IO.
         */
-       if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH)) {
-               if (PageUptodate(page)) {
-                       kaddr = kmap_atomic(page, KM_USER0);
-                       memset(kaddr + offset, 0, length);
-                       flush_dcache_page(page);
-                       kunmap_atomic(kaddr, KM_USER0);
-                       set_page_dirty(page);
-                       goto unlock;
-               }
+       if (!page_has_buffers(page) && test_opt(inode->i_sb, NOBH) &&
+            ext3_should_writeback_data(inode) && PageUptodate(page)) {
+               zero_user(page, offset, length);
+               set_page_dirty(page);
+               goto unlock;
        }
 
        if (!page_has_buffers(page))
@@ -1716,11 +1981,7 @@ static int ext3_block_truncate_page(handle_t *handle, struct page *page,
                        goto unlock;
        }
 
-       kaddr = kmap_atomic(page, KM_USER0);
-       memset(kaddr + offset, 0, length);
-       flush_dcache_page(page);
-       kunmap_atomic(kaddr, KM_USER0);
-
+       zero_user(page, offset, length);
        BUFFER_TRACE(bh, "zeroed end of block");
 
        err = 0;
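[Editor's note: zero_user() from <linux/highmem.h> bundles up essentially the open-coded sequence deleted in this hunk; a hedged sketch of the equivalence, using the era-appropriate two-argument kmap_atomic() the old code used.]

#include <linux/highmem.h>

/* Hedged sketch: roughly what zero_user(page, offset, length) does, written
 * with the primitives the removed code used (newer kernels drop the KM_USER0
 * slot argument from kmap_atomic/kunmap_atomic). */
static void zero_user_sketch(struct page *page, unsigned offset, unsigned length)
{
        void *kaddr = kmap_atomic(page, KM_USER0);

        memset(kaddr + offset, 0, length);
        flush_dcache_page(page);
        kunmap_atomic(kaddr, KM_USER0);
}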
@@ -1786,17 +2047,14 @@ static inline int all_zeroes(__le32 *p, __le32 *q)
  *             c) free the subtrees growing from the inode past the @chain[0].
  *                     (no partially truncated stuff there).  */
 
-static Indirect *ext3_find_shared(struct inode *inode,
-                               int depth,
-                               int offsets[4],
-                               Indirect chain[4],
-                               __le32 *top)
+static Indirect *ext3_find_shared(struct inode *inode, int depth,
+                       int offsets[4], Indirect chain[4], __le32 *top)
 {
        Indirect *partial, *p;
        int k, err;
 
        *top = 0;
-       /* Make k index the deepest non-null offest + 1 */
+       /* Make k index the deepest non-null offset + 1 */
        for (k = depth; k > 1 && !offsets[k-1]; k--)
                ;
        partial = ext3_get_branch(inode, k, offsets, chain, &err);
@@ -1829,8 +2087,7 @@ static Indirect *ext3_find_shared(struct inode *inode,
        }
        /* Writer: end */
 
-       while(partial > p)
-       {
+       while(partial > p) {
                brelse(partial->bh);
                partial--;
        }
@@ -1846,10 +2103,9 @@ no_top:
  * We release `count' blocks on disk, but (last - first) may be greater
  * than `count' because there can be holes in there.
  */
-static void
-ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh,
-               unsigned long block_to_free, unsigned long count,
-               __le32 *first, __le32 *last)
+static void ext3_clear_blocks(handle_t *handle, struct inode *inode,
+               struct buffer_head *bh, ext3_fsblk_t block_to_free,
+               unsigned long count, __le32 *first, __le32 *last)
 {
        __le32 *p;
        if (try_to_extend_transaction(handle, inode)) {
@@ -1858,7 +2114,7 @@ ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh,
                        ext3_journal_dirty_metadata(handle, bh);
                }
                ext3_mark_inode_dirty(handle, inode);
-               ext3_journal_test_restart(handle, inode);
+               truncate_restart_transaction(handle, inode);
                if (bh) {
                        BUFFER_TRACE(bh, "retaking write access");
                        ext3_journal_get_write_access(handle, bh);
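[Editor's note: the restart dance above follows a recurring ext3 idiom: try to add credits to the running handle, and only if the journal cannot stretch the current transaction, checkpoint the inode state and restart the handle. A minimal, hedged sketch built on the real ext3_journal_extend()/ext3_journal_restart() primitives; the helper name extend_or_restart() is hypothetical.]

/*
 * Hedged sketch of the "extend or restart" idiom: keep the handle usable
 * for `needed` more credits, restarting the transaction only if the journal
 * cannot stretch the current one.
 */
static int extend_or_restart(handle_t *handle, struct inode *inode, int needed)
{
        if (ext3_journal_extend(handle, needed) == 0)
                return 0;       /* got more credits in the same transaction */

        /* No room left: make sure the inode is recorded, then restart. */
        ext3_mark_inode_dirty(handle, inode);
        return ext3_journal_restart(handle, needed);
}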
@@ -1910,12 +2166,12 @@ static void ext3_free_data(handle_t *handle, struct inode *inode,
                           struct buffer_head *this_bh,
                           __le32 *first, __le32 *last)
 {
-       unsigned long block_to_free = 0;    /* Starting block # of a run */
-       unsigned long count = 0;            /* Number of blocks in the run */ 
+       ext3_fsblk_t block_to_free = 0;    /* Starting block # of a run */
+       unsigned long count = 0;            /* Number of blocks in the run */
        __le32 *block_to_free_p = NULL;     /* Pointer into inode/ind
                                               corresponding to
                                               block_to_free */
-       unsigned long nr;                   /* Current block # */
+       ext3_fsblk_t nr;                    /* Current block # */
        __le32 *p;                          /* Pointer into inode/ind
                                               for current block */
        int err;
@@ -1940,7 +2196,7 @@ static void ext3_free_data(handle_t *handle, struct inode *inode,
                        } else if (nr == block_to_free + count) {
                                count++;
                        } else {
-                               ext3_clear_blocks(handle, inode, this_bh, 
+                               ext3_clear_blocks(handle, inode, this_bh,
                                                  block_to_free,
                                                  count, block_to_free_p, p);
                                block_to_free = nr;
@@ -1956,7 +2212,21 @@ static void ext3_free_data(handle_t *handle, struct inode *inode,
 
        if (this_bh) {
                BUFFER_TRACE(this_bh, "call ext3_journal_dirty_metadata");
-               ext3_journal_dirty_metadata(handle, this_bh);
+
+               /*
+                * The buffer head should have an attached journal head at this
+                * point. However, if the data is corrupted and an indirect
+                * block pointed to itself, it would have been detached when
+                * the block was cleared. Check for this instead of OOPSing.
+                */
+               if (bh2jh(this_bh))
+                       ext3_journal_dirty_metadata(handle, this_bh);
+               else
+                       ext3_error(inode->i_sb, "ext3_free_data",
+                                  "circular indirect block detected, "
+                                  "inode=%lu, block=%llu",
+                                  inode->i_ino,
+                                  (unsigned long long)this_bh->b_blocknr);
        }
 }
 
@@ -1977,7 +2247,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
                               struct buffer_head *parent_bh,
                               __le32 *first, __le32 *last, int depth)
 {
-       unsigned long nr;
+       ext3_fsblk_t nr;
        __le32 *p;
 
        if (is_handle_aborted(handle))
@@ -2001,7 +2271,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
                         */
                        if (!bh) {
                                ext3_error(inode->i_sb, "ext3_free_branches",
-                                          "Read failure, inode=%ld, block=%ld",
+                                          "Read failure, inode=%lu, block="E3FSBLK,
                                           inode->i_ino, nr);
                                continue;
                        }
@@ -2054,7 +2324,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
                                return;
                        if (try_to_extend_transaction(handle, inode)) {
                                ext3_mark_inode_dirty(handle, inode);
-                               ext3_journal_test_restart(handle, inode);
+                               truncate_restart_transaction(handle, inode);
                        }
 
                        ext3_free_blocks(handle, inode, nr, 1);
@@ -2070,7 +2340,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
                                        *p = 0;
                                        BUFFER_TRACE(parent_bh,
                                        "call ext3_journal_dirty_metadata");
-                                       ext3_journal_dirty_metadata(handle, 
+                                       ext3_journal_dirty_metadata(handle,
                                                                    parent_bh);
                                }
                        }
@@ -2082,6 +2352,19 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
        }
 }
 
+int ext3_can_truncate(struct inode *inode)
+{
+       if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+               return 0;
+       if (S_ISREG(inode->i_mode))
+               return 1;
+       if (S_ISDIR(inode->i_mode))
+               return 1;
+       if (S_ISLNK(inode->i_mode))
+               return !ext3_inode_is_fast_symlink(inode);
+       return 0;
+}
+
 /*
  * ext3_truncate()
  *
@@ -2110,8 +2393,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode,
  * that's fine - as long as they are linked from the inode, the post-crash
  * ext3_truncate() run will find them and release them.
  */
-
-void ext3_truncate(struct inode * inode)
+void ext3_truncate(struct inode *inode)
 {
        handle_t *handle;
        struct ext3_inode_info *ei = EXT3_I(inode);
@@ -2127,13 +2409,11 @@ void ext3_truncate(struct inode * inode)
        unsigned blocksize = inode->i_sb->s_blocksize;
        struct page *page;
 
-       if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
-           S_ISLNK(inode->i_mode)))
-               return;
-       if (ext3_inode_is_fast_symlink(inode))
-               return;
-       if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
-               return;
+       if (!ext3_can_truncate(inode))
+               goto out_notrans;
+
+       if (inode->i_size == 0 && ext3_should_writeback_data(inode))
+               ext3_set_inode_state(inode, EXT3_STATE_FLUSH_ON_CLOSE);
 
        /*
         * We have to lock the EOF page here, because lock_page() nests
@@ -2146,7 +2426,7 @@ void ext3_truncate(struct inode * inode)
                page = grab_cache_page(mapping,
                                inode->i_size >> PAGE_CACHE_SHIFT);
                if (!page)
-                       return;
+                       goto out_notrans;
        }
 
        handle = start_transaction(inode);
@@ -2157,7 +2437,7 @@ void ext3_truncate(struct inode * inode)
                        unlock_page(page);
                        page_cache_release(page);
                }
-               return;         /* AKPM: return what? */
+               goto out_notrans;
        }
 
        last_block = (inode->i_size + blocksize-1)
@@ -2195,7 +2475,7 @@ void ext3_truncate(struct inode * inode)
         * From here we block out all ext3_get_block() callers who want to
         * modify the block allocation tree.
         */
-       down(&ei->truncate_sem);
+       mutex_lock(&ei->truncate_mutex);
 
        if (n == 1) {           /* direct blocks */
                ext3_free_data(handle, inode, NULL, i_data+offsets[0],
@@ -2235,39 +2515,38 @@ void ext3_truncate(struct inode * inode)
 do_indirects:
        /* Kill the remaining (whole) subtrees */
        switch (offsets[0]) {
-               default:
-                       nr = i_data[EXT3_IND_BLOCK];
-                       if (nr) {
-                               ext3_free_branches(handle, inode, NULL,
-                                                  &nr, &nr+1, 1);
-                               i_data[EXT3_IND_BLOCK] = 0;
-                       }
-               case EXT3_IND_BLOCK:
-                       nr = i_data[EXT3_DIND_BLOCK];
-                       if (nr) {
-                               ext3_free_branches(handle, inode, NULL,
-                                                  &nr, &nr+1, 2);
-                               i_data[EXT3_DIND_BLOCK] = 0;
-                       }
-               case EXT3_DIND_BLOCK:
-                       nr = i_data[EXT3_TIND_BLOCK];
-                       if (nr) {
-                               ext3_free_branches(handle, inode, NULL,
-                                                  &nr, &nr+1, 3);
-                               i_data[EXT3_TIND_BLOCK] = 0;
-                       }
-               case EXT3_TIND_BLOCK:
-                       ;
+       default:
+               nr = i_data[EXT3_IND_BLOCK];
+               if (nr) {
+                       ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
+                       i_data[EXT3_IND_BLOCK] = 0;
+               }
+       case EXT3_IND_BLOCK:
+               nr = i_data[EXT3_DIND_BLOCK];
+               if (nr) {
+                       ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
+                       i_data[EXT3_DIND_BLOCK] = 0;
+               }
+       case EXT3_DIND_BLOCK:
+               nr = i_data[EXT3_TIND_BLOCK];
+               if (nr) {
+                       ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
+                       i_data[EXT3_TIND_BLOCK] = 0;
+               }
+       case EXT3_TIND_BLOCK:
+               ;
        }
 
        ext3_discard_reservation(inode);
 
-       up(&ei->truncate_sem);
+       mutex_unlock(&ei->truncate_mutex);
        inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC;
        ext3_mark_inode_dirty(handle, inode);
 
-       /* In a multi-transaction truncate, we only make the final
-        * transaction synchronous */
+       /*
+        * In a multi-transaction truncate, we only make the final transaction
+        * synchronous
+        */
        if (IS_SYNC(inode))
                handle->h_sync = 1;
 out_stop:
@@ -2282,50 +2561,43 @@ out_stop:
                ext3_orphan_del(handle, inode);
 
        ext3_journal_stop(handle);
+       return;
+out_notrans:
+       /*
+        * Delete the inode from orphan list so that it doesn't stay there
+        * forever and trigger assertion on umount.
+        */
+       if (inode->i_nlink)
+               ext3_orphan_del(NULL, inode);
 }
 
-static unsigned long ext3_get_inode_block(struct super_block *sb,
+static ext3_fsblk_t ext3_get_inode_block(struct super_block *sb,
                unsigned long ino, struct ext3_iloc *iloc)
 {
-       unsigned long desc, group_desc, block_group;
-       unsigned long offset, block;
-       struct buffer_head *bh;
-       struct ext3_group_desc * gdp;
-
+       unsigned long block_group;
+       unsigned long offset;
+       ext3_fsblk_t block;
+       struct ext3_group_desc *gdp;
 
-       if ((ino != EXT3_ROOT_INO &&
-               ino != EXT3_JOURNAL_INO &&
-               ino != EXT3_RESIZE_INO &&
-               ino < EXT3_FIRST_INO(sb)) ||
-               ino > le32_to_cpu(
-                       EXT3_SB(sb)->s_es->s_inodes_count)) {
-               ext3_error (sb, "ext3_get_inode_block",
-                           "bad inode number: %lu", ino);
+       if (!ext3_valid_inum(sb, ino)) {
+               /*
+                * This error is already checked for in namei.c unless we are
+                * looking at an NFS filehandle, in which case no error
+                * report is needed
+                */
                return 0;
        }
+
        block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb);
-       if (block_group >= EXT3_SB(sb)->s_groups_count) {
-               ext3_error (sb, "ext3_get_inode_block",
-                           "group >= groups count");
+       gdp = ext3_get_group_desc(sb, block_group, NULL);
+       if (!gdp)
                return 0;
-       }
-       smp_rmb();
-       group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
-       desc = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
-       bh = EXT3_SB(sb)->s_group_desc[group_desc];
-       if (!bh) {
-               ext3_error (sb, "ext3_get_inode_block",
-                           "Descriptor not loaded");
-               return 0;
-       }
-
-       gdp = (struct ext3_group_desc *) bh->b_data;
        /*
         * Figure out the offset within the block group inode table
         */
        offset = ((ino - 1) % EXT3_INODES_PER_GROUP(sb)) *
                EXT3_INODE_SIZE(sb);
-       block = le32_to_cpu(gdp[desc].bg_inode_table) +
+       block = le32_to_cpu(gdp->bg_inode_table) +
                (offset >> EXT3_BLOCK_SIZE_BITS(sb));
 
        iloc->block_group = block_group;
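[Editor's note: a small userspace-style worked example of the inode-location arithmetic above; the geometry numbers are made up purely for illustration and are not taken from any real superblock.]

#include <stdio.h>

int main(void)
{
        /* Hypothetical example geometry, not read from a real filesystem. */
        unsigned long ino = 50000;
        unsigned long inodes_per_group = 16384;
        unsigned long inode_size = 128;
        unsigned long block_size_bits = 12;     /* 4096-byte blocks */
        unsigned long inode_table_start = 1234; /* bg_inode_table of that group */

        unsigned long block_group = (ino - 1) / inodes_per_group;
        unsigned long offset = ((ino - 1) % inodes_per_group) * inode_size;
        unsigned long block = inode_table_start + (offset >> block_size_bits);

        printf("group=%lu offset-in-table=%lu block=%lu offset-in-block=%lu\n",
               block_group, offset, block,
               offset & ((1UL << block_size_bits) - 1));
        return 0;
}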
@@ -2342,7 +2614,7 @@ static unsigned long ext3_get_inode_block(struct super_block *sb,
 static int __ext3_get_inode_loc(struct inode *inode,
                                struct ext3_iloc *iloc, int in_mem)
 {
-       unsigned long block;
+       ext3_fsblk_t block;
        struct buffer_head *bh;
 
        block = ext3_get_inode_block(inode->i_sb, inode->i_ino, iloc);
@@ -2353,11 +2625,22 @@ static int __ext3_get_inode_loc(struct inode *inode,
        if (!bh) {
                ext3_error (inode->i_sb, "ext3_get_inode_loc",
                                "unable to read inode block - "
-                               "inode=%lu, block=%lu", inode->i_ino, block);
+                               "inode=%lu, block="E3FSBLK,
+                                inode->i_ino, block);
                return -EIO;
        }
        if (!buffer_uptodate(bh)) {
                lock_buffer(bh);
+
+               /*
+                * If the buffer has the write error flag, we have failed
+                * to write out another inode in the same block.  In this
+                * case, we don't have to read the block because we may
+                * read the old inode data successfully.
+                */
+               if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
+                       set_buffer_uptodate(bh);
+
                if (buffer_uptodate(bh)) {
                        /* someone brought it uptodate while we waited */
                        unlock_buffer(bh);
@@ -2429,12 +2712,12 @@ make_io:
                 */
                get_bh(bh);
                bh->b_end_io = end_buffer_read_sync;
-               submit_bh(READ, bh);
+               submit_bh(READ_META, bh);
                wait_on_buffer(bh);
                if (!buffer_uptodate(bh)) {
                        ext3_error(inode->i_sb, "ext3_get_inode_loc",
                                        "unable to read inode block - "
-                                       "inode=%lu, block=%lu",
+                                       "inode=%lu, block="E3FSBLK,
                                        inode->i_ino, block);
                        brelse(bh);
                        return -EIO;
@@ -2449,7 +2732,7 @@ int ext3_get_inode_loc(struct inode *inode, struct ext3_iloc *iloc)
 {
        /* We have all inode data except xattrs in memory here. */
        return __ext3_get_inode_loc(inode, iloc,
-               !(EXT3_I(inode)->i_state & EXT3_STATE_XATTR));
+               !ext3_test_inode_state(inode, EXT3_STATE_XATTR));
 }
 
 void ext3_set_inode_flags(struct inode *inode)
@@ -2469,21 +2752,48 @@ void ext3_set_inode_flags(struct inode *inode)
                inode->i_flags |= S_DIRSYNC;
 }
 
-void ext3_read_inode(struct inode * inode)
+/* Propagate flags from i_flags to EXT3_I(inode)->i_flags */
+void ext3_get_inode_flags(struct ext3_inode_info *ei)
+{
+       unsigned int flags = ei->vfs_inode.i_flags;
+
+       ei->i_flags &= ~(EXT3_SYNC_FL|EXT3_APPEND_FL|
+                       EXT3_IMMUTABLE_FL|EXT3_NOATIME_FL|EXT3_DIRSYNC_FL);
+       if (flags & S_SYNC)
+               ei->i_flags |= EXT3_SYNC_FL;
+       if (flags & S_APPEND)
+               ei->i_flags |= EXT3_APPEND_FL;
+       if (flags & S_IMMUTABLE)
+               ei->i_flags |= EXT3_IMMUTABLE_FL;
+       if (flags & S_NOATIME)
+               ei->i_flags |= EXT3_NOATIME_FL;
+       if (flags & S_DIRSYNC)
+               ei->i_flags |= EXT3_DIRSYNC_FL;
+}
+
+struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
 {
        struct ext3_iloc iloc;
        struct ext3_inode *raw_inode;
-       struct ext3_inode_info *ei = EXT3_I(inode);
+       struct ext3_inode_info *ei;
        struct buffer_head *bh;
+       struct inode *inode;
+       journal_t *journal = EXT3_SB(sb)->s_journal;
+       transaction_t *transaction;
+       long ret;
        int block;
 
-#ifdef CONFIG_EXT3_FS_POSIX_ACL
-       ei->i_acl = EXT3_ACL_NOT_CACHED;
-       ei->i_default_acl = EXT3_ACL_NOT_CACHED;
-#endif
+       inode = iget_locked(sb, ino);
+       if (!inode)
+               return ERR_PTR(-ENOMEM);
+       if (!(inode->i_state & I_NEW))
+               return inode;
+
+       ei = EXT3_I(inode);
        ei->i_block_alloc_info = NULL;
 
-       if (__ext3_get_inode_loc(inode, &iloc, 0))
+       ret = __ext3_get_inode_loc(inode, &iloc, 0);
+       if (ret < 0)
                goto bad_inode;
        bh = iloc.bh;
        raw_inode = ext3_raw_inode(&iloc);
@@ -2496,9 +2806,9 @@ void ext3_read_inode(struct inode * inode)
        }
        inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
        inode->i_size = le32_to_cpu(raw_inode->i_size);
-       inode->i_atime.tv_sec = le32_to_cpu(raw_inode->i_atime);
-       inode->i_ctime.tv_sec = le32_to_cpu(raw_inode->i_ctime);
-       inode->i_mtime.tv_sec = le32_to_cpu(raw_inode->i_mtime);
+       inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
+       inode->i_ctime.tv_sec = (signed)le32_to_cpu(raw_inode->i_ctime);
+       inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
        inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
 
        ei->i_state = 0;
@@ -2514,6 +2824,7 @@ void ext3_read_inode(struct inode * inode)
                    !(EXT3_SB(inode->i_sb)->s_mount_state & EXT3_ORPHAN_FS)) {
                        /* this inode is deleted */
                        brelse (bh);
+                       ret = -ESTALE;
                        goto bad_inode;
                }
                /* The only unlinked inodes we let through here have
@@ -2521,9 +2832,6 @@ void ext3_read_inode(struct inode * inode)
                 * recovery code: that's fine, we're about to complete
                 * the process of deleting those. */
        }
-       inode->i_blksize = PAGE_SIZE;   /* This is the optimal IO size
-                                        * (for stat), not the fs block
-                                        * size */  
        inode->i_blocks = le32_to_cpu(raw_inode->i_blocks);
        ei->i_flags = le32_to_cpu(raw_inode->i_flags);
 #ifdef EXT3_FRAGMENTS
@@ -2549,6 +2857,30 @@ void ext3_read_inode(struct inode * inode)
                ei->i_data[block] = raw_inode->i_block[block];
        INIT_LIST_HEAD(&ei->i_orphan);
 
+       /*
+        * Set transaction id's of transactions that have to be committed
+        * to finish f[data]sync. We set them to currently running transaction
+        * as we cannot be sure that the inode or some of its metadata isn't
+        * part of the transaction - the inode could have been reclaimed and
+        * now it is reread from disk.
+        */
+       if (journal) {
+               tid_t tid;
+
+               spin_lock(&journal->j_state_lock);
+               if (journal->j_running_transaction)
+                       transaction = journal->j_running_transaction;
+               else
+                       transaction = journal->j_committing_transaction;
+               if (transaction)
+                       tid = transaction->t_tid;
+               else
+                       tid = journal->j_commit_sequence;
+               spin_unlock(&journal->j_state_lock);
+               atomic_set(&ei->i_sync_tid, tid);
+               atomic_set(&ei->i_datasync_tid, tid);
+       }
+
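[Editor's note: per the comment above, these cached tids let f[data]sync commit exactly the transaction that may still hold the inode. A hedged sketch of that consumer side, built only on jbd's log_start_commit()/log_wait_commit(); the helper name wait_for_recorded_tid() is hypothetical.]

#include <linux/jbd.h>

/*
 * Hedged sketch: given the tid recorded at inode-read or inode-dirty time,
 * kick a commit of that transaction if it has not committed yet and wait
 * for it.  Both calls return quickly once the tid has already committed.
 */
static int wait_for_recorded_tid(journal_t *journal, tid_t tid)
{
        log_start_commit(journal, tid);
        return log_wait_commit(journal, tid);
}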
        if (inode->i_ino >= EXT3_FIRST_INO(inode->i_sb) + 1 &&
            EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) {
                /*
@@ -2558,8 +2890,11 @@ void ext3_read_inode(struct inode * inode)
                 */
                ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
                if (EXT3_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
-                   EXT3_INODE_SIZE(inode->i_sb))
+                   EXT3_INODE_SIZE(inode->i_sb)) {
+                       brelse (bh);
+                       ret = -EIO;
                        goto bad_inode;
+               }
                if (ei->i_extra_isize == 0) {
                        /* The extra space is currently unused. Use it. */
                        ei->i_extra_isize = sizeof(struct ext3_inode) -
@@ -2569,7 +2904,7 @@ void ext3_read_inode(struct inode * inode)
                                        EXT3_GOOD_OLD_INODE_SIZE +
                                        ei->i_extra_isize;
                        if (*magic == cpu_to_le32(EXT3_XATTR_MAGIC))
-                                ei->i_state |= EXT3_STATE_XATTR;
+                                ext3_set_inode_state(inode, EXT3_STATE_XATTR);
                }
        } else
                ei->i_extra_isize = 0;
@@ -2582,9 +2917,11 @@ void ext3_read_inode(struct inode * inode)
                inode->i_op = &ext3_dir_inode_operations;
                inode->i_fop = &ext3_dir_operations;
        } else if (S_ISLNK(inode->i_mode)) {
-               if (ext3_inode_is_fast_symlink(inode))
+               if (ext3_inode_is_fast_symlink(inode)) {
                        inode->i_op = &ext3_fast_symlink_inode_operations;
-               else {
+                       nd_terminate_link(ei->i_data, inode->i_size,
+                               sizeof(ei->i_data) - 1);
+               } else {
                        inode->i_op = &ext3_symlink_inode_operations;
                        ext3_set_aops(inode);
                }
@@ -2593,17 +2930,18 @@ void ext3_read_inode(struct inode * inode)
                if (raw_inode->i_block[0])
                        init_special_inode(inode, inode->i_mode,
                           old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
-               else 
+               else
                        init_special_inode(inode, inode->i_mode,
                           new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
        }
        brelse (iloc.bh);
        ext3_set_inode_flags(inode);
-       return;
+       unlock_new_inode(inode);
+       return inode;
 
 bad_inode:
-       make_bad_inode(inode);
-       return;
+       iget_failed(inode);
+       return ERR_PTR(ret);
 }
 
 /*
@@ -2613,8 +2951,8 @@ bad_inode:
  *
  * The caller must have write access to iloc->bh.
  */
-static int ext3_do_update_inode(handle_t *handle, 
-                               struct inode *inode, 
+static int ext3_do_update_inode(handle_t *handle,
+                               struct inode *inode,
                                struct ext3_iloc *iloc)
 {
        struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
@@ -2622,11 +2960,16 @@ static int ext3_do_update_inode(handle_t *handle,
        struct buffer_head *bh = iloc->bh;
        int err = 0, rc, block;
 
+again:
+       /* we can't allow multiple procs in here at once, it's a bit racy */
+       lock_buffer(bh);
+
        /* For fields not tracked in the in-memory inode,
         * initialise them to zero for new inodes. */
-       if (ei->i_state & EXT3_STATE_NEW)
+       if (ext3_test_inode_state(inode, EXT3_STATE_NEW))
                memset(raw_inode, 0, EXT3_SB(inode->i_sb)->s_inode_size);
 
+       ext3_get_inode_flags(ei);
        raw_inode->i_mode = cpu_to_le16(inode->i_mode);
        if(!(test_opt(inode->i_sb, NO_UID32))) {
                raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
@@ -2680,17 +3023,20 @@ static int ext3_do_update_inode(handle_t *handle,
                               /* If this is the first large file
                                * created, add a flag to the superblock.
                                */
+                               unlock_buffer(bh);
                                err = ext3_journal_get_write_access(handle,
                                                EXT3_SB(sb)->s_sbh);
                                if (err)
                                        goto out_brelse;
+
                                ext3_update_dynamic_rev(sb);
                                EXT3_SET_RO_COMPAT_FEATURE(sb,
                                        EXT3_FEATURE_RO_COMPAT_LARGE_FILE);
-                               sb->s_dirt = 1;
                                handle->h_sync = 1;
                                err = ext3_journal_dirty_metadata(handle,
                                                EXT3_SB(sb)->s_sbh);
+                               /* get our lock and start over */
+                               goto again;
                        }
                }
        }
@@ -2709,15 +3055,17 @@ static int ext3_do_update_inode(handle_t *handle,
        } else for (block = 0; block < EXT3_N_BLOCKS; block++)
                raw_inode->i_block[block] = ei->i_data[block];
 
-       if (EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE)
+       if (ei->i_extra_isize)
                raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
 
        BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
+       unlock_buffer(bh);
        rc = ext3_journal_dirty_metadata(handle, bh);
        if (!err)
                err = rc;
-       ei->i_state &= ~EXT3_STATE_NEW;
+       ext3_clear_inode_state(inode, EXT3_STATE_NEW);
 
+       atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid);
 out_brelse:
        brelse (bh);
        ext3_std_error(inode->i_sb, err);
@@ -2765,7 +3113,7 @@ int ext3_write_inode(struct inode *inode, int wait)
                return 0;
 
        if (ext3_journal_current_handle()) {
-               jbd_debug(0, "called recursively, non-PF_MEMALLOC!\n");
+               jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
                dump_stack();
                return -EIO;
        }
@@ -2789,7 +3137,7 @@ int ext3_write_inode(struct inode *inode, int wait)
  * commit will leave the blocks being flushed in an unused state on
  * disk.  (On recovery, the inode will get truncated and the blocks will
  * be freed, so we have a strong guarantee that no future commit will
- * leave these blocks visible to the user.)  
+ * leave these blocks visible to the user.)
  *
  * Called with inode->sem down.
  */
@@ -2803,18 +3151,21 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr)
        if (error)
                return error;
 
+       if (ia_valid & ATTR_SIZE)
+               vfs_dq_init(inode);
        if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
                (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
                handle_t *handle;
 
                /* (user+group)*(old+new) structure, inode write (sb,
                 * inode block, ? - but truncate inode update has it) */
-               handle = ext3_journal_start(inode, 4*EXT3_QUOTA_INIT_BLOCKS+3);
+               handle = ext3_journal_start(inode, EXT3_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
+                                       EXT3_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)+3);
                if (IS_ERR(handle)) {
                        error = PTR_ERR(handle);
                        goto err_out;
                }
-               error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
+               error = dquot_transfer(inode, attr);
                if (error) {
                        ext3_journal_stop(handle);
                        return error;
@@ -2849,12 +3200,6 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr)
 
        rc = inode_setattr(inode, attr);
 
-       /* If inode_setattr's call to ext3_truncate failed to get a
-        * transaction handle at all, we need to clean up the in-core
-        * orphan list manually. */
-       if (inode->i_nlink)
-               ext3_orphan_del(NULL, inode);
-
        if (!rc && (ia_valid & ATTR_MODE))
                rc = ext3_acl_chmod(inode);
 
@@ -2867,7 +3212,7 @@ err_out:
 
 
 /*
- * akpm: how many blocks doth make a writepage()?
+ * How many blocks doth make a writepage()?
  *
  * With N blocks per page, it may be:
  * N data blocks
@@ -2905,9 +3250,9 @@ static int ext3_writepage_trans_blocks(struct inode *inode)
                ret = 2 * (bpp + indirects) + 2;
 
 #ifdef CONFIG_QUOTA
-       /* We know that structure was already allocated during DQUOT_INIT so
+       /* We know that structure was already allocated during vfs_dq_init so
         * we will be updating only the data blocks + inodes */
-       ret += 2*EXT3_QUOTA_TRANS_BLOCKS;
+       ret += EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);
 #endif
 
        return ret;
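[Editor's note: a rough worked example of the credit formula above, with hypothetical geometry chosen purely for illustration: with 4 KB blocks and 4 KB pages there is one block per page (bpp = 1), and a block behind a triple-indirect pointer touches at most three index blocks (indirects = 3), so the branch shown reserves 2 * (1 + 3) + 2 = 10 credits before the quota blocks below are added.]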
@@ -2931,13 +3276,13 @@ int ext3_mark_iloc_dirty(handle_t *handle,
        return err;
 }
 
-/* 
+/*
  * On success, We end up with an outstanding reference count against
- * iloc->bh.  This _must_ be cleaned up later. 
+ * iloc->bh.  This _must_ be cleaned up later.
  */
 
 int
-ext3_reserve_inode_write(handle_t *handle, struct inode *inode, 
+ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
                         struct ext3_iloc *iloc)
 {
        int err = 0;
@@ -2957,8 +3302,8 @@ ext3_reserve_inode_write(handle_t *handle, struct inode *inode,
 }
 
 /*
- * akpm: What we do here is to mark the in-core inode as clean
- * with respect to inode dirtiness (it may still be data-dirty).
+ * What we do here is to mark the in-core inode as clean with respect to inode
+ * dirtiness (it may still be data-dirty).
  * This means that the in-core inode may be reaped by prune_icache
  * without having to perform any I/O.  This is a very good thing,
  * because *any* task may call prune_icache - even ones which
@@ -2990,13 +3335,13 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode)
 }
 
 /*
- * akpm: ext3_dirty_inode() is called from __mark_inode_dirty()
+ * ext3_dirty_inode() is called from __mark_inode_dirty()
  *
  * We're really interested in the case where a file is being extended.
  * i_size has been changed by generic_commit_write() and we thus need
  * to include the updated inode in the current transaction.
  *
- * Also, DQUOT_ALLOC_SPACE() will always dirty the inode when blocks
+ * Also, dquot_alloc_space() will always dirty the inode when blocks
  * are allocated to the file.
  *
  * If the inode is marked synchronous, we don't honour that here - doing
@@ -3015,7 +3360,7 @@ void ext3_dirty_inode(struct inode *inode)
                current_handle->h_transaction != handle->h_transaction) {
                /* This task has a transaction open against a different fs */
                printk(KERN_EMERG "%s: transactions do not match!\n",
-                      __FUNCTION__);
+                      __func__);
        } else {
                jbd_debug(5, "marking dirty.  outer handle=%p\n",
                                current_handle);
@@ -3026,16 +3371,15 @@ out:
        return;
 }
 
-#ifdef AKPM
-/* 
+#if 0
+/*
  * Bind an inode's backing buffer_head into this transaction, to prevent
  * it from being flushed to disk early.  Unlike
  * ext3_reserve_inode_write, this leaves behind no bh reference and
  * returns no iloc structure, so the caller needs to repeat the iloc
  * lookup to mark the inode dirty later.
  */
-static inline int
-ext3_pin_inode(handle_t *handle, struct inode *inode)
+static int ext3_pin_inode(handle_t *handle, struct inode *inode)
 {
        struct ext3_iloc iloc;
 
@@ -3046,7 +3390,7 @@ ext3_pin_inode(handle_t *handle, struct inode *inode)
                        BUFFER_TRACE(iloc.bh, "get_write_access");
                        err = journal_get_write_access(handle, iloc.bh);
                        if (!err)
-                               err = ext3_journal_dirty_metadata(handle, 
+                               err = ext3_journal_dirty_metadata(handle,
                                                                  iloc.bh);
                        brelse(iloc.bh);
                }
@@ -3073,7 +3417,7 @@ int ext3_change_inode_journal_flag(struct inode *inode, int val)
         */
 
        journal = EXT3_JOURNAL(inode);
-       if (is_journal_aborted(journal) || IS_RDONLY(inode))
+       if (is_journal_aborted(journal))
                return -EROFS;
 
        journal_lock_updates(journal);