dquot: cleanup dquot drop routine
[safe/jmp/linux-2.6] / fs / ext4 / ialloc.c
index 1ff3df0..ca8986e 100644 (file)
 #include <linux/bitops.h>
 #include <linux/blkdev.h>
 #include <asm/byteorder.h>
+
 #include "ext4.h"
 #include "ext4_jbd2.h"
 #include "xattr.h"
 #include "acl.h"
-#include "group.h"
+
+#include <trace/events/ext4.h>
 
 /*
  * ialloc.c contains the inodes allocation and deallocation routines
@@ -123,16 +125,16 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
                unlock_buffer(bh);
                return bh;
        }
-       spin_lock(sb_bgl_lock(EXT4_SB(sb), block_group));
+       ext4_lock_group(sb, block_group);
        if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
                ext4_init_inode_bitmap(sb, bh, block_group, desc);
                set_bitmap_uptodate(bh);
                set_buffer_uptodate(bh);
-               spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+               ext4_unlock_group(sb, block_group);
                unlock_buffer(bh);
                return bh;
        }
-       spin_unlock(sb_bgl_lock(EXT4_SB(sb), block_group));
+       ext4_unlock_group(sb, block_group);
        if (buffer_uptodate(bh)) {
                /*
                 * if not uninit if bh is uptodate,
@@ -189,7 +191,6 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
        struct ext4_super_block *es;
        struct ext4_sb_info *sbi;
        int fatal = 0, err, count, cleared;
-       ext4_group_t flex_group;
 
        if (atomic_read(&inode->i_count) > 1) {
                printk(KERN_ERR "ext4_free_inode: inode has count=%d\n",
@@ -210,11 +211,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
 
        ino = inode->i_ino;
        ext4_debug("freeing inode %lu\n", ino);
-       trace_mark(ext4_free_inode,
-                  "dev %s ino %lu mode %d uid %lu gid %lu bocks %llu",
-                  sb->s_id, inode->i_ino, inode->i_mode,
-                  (unsigned long) inode->i_uid, (unsigned long) inode->i_gid,
-                  (unsigned long long) inode->i_blocks);
+       trace_ext4_free_inode(inode);
 
        /*
         * Note: we must free any quota before locking the superblock,
@@ -222,8 +219,8 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
         */
        vfs_dq_init(inode);
        ext4_xattr_delete_inode(handle, inode);
-       vfs_dq_free_inode(inode);
-       vfs_dq_drop(inode);
+       dquot_free_inode(inode);
+       dquot_drop(inode);
 
        is_directory = S_ISDIR(inode->i_mode);
 
@@ -248,9 +245,8 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
                goto error_return;
 
        /* Ok, now we can actually update the inode bitmaps.. */
-       spin_lock(sb_bgl_lock(sbi, block_group));
-       cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
-       spin_unlock(sb_bgl_lock(sbi, block_group));
+       cleared = ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
+                                       bit, bitmap_bh->b_data);
        if (!cleared)
                ext4_error(sb, "ext4_free_inode",
                           "bit already cleared for inode %lu", ino);
@@ -262,25 +258,32 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
                if (fatal) goto error_return;
 
                if (gdp) {
-                       spin_lock(sb_bgl_lock(sbi, block_group));
+                       ext4_lock_group(sb, block_group);
                        count = ext4_free_inodes_count(sb, gdp) + 1;
                        ext4_free_inodes_set(sb, gdp, count);
                        if (is_directory) {
                                count = ext4_used_dirs_count(sb, gdp) - 1;
                                ext4_used_dirs_set(sb, gdp, count);
+                               if (sbi->s_log_groups_per_flex) {
+                                       ext4_group_t f;
+
+                                       f = ext4_flex_group(sbi, block_group);
+                                       atomic_dec(&sbi->s_flex_groups[f].used_dirs);
+                               }
+
                        }
                        gdp->bg_checksum = ext4_group_desc_csum(sbi,
                                                        block_group, gdp);
-                       spin_unlock(sb_bgl_lock(sbi, block_group));
+                       ext4_unlock_group(sb, block_group);
                        percpu_counter_inc(&sbi->s_freeinodes_counter);
                        if (is_directory)
                                percpu_counter_dec(&sbi->s_dirs_counter);
 
                        if (sbi->s_log_groups_per_flex) {
-                               flex_group = ext4_flex_group(sbi, block_group);
-                               spin_lock(sb_bgl_lock(sbi, flex_group));
-                               sbi->s_flex_groups[flex_group].free_inodes++;
-                               spin_unlock(sb_bgl_lock(sbi, flex_group));
+                               ext4_group_t f;
+
+                               f = ext4_flex_group(sbi, block_group);
+                               atomic_inc(&sbi->s_flex_groups[f].free_inodes);
                        }
                }
                BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
@@ -310,7 +313,7 @@ error_return:
 static int find_group_dir(struct super_block *sb, struct inode *parent,
                                ext4_group_t *best_group)
 {
-       ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
+       ext4_group_t ngroups = ext4_get_groups_count(sb);
        unsigned int freei, avefreei;
        struct ext4_group_desc *desc, *best_desc = NULL;
        ext4_group_t group;
@@ -343,11 +346,10 @@ static int find_group_flex(struct super_block *sb, struct inode *parent,
 {
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_group_desc *desc;
-       struct buffer_head *bh;
        struct flex_groups *flex_group = sbi->s_flex_groups;
        ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
        ext4_group_t parent_fbg_group = ext4_flex_group(sbi, parent_group);
-       ext4_group_t ngroups = sbi->s_groups_count;
+       ext4_group_t ngroups = ext4_get_groups_count(sb);
        int flex_size = ext4_flex_bg_size(sbi);
        ext4_group_t best_flex = parent_fbg_group;
        int blocks_per_flex = sbi->s_blocks_per_group * flex_size;
@@ -356,13 +358,13 @@ static int find_group_flex(struct super_block *sb, struct inode *parent,
        ext4_group_t n_fbg_groups;
        ext4_group_t i;
 
-       n_fbg_groups = (sbi->s_groups_count + flex_size - 1) >>
+       n_fbg_groups = (ngroups + flex_size - 1) >>
                sbi->s_log_groups_per_flex;
 
 find_close_to_parent:
-       flexbg_free_blocks = flex_group[best_flex].free_blocks;
+       flexbg_free_blocks = atomic_read(&flex_group[best_flex].free_blocks);
        flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;
-       if (flex_group[best_flex].free_inodes &&
+       if (atomic_read(&flex_group[best_flex].free_inodes) &&
            flex_freeb_ratio > free_block_ratio)
                goto found_flexbg;
 
@@ -375,30 +377,30 @@ find_close_to_parent:
                if (i == parent_fbg_group || i == parent_fbg_group - 1)
                        continue;
 
-               flexbg_free_blocks = flex_group[i].free_blocks;
+               flexbg_free_blocks = atomic_read(&flex_group[i].free_blocks);
                flex_freeb_ratio = flexbg_free_blocks * 100 / blocks_per_flex;
 
                if (flex_freeb_ratio > free_block_ratio &&
-                   flex_group[i].free_inodes) {
+                   (atomic_read(&flex_group[i].free_inodes))) {
                        best_flex = i;
                        goto found_flexbg;
                }
 
-               if (flex_group[best_flex].free_inodes == 0 ||
-                   (flex_group[i].free_blocks >
-                    flex_group[best_flex].free_blocks &&
-                    flex_group[i].free_inodes))
+               if ((atomic_read(&flex_group[best_flex].free_inodes) == 0) ||
+                   ((atomic_read(&flex_group[i].free_blocks) >
+                     atomic_read(&flex_group[best_flex].free_blocks)) &&
+                    atomic_read(&flex_group[i].free_inodes)))
                        best_flex = i;
        }
 
-       if (!flex_group[best_flex].free_inodes ||
-           !flex_group[best_flex].free_blocks)
+       if (!atomic_read(&flex_group[best_flex].free_inodes) ||
+           !atomic_read(&flex_group[best_flex].free_blocks))
                return -1;
 
 found_flexbg:
        for (i = best_flex * flex_size; i < ngroups &&
                     i < (best_flex + 1) * flex_size; i++) {
-               desc = ext4_get_group_desc(sb, i, &bh);
+               desc = ext4_get_group_desc(sb, i, NULL);
                if (ext4_free_inodes_count(sb, desc)) {
                        *best_group = i;
                        goto out;
@@ -410,6 +412,42 @@ out:
        return 0;
 }
 
+struct orlov_stats {
+       __u32 free_inodes;
+       __u32 free_blocks;
+       __u32 used_dirs;
+};
+
+/*
+ * Helper function for Orlov's allocator; returns critical information
+ * for a particular block group or flex_bg.  If flex_size is 1, then g
+ * is a block group number; otherwise it is flex_bg number.
+ */
+void get_orlov_stats(struct super_block *sb, ext4_group_t g,
+                      int flex_size, struct orlov_stats *stats)
+{
+       struct ext4_group_desc *desc;
+       struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;
+
+       if (flex_size > 1) {
+               stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
+               stats->free_blocks = atomic_read(&flex_group[g].free_blocks);
+               stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
+               return;
+       }
+
+       desc = ext4_get_group_desc(sb, g, NULL);
+       if (desc) {
+               stats->free_inodes = ext4_free_inodes_count(sb, desc);
+               stats->free_blocks = ext4_free_blks_count(sb, desc);
+               stats->used_dirs = ext4_used_dirs_count(sb, desc);
+       } else {
+               stats->free_inodes = 0;
+               stats->free_blocks = 0;
+               stats->used_dirs = 0;
+       }
+}
+
 /*
  * Orlov's allocator for directories.
  *
@@ -425,35 +463,37 @@ out:
  * it has too many directories already (max_dirs) or
  * it has too few free inodes left (min_inodes) or
  * it has too few free blocks left (min_blocks) or
- * it's already running too large debt (max_debt).
  * Parent's group is preferred, if it doesn't satisfy these
  * conditions we search cyclically through the rest. If none
  * of the groups look good we just look for a group with more
  * free inodes than average (starting at parent's group).
- *
- * Debt is incremented each time we allocate a directory and decremented
- * when we allocate an inode, within 0--255.
  */
 
-#define INODE_COST 64
-#define BLOCK_COST 256
-
 static int find_group_orlov(struct super_block *sb, struct inode *parent,
-                               ext4_group_t *group)
+                           ext4_group_t *group, int mode,
+                           const struct qstr *qstr)
 {
        ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
        struct ext4_sb_info *sbi = EXT4_SB(sb);
-       struct ext4_super_block *es = sbi->s_es;
-       ext4_group_t ngroups = sbi->s_groups_count;
+       ext4_group_t real_ngroups = ext4_get_groups_count(sb);
        int inodes_per_group = EXT4_INODES_PER_GROUP(sb);
        unsigned int freei, avefreei;
        ext4_fsblk_t freeb, avefreeb;
-       ext4_fsblk_t blocks_per_dir;
        unsigned int ndirs;
-       int max_debt, max_dirs, min_inodes;
+       int max_dirs, min_inodes;
        ext4_grpblk_t min_blocks;
-       ext4_group_t i;
+       ext4_group_t i, grp, g, ngroups;
        struct ext4_group_desc *desc;
+       struct orlov_stats stats;
+       int flex_size = ext4_flex_bg_size(sbi);
+       struct dx_hash_info hinfo;
+
+       ngroups = real_ngroups;
+       if (flex_size > 1) {
+               ngroups = (real_ngroups + flex_size - 1) >>
+                       sbi->s_log_groups_per_flex;
+               parent_group >>= sbi->s_log_groups_per_flex;
+       }
 
        freei = percpu_counter_read_positive(&sbi->s_freeinodes_counter);
        avefreei = freei / ngroups;
@@ -462,71 +502,104 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
        do_div(avefreeb, ngroups);
        ndirs = percpu_counter_read_positive(&sbi->s_dirs_counter);
 
-       if ((parent == sb->s_root->d_inode) ||
-           (EXT4_I(parent)->i_flags & EXT4_TOPDIR_FL)) {
+       if (S_ISDIR(mode) &&
+           ((parent == sb->s_root->d_inode) ||
+            (EXT4_I(parent)->i_flags & EXT4_TOPDIR_FL))) {
                int best_ndir = inodes_per_group;
-               ext4_group_t grp;
                int ret = -1;
 
-               get_random_bytes(&grp, sizeof(grp));
+               if (qstr) {
+                       hinfo.hash_version = DX_HASH_HALF_MD4;
+                       hinfo.seed = sbi->s_hash_seed;
+                       ext4fs_dirhash(qstr->name, qstr->len, &hinfo);
+                       grp = hinfo.hash;
+               } else
+                       get_random_bytes(&grp, sizeof(grp));
                parent_group = (unsigned)grp % ngroups;
                for (i = 0; i < ngroups; i++) {
-                       grp = (parent_group + i) % ngroups;
-                       desc = ext4_get_group_desc(sb, grp, NULL);
-                       if (!desc || !ext4_free_inodes_count(sb, desc))
+                       g = (parent_group + i) % ngroups;
+                       get_orlov_stats(sb, g, flex_size, &stats);
+                       if (!stats.free_inodes)
                                continue;
-                       if (ext4_used_dirs_count(sb, desc) >= best_ndir)
+                       if (stats.used_dirs >= best_ndir)
                                continue;
-                       if (ext4_free_inodes_count(sb, desc) < avefreei)
+                       if (stats.free_inodes < avefreei)
                                continue;
-                       if (ext4_free_blks_count(sb, desc) < avefreeb)
+                       if (stats.free_blocks < avefreeb)
                                continue;
-                       *group = grp;
+                       grp = g;
                        ret = 0;
-                       best_ndir = ext4_used_dirs_count(sb, desc);
+                       best_ndir = stats.used_dirs;
+               }
+               if (ret)
+                       goto fallback;
+       found_flex_bg:
+               if (flex_size == 1) {
+                       *group = grp;
+                       return 0;
+               }
+
+               /*
+                * We pack inodes at the beginning of the flexgroup's
+                * inode tables.  Block allocation decisions will do
+                * something similar, although regular files will
+                * start at 2nd block group of the flexgroup.  See
+                * ext4_ext_find_goal() and ext4_find_near().
+                */
+               grp *= flex_size;
+               for (i = 0; i < flex_size; i++) {
+                       if (grp+i >= real_ngroups)
+                               break;
+                       desc = ext4_get_group_desc(sb, grp+i, NULL);
+                       if (desc && ext4_free_inodes_count(sb, desc)) {
+                               *group = grp+i;
+                               return 0;
+                       }
                }
-               if (ret == 0)
-                       return ret;
                goto fallback;
        }
 
-       blocks_per_dir = ext4_blocks_count(es) - freeb;
-       do_div(blocks_per_dir, ndirs);
-
        max_dirs = ndirs / ngroups + inodes_per_group / 16;
-       min_inodes = avefreei - inodes_per_group / 4;
-       min_blocks = avefreeb - EXT4_BLOCKS_PER_GROUP(sb) / 4;
-
-       max_debt = EXT4_BLOCKS_PER_GROUP(sb);
-       max_debt /= max_t(int, blocks_per_dir, BLOCK_COST);
-       if (max_debt * INODE_COST > inodes_per_group)
-               max_debt = inodes_per_group / INODE_COST;
-       if (max_debt > 255)
-               max_debt = 255;
-       if (max_debt == 0)
-               max_debt = 1;
+       min_inodes = avefreei - inodes_per_group*flex_size / 4;
+       if (min_inodes < 1)
+               min_inodes = 1;
+       min_blocks = avefreeb - EXT4_BLOCKS_PER_GROUP(sb)*flex_size / 4;
+
+       /*
+        * Start looking in the flex group where we last allocated an
+        * inode for this parent directory
+        */
+       if (EXT4_I(parent)->i_last_alloc_group != ~0) {
+               parent_group = EXT4_I(parent)->i_last_alloc_group;
+               if (flex_size > 1)
+                       parent_group >>= sbi->s_log_groups_per_flex;
+       }
 
        for (i = 0; i < ngroups; i++) {
-               *group = (parent_group + i) % ngroups;
-               desc = ext4_get_group_desc(sb, *group, NULL);
-               if (!desc || !ext4_free_inodes_count(sb, desc))
-                       continue;
-               if (ext4_used_dirs_count(sb, desc) >= max_dirs)
+               grp = (parent_group + i) % ngroups;
+               get_orlov_stats(sb, grp, flex_size, &stats);
+               if (stats.used_dirs >= max_dirs)
                        continue;
-               if (ext4_free_inodes_count(sb, desc) < min_inodes)
+               if (stats.free_inodes < min_inodes)
                        continue;
-               if (ext4_free_blks_count(sb, desc) < min_blocks)
+               if (stats.free_blocks < min_blocks)
                        continue;
-               return 0;
+               goto found_flex_bg;
        }
 
 fallback:
+       ngroups = real_ngroups;
+       avefreei = freei / ngroups;
+fallback_retry:
+       parent_group = EXT4_I(parent)->i_block_group;
        for (i = 0; i < ngroups; i++) {
-               *group = (parent_group + i) % ngroups;
-               desc = ext4_get_group_desc(sb, *group, NULL);
+               grp = (parent_group + i) % ngroups;
+               desc = ext4_get_group_desc(sb, grp, NULL);
                if (desc && ext4_free_inodes_count(sb, desc) &&
-                       ext4_free_inodes_count(sb, desc) >= avefreei)
+                   ext4_free_inodes_count(sb, desc) >= avefreei) {
+                       *group = grp;
                        return 0;
+               }
        }
 
        if (avefreei) {
@@ -535,19 +608,57 @@ fallback:
                 * filesystems the above test can fail to find any blockgroups
                 */
                avefreei = 0;
-               goto fallback;
+               goto fallback_retry;
        }
 
        return -1;
 }
 
 static int find_group_other(struct super_block *sb, struct inode *parent,
-                               ext4_group_t *group)
+                           ext4_group_t *group, int mode)
 {
        ext4_group_t parent_group = EXT4_I(parent)->i_block_group;
-       ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count;
+       ext4_group_t i, last, ngroups = ext4_get_groups_count(sb);
        struct ext4_group_desc *desc;
-       ext4_group_t i;
+       int flex_size = ext4_flex_bg_size(EXT4_SB(sb));
+
+       /*
+        * Try to place the inode is the same flex group as its
+        * parent.  If we can't find space, use the Orlov algorithm to
+        * find another flex group, and store that information in the
+        * parent directory's inode information so that we can use that flex
+        * group for future allocations.
+        */
+       if (flex_size > 1) {
+               int retry = 0;
+
+       try_again:
+               parent_group &= ~(flex_size-1);
+               last = parent_group + flex_size;
+               if (last > ngroups)
+                       last = ngroups;
+               for  (i = parent_group; i < last; i++) {
+                       desc = ext4_get_group_desc(sb, i, NULL);
+                       if (desc && ext4_free_inodes_count(sb, desc)) {
+                               *group = i;
+                               return 0;
+                       }
+               }
+               if (!retry && EXT4_I(parent)->i_last_alloc_group != ~0) {
+                       retry = 1;
+                       parent_group = EXT4_I(parent)->i_last_alloc_group;
+                       goto try_again;
+               }
+               /*
+                * If this didn't work, use the Orlov search algorithm
+                * to find a new flex group; we pass in the mode to
+                * avoid the topdir algorithms.
+                */
+               *group = parent_group + flex_size;
+               if (*group > ngroups)
+                       *group = 0;
+               return find_group_orlov(sb, parent, group, mode, 0);
+       }
 
        /*
         * Try to place the inode in its parent directory
@@ -601,10 +712,10 @@ static int find_group_other(struct super_block *sb, struct inode *parent,
 
 /*
  * claim the inode from the inode bitmap. If the group
- * is uninit we need to take the groups's sb_bgl_lock
+ * is uninit we need to take the group's ext4_group_lock
  * and clear the uninit flag. The inode bitmap update
  * and group desc uninit flag clear should be done
- * after holding sb_bgl_lock so that ext4_read_inode_bitmap
+ * after holding ext4_group_lock so that ext4_read_inode_bitmap
  * doesn't race with the ext4_claim_inode
  */
 static int ext4_claim_inode(struct super_block *sb,
@@ -615,7 +726,7 @@ static int ext4_claim_inode(struct super_block *sb,
        struct ext4_sb_info *sbi = EXT4_SB(sb);
        struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
 
-       spin_lock(sb_bgl_lock(sbi, group));
+       ext4_lock_group(sb, group);
        if (ext4_set_bit(ino, inode_bitmap_bh->b_data)) {
                /* not a free inode */
                retval = 1;
@@ -624,7 +735,7 @@ static int ext4_claim_inode(struct super_block *sb,
        ino++;
        if ((group == 0 && ino < EXT4_FIRST_INO(sb)) ||
                        ino > EXT4_INODES_PER_GROUP(sb)) {
-               spin_unlock(sb_bgl_lock(sbi, group));
+               ext4_unlock_group(sb, group);
                ext4_error(sb, __func__,
                           "reserved inode or inode > inodes count - "
                           "block_group = %u, inode=%lu", group,
@@ -665,10 +776,15 @@ static int ext4_claim_inode(struct super_block *sb,
        if (S_ISDIR(mode)) {
                count = ext4_used_dirs_count(sb, gdp) + 1;
                ext4_used_dirs_set(sb, gdp, count);
+               if (sbi->s_log_groups_per_flex) {
+                       ext4_group_t f = ext4_flex_group(sbi, group);
+
+                       atomic_inc(&sbi->s_flex_groups[f].used_dirs);
+               }
        }
        gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
 err_ret:
-       spin_unlock(sb_bgl_lock(sbi, group));
+       ext4_unlock_group(sb, group);
        return retval;
 }
 
@@ -682,16 +798,16 @@ err_ret:
  * For other inodes, search forward from the parent directory's block
  * group to find a free inode.
  */
-struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
+struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode,
+                            const struct qstr *qstr, __u32 goal)
 {
        struct super_block *sb;
        struct buffer_head *inode_bitmap_bh = NULL;
        struct buffer_head *group_desc_bh;
-       ext4_group_t group = 0;
+       ext4_group_t ngroups, group = 0;
        unsigned long ino = 0;
        struct inode *inode;
        struct ext4_group_desc *gdp = NULL;
-       struct ext4_super_block *es;
        struct ext4_inode_info *ei;
        struct ext4_sb_info *sbi;
        int ret2, err = 0;
@@ -706,25 +822,34 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
                return ERR_PTR(-EPERM);
 
        sb = dir->i_sb;
-       trace_mark(ext4_request_inode, "dev %s dir %lu mode %d", sb->s_id,
-                  dir->i_ino, mode);
+       ngroups = ext4_get_groups_count(sb);
+       trace_ext4_request_inode(dir, mode);
        inode = new_inode(sb);
        if (!inode)
                return ERR_PTR(-ENOMEM);
        ei = EXT4_I(inode);
-
        sbi = EXT4_SB(sb);
-       es = sbi->s_es;
 
-       if (sbi->s_log_groups_per_flex) {
+       if (!goal)
+               goal = sbi->s_inode_goal;
+
+       if (goal && goal <= le32_to_cpu(sbi->s_es->s_inodes_count)) {
+               group = (goal - 1) / EXT4_INODES_PER_GROUP(sb);
+               ino = (goal - 1) % EXT4_INODES_PER_GROUP(sb);
+               ret2 = 0;
+               goto got_group;
+       }
+
+       if (sbi->s_log_groups_per_flex && test_opt(sb, OLDALLOC)) {
                ret2 = find_group_flex(sb, dir, &group);
                if (ret2 == -1) {
-                       ret2 = find_group_other(sb, dir, &group);
-                       if (ret2 == 0 && once)
+                       ret2 = find_group_other(sb, dir, &group, mode);
+                       if (ret2 == 0 && once) {
                                once = 0;
                                printk(KERN_NOTICE "ext4: find_group_flex "
                                       "failed, fallback succeeded dir %lu\n",
                                       dir->i_ino);
+                       }
                }
                goto got_group;
        }
@@ -733,16 +858,17 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode)
                if (test_opt(sb, OLDALLOC))
                        ret2 = find_group_dir(sb, dir, &group);
                else
-                       ret2 = find_group_orlov(sb, dir, &group);
+                       ret2 = find_group_orlov(sb, dir, &group, mode, qstr);
        } else
-               ret2 = find_group_other(sb, dir, &group);
+               ret2 = find_group_other(sb, dir, &group, mode);
 
 got_group:
+       EXT4_I(dir)->i_last_alloc_group = group;
        err = -ENOSPC;
        if (ret2 == -1)
                goto out;
 
-       for (i = 0; i < sbi->s_groups_count; i++) {
+       for (i = 0; i < ngroups; i++, ino = 0) {
                err = -EIO;
 
                gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
@@ -754,8 +880,6 @@ got_group:
                if (!inode_bitmap_bh)
                        goto fail;
 
-               ino = 0;
-
 repeat_in_this_group:
                ino = ext4_find_next_zero_bit((unsigned long *)
                                              inode_bitmap_bh->b_data,
@@ -803,7 +927,7 @@ repeat_in_this_group:
                 * group descriptor metadata has not yet been updated.
                 * So we just go onto the next blockgroup.
                 */
-               if (++group == sbi->s_groups_count)
+               if (++group == ngroups)
                        group = 0;
        }
        err = -ENOSPC;
@@ -824,7 +948,7 @@ got:
                }
 
                free = 0;
-               spin_lock(sb_bgl_lock(sbi, group));
+               ext4_lock_group(sb, group);
                /* recheck and clear flag under lock if we still need to */
                if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
                        free = ext4_free_blocks_after_init(sb, group, gdp);
@@ -833,7 +957,7 @@ got:
                        gdp->bg_checksum = ext4_group_desc_csum(sbi, group,
                                                                gdp);
                }
-               spin_unlock(sb_bgl_lock(sbi, group));
+               ext4_unlock_group(sb, group);
 
                /* Don't need to dirty bitmap block if we didn't change it */
                if (free) {
@@ -858,9 +982,7 @@ got:
 
        if (sbi->s_log_groups_per_flex) {
                flex_group = ext4_flex_group(sbi, group);
-               spin_lock(sb_bgl_lock(sbi, flex_group));
-               sbi->s_flex_groups[flex_group].free_inodes--;
-               spin_unlock(sb_bgl_lock(sbi, flex_group));
+               atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
        }
 
        inode->i_uid = current_fsuid();
@@ -885,19 +1007,16 @@ got:
        ei->i_disksize = 0;
 
        /*
-        * Don't inherit extent flag from directory. We set extent flag on
-        * newly created directory and file only if -o extent mount option is
-        * specified
+        * Don't inherit extent flag from directory, amongst others. We set
+        * extent flag on newly created directory and file only if -o extent
+        * mount option is specified
         */
-       ei->i_flags = EXT4_I(dir)->i_flags & EXT4_FL_INHERITED;
-       if (S_ISLNK(mode))
-               ei->i_flags &= ~(EXT4_IMMUTABLE_FL|EXT4_APPEND_FL);
-       /* dirsync only applies to directories */
-       if (!S_ISDIR(mode))
-               ei->i_flags &= ~EXT4_DIRSYNC_FL;
+       ei->i_flags =
+               ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
        ei->i_file_acl = 0;
        ei->i_dtime = 0;
        ei->i_block_group = group;
+       ei->i_last_alloc_group = ~0;
 
        ext4_set_inode_flags(inode);
        if (IS_DIRSYNC(inode))
@@ -915,10 +1034,10 @@ got:
        ei->i_extra_isize = EXT4_SB(sb)->s_want_extra_isize;
 
        ret = inode;
-       if (vfs_dq_alloc_inode(inode)) {
-               err = -EDQUOT;
+       vfs_dq_init(inode);
+       err = dquot_alloc_inode(inode);
+       if (err)
                goto fail_drop;
-       }
 
        err = ext4_init_acl(handle, inode, dir);
        if (err)
@@ -943,8 +1062,7 @@ got:
        }
 
        ext4_debug("allocating inode %lu\n", inode->i_ino);
-       trace_mark(ext4_allocate_inode, "dev %s ino %lu dir %lu mode %d",
-                  sb->s_id, inode->i_ino, dir->i_ino, mode);
+       trace_ext4_allocate_inode(inode, dir, mode);
        goto really_out;
 fail:
        ext4_std_error(sb, err);
@@ -956,10 +1074,10 @@ really_out:
        return ret;
 
 fail_free_drop:
-       vfs_dq_free_inode(inode);
+       dquot_free_inode(inode);
 
 fail_drop:
-       vfs_dq_drop(inode);
+       dquot_drop(inode);
        inode->i_flags |= S_NOQUOTA;
        inode->i_nlink = 0;
        unlock_new_inode(inode);
@@ -1049,7 +1167,7 @@ unsigned long ext4_count_free_inodes(struct super_block *sb)
 {
        unsigned long desc_count;
        struct ext4_group_desc *gdp;
-       ext4_group_t i;
+       ext4_group_t i, ngroups = ext4_get_groups_count(sb);
 #ifdef EXT4FS_DEBUG
        struct ext4_super_block *es;
        unsigned long bitmap_count, x;
@@ -1059,7 +1177,7 @@ unsigned long ext4_count_free_inodes(struct super_block *sb)
        desc_count = 0;
        bitmap_count = 0;
        gdp = NULL;
-       for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
+       for (i = 0; i < ngroups; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
@@ -1071,7 +1189,7 @@ unsigned long ext4_count_free_inodes(struct super_block *sb)
 
                x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
                printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
-                       i, ext4_free_inodes_count(sb, gdp), x);
+                       (unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
                bitmap_count += x;
        }
        brelse(bitmap_bh);
@@ -1081,7 +1199,7 @@ unsigned long ext4_count_free_inodes(struct super_block *sb)
        return desc_count;
 #else
        desc_count = 0;
-       for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
+       for (i = 0; i < ngroups; i++) {
                gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
@@ -1096,9 +1214,9 @@ unsigned long ext4_count_free_inodes(struct super_block *sb)
 unsigned long ext4_count_dirs(struct super_block * sb)
 {
        unsigned long count = 0;
-       ext4_group_t i;
+       ext4_group_t i, ngroups = ext4_get_groups_count(sb);
 
-       for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
+       for (i = 0; i < ngroups; i++) {
                struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;