diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 47cd5fc..2869b33 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -28,9 +28,6 @@
 #include "ref-cache.h"
 #include "tree-log.h"
 
-extern struct kmem_cache *btrfs_trans_handle_cachep;
-extern struct kmem_cache *btrfs_transaction_cachep;
-
 #define BTRFS_ROOT_TRANS_TAG 0
 
 static noinline void put_transaction(struct btrfs_transaction *transaction)
@@ -56,8 +53,6 @@ static noinline int join_transaction(struct btrfs_root *root)
                                             GFP_NOFS);
                BUG_ON(!cur_trans);
                root->fs_info->generation++;
-               root->fs_info->last_alloc = 0;
-               root->fs_info->last_data_alloc = 0;
                cur_trans->num_writers = 1;
                cur_trans->num_joined = 0;
                cur_trans->transid = root->fs_info->generation;
@@ -68,6 +63,15 @@ static noinline int join_transaction(struct btrfs_root *root)
                cur_trans->use_count = 1;
                cur_trans->commit_done = 0;
                cur_trans->start_time = get_seconds();
+
+               cur_trans->delayed_refs.root.rb_node = NULL;
+               cur_trans->delayed_refs.num_entries = 0;
+               cur_trans->delayed_refs.num_heads_ready = 0;
+               cur_trans->delayed_refs.num_heads = 0;
+               cur_trans->delayed_refs.flushing = 0;
+               cur_trans->delayed_refs.run_delayed_start = 0;
+               spin_lock_init(&cur_trans->delayed_refs.lock);
+
                INIT_LIST_HEAD(&cur_trans->pending_snapshots);
                list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
                extent_io_tree_init(&cur_trans->dirty_pages,
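
The initializers above are the first appearance of the per-transaction
delayed-ref machinery.  Reconstructing purely from the fields this hunk
touches, the tracking structure presumably looks roughly like the sketch
below; the member names come straight from the initializers (with the
kernel's rb_root, spinlock_t and u64 types), but the layout and comments
are assumptions, not the verbatim definition:

	struct btrfs_delayed_ref_root {
		struct rb_root root;            /* rb-tree of pending ref updates */
		spinlock_t lock;                /* protects the tree and counters */
		unsigned long num_entries;      /* total queued ref modifications */
		unsigned long num_heads;        /* extents with pending updates */
		unsigned long num_heads_ready;  /* heads not currently being run */
		int flushing;                   /* commit in progress, drain the queue */
		u64 run_delayed_start;          /* resume cursor for partial runs */
	};
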
@@ -85,10 +89,10 @@ static noinline int join_transaction(struct btrfs_root *root)
 }
 
 /*
- * this does all the record keeping required to make sure that a
- * reference counted root is properly recorded in a given transaction.
- * This is required to make sure the old root from before we joined the transaction
- * is deleted when the transaction commits
+ * this does all the record keeping required to make sure that a reference
+ * counted root is properly recorded in a given transaction.  This is required
+ * to make sure the old root from before we joined the transaction is deleted
+ * when the transaction commits
  */
 noinline int btrfs_record_root_in_trans(struct btrfs_root *root)
 {
@@ -144,7 +148,7 @@ static void wait_current_trans(struct btrfs_root *root)
        if (cur_trans && cur_trans->blocked) {
                DEFINE_WAIT(wait);
                cur_trans->use_count++;
-               while(1) {
+               while (1) {
                        prepare_to_wait(&root->fs_info->transaction_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (cur_trans->blocked) {
@@ -182,9 +186,11 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
        h->transaction = root->fs_info->running_transaction;
        h->blocks_reserved = num_blocks;
        h->blocks_used = 0;
-       h->block_group = NULL;
+       h->block_group = 0;
        h->alloc_exclude_nr = 0;
        h->alloc_exclude_start = 0;
+       h->delayed_ref_updates = 0;
+
        root->fs_info->running_transaction->use_count++;
        mutex_unlock(&root->fs_info->trans_mutex);
        return h;
@@ -213,7 +219,7 @@ static noinline int wait_for_commit(struct btrfs_root *root,
 {
        DEFINE_WAIT(wait);
        mutex_lock(&root->fs_info->trans_mutex);
-       while(!commit->commit_done) {
+       while (!commit->commit_done) {
                prepare_to_wait(&commit->commit_wait, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (commit->commit_done)
@@ -228,8 +234,8 @@ static noinline int wait_for_commit(struct btrfs_root *root,
 }
 
 /*
- * rate limit against the drop_snapshot code.  This helps to slow down new operations
- * if the drop_snapshot code isn't able to keep up.
+ * rate limit against the drop_snapshot code.  This helps to slow down new
+ * operations if the drop_snapshot code isn't able to keep up.
  */
 static void throttle_on_drops(struct btrfs_root *root)
 {
@@ -274,7 +280,6 @@ void btrfs_throttle(struct btrfs_root *root)
        if (!root->fs_info->open_ioctl_trans)
                wait_current_trans(root);
        mutex_unlock(&root->fs_info->trans_mutex);
-
        throttle_on_drops(root);
 }
 
@@ -283,6 +288,27 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 {
        struct btrfs_transaction *cur_trans;
        struct btrfs_fs_info *info = root->fs_info;
+       int count = 0;
+
+       while (count < 4) {
+               unsigned long cur = trans->delayed_ref_updates;
+               trans->delayed_ref_updates = 0;
+               if (cur &&
+                   trans->transaction->delayed_refs.num_heads_ready > 64) {
+                       trans->delayed_ref_updates = 0;
+
+                       /*
+                        * do a full flush if the transaction is trying
+                        * to close
+                        */
+                       if (trans->transaction->delayed_refs.flushing)
+                               cur = 0;
+                       btrfs_run_delayed_refs(trans, root, cur);
+               } else {
+                       break;
+               }
+               count++;
+       }
 
        mutex_lock(&info->trans_mutex);
        cur_trans = info->running_transaction;
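
This loop is the back-pressure half of the new scheme: any handle that
queued ref updates is asked to pay some of that debt down at
end_transaction time, but only while the backlog is real (more than 64
heads ready) and for at most four rounds, so no single caller stalls
indefinitely.  The second zeroing of trans->delayed_ref_updates inside
the if is redundant but harmless.  Below, a compact restatement of the
policy, under the assumption that a count of 0 asks
btrfs_run_delayed_refs() for a full drain rather than a bounded batch;
the types here are minimal stand-ins, not the kernel's:

	struct delayed_ref_root { unsigned long num_heads_ready; int flushing; };
	struct transaction { struct delayed_ref_root delayed_refs; };
	struct trans_handle {
		unsigned long delayed_ref_updates;
		struct transaction *transaction;
	};

	void run_delayed_refs(struct trans_handle *trans, unsigned long count);

	static void throttle_delayed_refs(struct trans_handle *trans)
	{
		int pass;

		for (pass = 0; pass < 4; pass++) {
			unsigned long cur = trans->delayed_ref_updates;

			trans->delayed_ref_updates = 0;
			if (!cur ||
			    trans->transaction->delayed_refs.num_heads_ready <= 64)
				break;		/* backlog small enough, let it ride */
			if (trans->transaction->delayed_refs.flushing)
				cur = 0;	/* commit underway, drain fully */
			run_delayed_refs(trans, cur);
		}
	}
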
@@ -332,12 +358,12 @@ int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
        u64 end;
        unsigned long index;
 
-       while(1) {
+       while (1) {
                ret = find_first_extent_bit(dirty_pages, start, &start, &end,
                                            EXTENT_DIRTY);
                if (ret)
                        break;
-               while(start <= end) {
+               while (start <= end) {
                        cond_resched();
 
                        index = start >> PAGE_CACHE_SHIFT;
@@ -368,14 +394,14 @@ int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
                        page_cache_release(page);
                }
        }
-       while(1) {
+       while (1) {
                ret = find_first_extent_bit(dirty_pages, 0, &start, &end,
                                            EXTENT_DIRTY);
                if (ret)
                        break;
 
                clear_extent_dirty(dirty_pages, start, end, GFP_NOFS);
-               while(start <= end) {
+               while (start <= end) {
                        index = start >> PAGE_CACHE_SHIFT;
                        start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
                        page = find_get_page(btree_inode->i_mapping, index);
@@ -427,11 +453,12 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
        u64 old_root_bytenr;
        struct btrfs_root *tree_root = root->fs_info->tree_root;
 
-       btrfs_extent_post_op(trans, root);
        btrfs_write_dirty_block_groups(trans, root);
-       btrfs_extent_post_op(trans, root);
 
-       while(1) {
+       ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+       BUG_ON(ret);
+
+       while (1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                if (old_root_bytenr == root->node->start)
                        break;
@@ -441,14 +468,14 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
                                     btrfs_header_level(root->node));
                btrfs_set_root_generation(&root->root_item, trans->transid);
 
-               btrfs_extent_post_op(trans, root);
-
                ret = btrfs_update_root(trans, tree_root,
                                        &root->root_key,
                                        &root->root_item);
                BUG_ON(ret);
                btrfs_write_dirty_block_groups(trans, root);
-               btrfs_extent_post_op(trans, root);
+
+               ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+               BUG_ON(ret);
        }
        return 0;
 }
@@ -462,22 +489,28 @@ int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans,
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct list_head *next;
        struct extent_buffer *eb;
+       int ret;
 
-       btrfs_extent_post_op(trans, fs_info->tree_root);
+       ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+       BUG_ON(ret);
 
        eb = btrfs_lock_root_node(fs_info->tree_root);
-       btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb, 0);
+       btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb);
        btrfs_tree_unlock(eb);
        free_extent_buffer(eb);
 
-       btrfs_extent_post_op(trans, fs_info->tree_root);
+       ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+       BUG_ON(ret);
 
-       while(!list_empty(&fs_info->dirty_cowonly_roots)) {
+       while (!list_empty(&fs_info->dirty_cowonly_roots)) {
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
                root = list_entry(next, struct btrfs_root, dirty_list);
 
                update_cowonly_root(trans, root);
+
+               ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+               BUG_ON(ret);
        }
        return 0;
 }
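
A note on the recurring (unsigned long)-1 in the two hunks above: it is
evidently the "run everything" sentinel for btrfs_run_delayed_refs(), in
contrast to the bounded batches requested elsewhere.  The full drain has
to repeat inside the loops because btrfs_write_dirty_block_groups() and
btrfs_update_root() can themselves COW blocks and queue further delayed
refs; the loop only exits once the root stops moving.
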
@@ -521,7 +554,7 @@ static noinline int add_dirty_roots(struct btrfs_trans_handle *trans,
        int err = 0;
        u32 refs;
 
-       while(1) {
+       while (1) {
                ret = radix_tree_gang_lookup_tag(radix, (void **)gang, 0,
                                                 ARRAY_SIZE(gang),
                                                 BTRFS_ROOT_TRANS_TAG);
@@ -638,6 +671,31 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
 }
 
 /*
+ * when dropping snapshots, we generate a ton of delayed refs, and it makes
+ * sense not to join the transaction while it is trying to flush the current
+ * queue of delayed refs out.
+ *
+ * This is used by the drop snapshot code only
+ */
+static noinline int wait_transaction_pre_flush(struct btrfs_fs_info *info)
+{
+       DEFINE_WAIT(wait);
+
+       mutex_lock(&info->trans_mutex);
+       while (info->running_transaction &&
+              info->running_transaction->delayed_refs.flushing) {
+               prepare_to_wait(&info->transaction_wait, &wait,
+                               TASK_UNINTERRUPTIBLE);
+               mutex_unlock(&info->trans_mutex);
+               schedule();
+               mutex_lock(&info->trans_mutex);
+               finish_wait(&info->transaction_wait, &wait);
+       }
+       mutex_unlock(&info->trans_mutex);
+       return 0;
+}
+
+/*
  * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on
  * all of them
  */
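
wait_transaction_pre_flush() is the standard open-coded kernel sleep:
the task registers on the wait queue with prepare_to_wait() while still
holding trans_mutex, and the flushing flag only changes under that same
mutex, so a wakeup cannot slip in between the re-check and the
schedule().  In userspace terms this is simply a condition-variable
wait; a rough pthreads analogue, with all names as hypothetical
stand-ins:

	#include <pthread.h>

	struct fs_info {
		pthread_mutex_t trans_mutex;
		pthread_cond_t transaction_wait;
		int flushing;	/* models delayed_refs.flushing */
	};

	static void wait_transaction_pre_flush(struct fs_info *info)
	{
		pthread_mutex_lock(&info->trans_mutex);
		while (info->flushing)
			pthread_cond_wait(&info->transaction_wait,
					  &info->trans_mutex);
		pthread_mutex_unlock(&info->trans_mutex);
	}
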
@@ -653,7 +711,7 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
        int ret = 0;
        int err;
 
-       while(!list_empty(list)) {
+       while (!list_empty(list)) {
                struct btrfs_root *root;
 
                dirty = list_entry(list->prev, struct btrfs_dirty_root, list);
@@ -663,13 +721,27 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
                root = dirty->latest_root;
                atomic_inc(&root->fs_info->throttles);
 
-               while(1) {
+               while (1) {
+                       /*
+                        * we don't want to jump in and create a bunch of
+                        * delayed refs if the transaction is starting to close
+                        */
+                       wait_transaction_pre_flush(tree_root->fs_info);
                        trans = btrfs_start_transaction(tree_root, 1);
+
+                       /*
+                        * we've joined a transaction, make sure it isn't
+                        * closing right now
+                        */
+                       if (trans->transaction->delayed_refs.flushing) {
+                               btrfs_end_transaction(trans, tree_root);
+                               continue;
+                       }
+
                        mutex_lock(&root->fs_info->drop_mutex);
                        ret = btrfs_drop_snapshot(trans, dirty->root);
-                       if (ret != -EAGAIN) {
+                       if (ret != -EAGAIN)
                                break;
-                       }
                        mutex_unlock(&root->fs_info->drop_mutex);
 
                        err = btrfs_update_root(trans,
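
The pre-flush wait above is only advisory: a commit can set the flushing
flag between wait_transaction_pre_flush() returning and the transaction
being joined.  The re-check of delayed_refs.flushing after
btrfs_start_transaction() closes that window; if a commit did sneak in,
the handle is dropped and the loop starts over with a fresh wait.
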
@@ -692,7 +764,9 @@ static noinline int drop_dirty_roots(struct btrfs_root *tree_root,
                num_bytes -= btrfs_root_used(&dirty->root->root_item);
                bytes_used = btrfs_root_used(&root->root_item);
                if (num_bytes) {
+                       mutex_lock(&root->fs_info->trans_mutex);
                        btrfs_record_root_in_trans(root);
+                       mutex_unlock(&root->fs_info->trans_mutex);
                        btrfs_set_root_used(&root->root_item,
                                            bytes_used - num_bytes);
                }
@@ -768,7 +842,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
        btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
 
        old = btrfs_lock_root_node(root);
-       btrfs_cow_block(trans, root, old, NULL, 0, &old, 0);
+       btrfs_cow_block(trans, root, old, NULL, 0, &old);
 
        btrfs_copy_root(trans, root, old, &tmp, objectid);
        btrfs_tree_unlock(old);
@@ -804,7 +878,7 @@ static noinline int finish_pending_snapshot(struct btrfs_fs_info *fs_info,
 
        parent_inode = pending->dentry->d_parent->d_inode;
        parent_root = BTRFS_I(parent_inode)->root;
-       trans = btrfs_start_transaction(parent_root, 1);
+       trans = btrfs_join_transaction(parent_root, 1);
 
        /*
         * insert the directory item
@@ -819,6 +893,10 @@ static noinline int finish_pending_snapshot(struct btrfs_fs_info *fs_info,
        if (ret)
                goto fail;
 
+       btrfs_i_size_write(parent_inode, parent_inode->i_size + namelen * 2);
+       ret = btrfs_update_inode(trans, parent_root, parent_inode);
+       BUG_ON(ret);
+
        /* add the backref first */
        ret = btrfs_add_root_ref(trans, parent_root->fs_info->tree_root,
                                 pending->root_key.objectid,
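
Two details worth spelling out in the snapshot hunks above.  The switch
to btrfs_join_transaction() lets finish_pending_snapshot() attach to the
committing transaction instead of blocking on it, since it is invoked
from inside the commit path.  And the namelen * 2 in the i_size update
follows the btrfs convention that a directory's size counts each name
twice, once for the DIR_ITEM and once for the DIR_INDEX entry; a
snapshot named "snap1", for instance, would grow the parent directory's
i_size by 10.
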
@@ -852,11 +930,9 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
 {
        struct btrfs_pending_snapshot *pending;
        struct list_head *head = &trans->transaction->pending_snapshots;
-       struct list_head *cur;
        int ret;
 
-       list_for_each(cur, head) {
-               pending = list_entry(cur, struct btrfs_pending_snapshot, list);
+       list_for_each_entry(pending, head, list) {
                ret = create_pending_snapshot(trans, fs_info, pending);
                BUG_ON(ret);
        }
@@ -870,7 +946,7 @@ static noinline int finish_pending_snapshots(struct btrfs_trans_handle *trans,
        struct list_head *head = &trans->transaction->pending_snapshots;
        int ret;
 
-       while(!list_empty(head)) {
+       while (!list_empty(head)) {
                pending = list_entry(head->next,
                                     struct btrfs_pending_snapshot, list);
                ret = finish_pending_snapshot(fs_info, pending);
@@ -894,12 +970,32 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
        struct extent_io_tree *pinned_copy;
        DEFINE_WAIT(wait);
        int ret;
+       int should_grow = 0;
+       unsigned long now = get_seconds();
+       int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);
+
+       btrfs_run_ordered_operations(root, 0);
+
+       /* make a pass through all the delayed refs we have so far;
+        * any running procs may add more while we are here
+        */
+       ret = btrfs_run_delayed_refs(trans, root, 0);
+       BUG_ON(ret);
+
+       cur_trans = trans->transaction;
+       /*
+        * set the flushing flag so procs in this transaction have to
+        * start sending their work down.
+        */
+       cur_trans->delayed_refs.flushing = 1;
+
+       ret = btrfs_run_delayed_refs(trans, root, 0);
+       BUG_ON(ret);
 
-       INIT_LIST_HEAD(&dirty_fs_roots);
        mutex_lock(&root->fs_info->trans_mutex);
-       if (trans->transaction->in_commit) {
-               cur_trans = trans->transaction;
-               trans->transaction->use_count++;
+       INIT_LIST_HEAD(&dirty_fs_roots);
+       if (cur_trans->in_commit) {
+               cur_trans->use_count++;
                mutex_unlock(&root->fs_info->trans_mutex);
                btrfs_end_transaction(trans, root);
 
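
The reshuffled prologue above is the commit-side half of the throttle:
run one pass of delayed refs, set delayed_refs.flushing so that every
handle ending from now on helps drain the queue via
__btrfs_end_transaction(), then run a second pass to pick up whatever
accumulated in between.  The count of 0 passed here, as opposed to the
(unsigned long)-1 used later in the commit, presumably requests a batch
sized to what is currently queued rather than an exhaustive drain.
Hoisting the cur_trans assignment up also lets the commit-in-progress
early path reuse it.
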
@@ -922,7 +1018,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
        trans->transaction->in_commit = 1;
        trans->transaction->blocked = 1;
-       cur_trans = trans->transaction;
        if (cur_trans->list.prev != &root->fs_info->trans_list) {
                prev_trans = list_entry(cur_trans->list.prev,
                                        struct btrfs_transaction, list);
@@ -937,6 +1032,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                }
        }
 
+       if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
+               should_grow = 1;
+
        do {
                int snap_pending = 0;
                joined = cur_trans->num_joined;
@@ -949,26 +1047,42 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
                if (cur_trans->num_writers > 1)
                        timeout = MAX_SCHEDULE_TIMEOUT;
-               else
+               else if (should_grow)
                        timeout = 1;
 
                mutex_unlock(&root->fs_info->trans_mutex);
 
-               if (snap_pending) {
+               if (flush_on_commit || snap_pending) {
+                       if (flush_on_commit)
+                               btrfs_start_delalloc_inodes(root);
                        ret = btrfs_wait_ordered_extents(root, 1);
                        BUG_ON(ret);
                }
 
-               schedule_timeout(timeout);
+               /*
+                * rename doesn't use btrfs_join_transaction, so once we
+                * set the transaction to blocked above, we aren't going
+                * to get any new ordered operations.  We can safely run
+                * it here and know for sure that nothing new will be
+                * added to the list
+                */
+               btrfs_run_ordered_operations(root, 1);
+
+               smp_mb();
+               if (cur_trans->num_writers > 1 || should_grow)
+                       schedule_timeout(timeout);
 
                mutex_lock(&root->fs_info->trans_mutex);
                finish_wait(&cur_trans->writer_wait, &wait);
        } while (cur_trans->num_writers > 1 ||
-                (cur_trans->num_joined != joined));
+                (should_grow && cur_trans->num_joined != joined));
 
        ret = create_pending_snapshots(trans, root->fs_info);
        BUG_ON(ret);
 
+       ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+       BUG_ON(ret);
+
        WARN_ON(cur_trans != trans->transaction);
 
        /* btrfs_commit_tree_roots is responsible for getting the
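
should_grow gates the optimistic wait: only a transaction that is less
than about a second old (the now < start_time arm simply guards against
a clock step backwards) is held open an extra jiffy in the hope of
batching in more joiners.  With a single writer and should_grow clear,
the loop no longer sleeps at all, and the exit test correspondingly
stops waiting for num_joined to settle.  The smp_mb() appears intended
to make the lockless num_writers re-check observe writers that joined
after trans_mutex was dropped.
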
@@ -1032,6 +1146,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
        btrfs_copy_pinned(root, pinned_copy);
 
        trans->transaction->blocked = 0;
+
        wake_up(&root->fs_info->transaction_throttle);
        wake_up(&root->fs_info->transaction_wait);
 
@@ -1058,6 +1173,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
        mutex_lock(&root->fs_info->trans_mutex);
 
        cur_trans->commit_done = 1;
+
        root->fs_info->last_trans_committed = cur_trans->transid;
        wake_up(&cur_trans->commit_wait);
 
@@ -1072,9 +1188,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
 
-       if (root->fs_info->closing) {
+       if (root->fs_info->closing)
                drop_dirty_roots(root->fs_info->tree_root, &dirty_fs_roots);
-       }
        return ret;
 }