#include "ref-cache.h"
#include "tree-log.h"
-static int total_trans = 0;
extern struct kmem_cache *btrfs_trans_handle_cachep;
extern struct kmem_cache *btrfs_transaction_cachep;
WARN_ON(transaction->use_count == 0);
transaction->use_count--;
if (transaction->use_count == 0) {
- WARN_ON(total_trans == 0);
- total_trans--;
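+ /* last reference is gone: unlink the transaction and free it */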
list_del_init(&transaction->list);
memset(transaction, 0, sizeof(*transaction));
kmem_cache_free(btrfs_transaction_cachep, transaction);
if (!cur_trans) {
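+ /* no transaction running: allocate one (GFP_NOFS avoids recursing into the fs) */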
cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
GFP_NOFS);
- total_trans++;
BUG_ON(!cur_trans);
root->fs_info->generation++;
root->fs_info->last_alloc = 0;
h->transaction = root->fs_info->running_transaction;
h->blocks_reserved = num_blocks;
h->blocks_used = 0;
- h->block_group = NULL;
+ h->block_group = 0;
h->alloc_exclude_nr = 0;
h->alloc_exclude_start = 0;
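+ /* the new handle pins the running transaction until btrfs_end_transaction */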
root->fs_info->running_transaction->use_count++;
int werr = 0;
struct page *page;
struct inode *btree_inode = root->fs_info->btree_inode;
- struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
u64 start = 0;
u64 end;
unsigned long index;
page_cache_release(page);
}
}
- /*
- * we unplug once and then use the wait_on_extent_bit for
- * everything else
- */
- blk_run_address_space(btree_inode->i_mapping);
while (1) {
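+ /* walk every range still tagged EXTENT_DIRTY in this transaction's dirty_pages tree */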
ret = find_first_extent_bit(dirty_pages, 0, &start, &end,
EXTENT_DIRTY);
if (err)
werr = err;
}
- if (PageWriteback(page)) {
- /*
- * we don't wait on the page writeback bit
- * because that triggers a lot of unplugs.
- * The extent bits are much nicer to
- * the disks, but come with a slightly
- * higher latency because we aren't forcing
- * unplugs.
- */
- wait_on_extent_writeback(io_tree,
- page_offset(page),
- page_offset(page) +
- PAGE_CACHE_SIZE - 1);
- }
- if (PageWriteback(page)) {
- /*
- * the state bits get cleared before the
- * page bits, lets add some extra
- * paranoia here
- */
- wait_on_page_writeback(page);
- }
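+ /* wait on the page writeback bit directly instead of the extent state bits */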
+ wait_on_page_writeback(page);
page_cache_release(page);
cond_resched();
}
struct extent_buffer *tmp;
struct extent_buffer *old;
int ret;
- int namelen;
u64 objectid;
new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
if (ret)
goto fail;
+ key.offset = (u64)-1;
+ memcpy(&pending->root_key, &key, sizeof(key));
+fail:
+ kfree(new_root_item);
+ return ret;
+}
+
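+ /*
+ * snapshot creation is split in two: create_pending_snapshot() copies
+ * the root while the transaction commits, and finish_pending_snapshot()
+ * runs after the commit to link the new root into the directory tree.
+ */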
+static noinline int finish_pending_snapshot(struct btrfs_fs_info *fs_info,
+ struct btrfs_pending_snapshot *pending)
+{
+ int ret;
+ int namelen;
+ u64 index = 0;
+ struct btrfs_trans_handle *trans;
+ struct inode *parent_inode;
+ struct inode *inode;
+ struct btrfs_root *parent_root;
+
+ parent_inode = pending->dentry->d_parent->d_inode;
+ parent_root = BTRFS_I(parent_inode)->root;
+ trans = btrfs_start_transaction(parent_root, 1);
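+
+ /*
+ * the snapshot root already exists; this transaction only covers
+ * the directory entries in the parent root.
+ */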
+
/*
* insert the directory item
*/
- key.offset = (u64)-1;
namelen = strlen(pending->name);
- ret = btrfs_insert_dir_item(trans, root->fs_info->tree_root,
- pending->name, namelen,
- root->fs_info->sb->s_root->d_inode->i_ino,
- &key, BTRFS_FT_DIR, 0);
+ ret = btrfs_set_inode_index(parent_inode, &index);
+ if (ret)
+ goto fail;
+ ret = btrfs_insert_dir_item(trans, parent_root,
+ pending->name, namelen,
+ parent_inode->i_ino,
+ &pending->root_key, BTRFS_FT_DIR, index);
if (ret)
goto fail;
- ret = btrfs_insert_inode_ref(trans, root->fs_info->tree_root,
- pending->name, strlen(pending->name), objectid,
- root->fs_info->sb->s_root->d_inode->i_ino, 0);
+ /* add the backref first */
+ ret = btrfs_add_root_ref(trans, parent_root->fs_info->tree_root,
+ pending->root_key.objectid,
+ BTRFS_ROOT_BACKREF_KEY,
+ parent_root->root_key.objectid,
+ parent_inode->i_ino, index, pending->name,
+ namelen);
+
+ BUG_ON(ret);
- /* Invalidate existing dcache entry for new snapshot. */
- btrfs_invalidate_dcache_root(root, pending->name, namelen);
+ /* now add the forward ref */
+ ret = btrfs_add_root_ref(trans, parent_root->fs_info->tree_root,
+ parent_root->root_key.objectid,
+ BTRFS_ROOT_REF_KEY,
+ pending->root_key.objectid,
+ parent_inode->i_ino, index, pending->name,
+ namelen);
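+ /* instantiate the dentry so the new snapshot is visible right away */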
+ inode = btrfs_lookup_dentry(parent_inode, pending->dentry);
+ if (IS_ERR(inode)) {
+ ret = PTR_ERR(inode);
+ goto fail;
+ }
+ d_instantiate(pending->dentry, inode);
fail:
- kfree(new_root_item);
+ btrfs_end_transaction(trans, fs_info->fs_root);
return ret;
}
{
struct btrfs_pending_snapshot *pending;
struct list_head *head = &trans->transaction->pending_snapshots;
+ int ret;
+
+ list_for_each_entry(pending, head, list) {
+ ret = create_pending_snapshot(trans, fs_info, pending);
+ BUG_ON(ret);
+ }
+ return 0;
+}
+
+static noinline int finish_pending_snapshots(struct btrfs_trans_handle *trans,
+ struct btrfs_fs_info *fs_info)
+{
+ struct btrfs_pending_snapshot *pending;
+ struct list_head *head = &trans->transaction->pending_snapshots;
int ret;
while (!list_empty(head)) {
pending = list_entry(head->next,
struct btrfs_pending_snapshot, list);
- ret = create_pending_snapshot(trans, fs_info, pending);
+ ret = finish_pending_snapshot(fs_info, pending);
BUG_ON(ret);
list_del(&pending->list);
kfree(pending->name);
mutex_unlock(&root->fs_info->trans_mutex);
ret = btrfs_write_and_wait_transaction(trans, root);
BUG_ON(ret);
- write_ctree_super(trans, root);
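+ /* third argument is the max_mirrors limit; 0 writes every superblock copy */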
+ write_ctree_super(trans, root, 0);
/*
* the super is written, we can safely allow the tree-loggers
btrfs_drop_dead_reloc_roots(root);
mutex_unlock(&root->fs_info->tree_reloc_mutex);
+ /* do the directory inserts of any pending snapshot creations */
+ finish_pending_snapshots(trans, root->fs_info);
+
mutex_lock(&root->fs_info->trans_mutex);
cur_trans->commit_done = 1;
root->fs_info->last_trans_committed = cur_trans->transid;
wake_up(&cur_trans->commit_wait);
+
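+ /* drop our commit-time reference and the transaction's original reference */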
put_transaction(cur_trans);
put_transaction(cur_trans);
list_splice_init(&root->fs_info->dead_roots, &dirty_fs_roots);
mutex_unlock(&root->fs_info->trans_mutex);
+
kmem_cache_free(btrfs_trans_handle_cachep, trans);
if (root->fs_info->closing) {