msm: physical offset for MSM7X30
[safe/jmp/linux-2.6] / fs / btrfs / tree-log.c
index 835daed..af57dd2 100644 (file)
  */
 
 #include <linux/sched.h>
+#include <linux/slab.h>
 #include "ctree.h"
 #include "transaction.h"
 #include "disk-io.h"
 #include "locking.h"
 #include "print-tree.h"
 #include "compat.h"
+#include "tree-log.h"
 
 /* magic values for the inode_only field in btrfs_log_inode:
  *
 #define LOG_INODE_EXISTS 1
 
 /*
+ * directory trouble cases
+ *
+ * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
+ * log, we must force a full commit before doing an fsync of the directory
+ * where the unlink was done.
+ * ---> record transid of last unlink/rename per directory
+ *
+ * mkdir foo/some_dir
+ * normal commit
+ * rename foo/some_dir foo2/some_dir
+ * mkdir foo/some_dir
+ * fsync foo/some_dir/some_file
+ *
+ * The fsync above will unlink the original some_dir without recording
+ * it in its new location (foo2).  After a crash, some_dir will be gone
+ * unless the fsync of some_file forces a full commit
+ *
+ * 2) we must log any new names for any file or dir that is in the fsync
+ * log. ---> check inode while renaming/linking.
+ *
+ * 2a) we must log any new names for any file or dir during rename
+ * when the directory they are being removed from was logged.
+ * ---> check inode and old parent dir during rename
+ *
+ *  2a is actually the more important variant.  With the extra logging
+ *  a crash might unlink the old name without recreating the new one
+ *
+ * 3) after a crash, we must go through any directories with a link count
+ * of zero and redo the rm -rf
+ *
+ * mkdir f1/foo
+ * normal commit
+ * rm -rf f1/foo
+ * fsync(f1)
+ *
+ * The directory f1 was fully removed from the FS, but fsync was never
+ * called on f1, only its parent dir.  After a crash the rm -rf must
+ * be replayed.  This must be able to recurse down the entire
+ * directory tree.  The inode link count fixup code takes care of the
+ * ugly details.
+ */
+
+/*
  * stages for the tree walking.  The first
  * stage (0) is to only pin down the blocks we find
  * the second stage (1) is to make sure that all the inodes
 #define LOG_WALK_REPLAY_INODES 1
 #define LOG_WALK_REPLAY_ALL 2
 
-static int __btrfs_log_inode(struct btrfs_trans_handle *trans,
+static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, struct inode *inode,
                             int inode_only);
+static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
+                            struct btrfs_root *root,
+                            struct btrfs_path *path, u64 objectid);
+static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
+                                      struct btrfs_root *root,
+                                      struct btrfs_root *log,
+                                      struct btrfs_path *path,
+                                      u64 dirid, int del_all);
 
 /*
  * tree logging is a special write ahead log used to make sure that
@@ -74,90 +127,6 @@ static int __btrfs_log_inode(struct btrfs_trans_handle *trans,
  */
 
 /*
- * btrfs_add_log_tree adds a new per-subvolume log tree into the
- * tree of log tree roots.  This must be called with a tree log transaction
- * running (see start_log_trans).
- */
-int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
-                     struct btrfs_root *root)
-{
-       struct btrfs_key key;
-       struct btrfs_root_item root_item;
-       struct btrfs_inode_item *inode_item;
-       struct extent_buffer *leaf;
-       struct btrfs_root *new_root = root;
-       int ret;
-       u64 objectid = root->root_key.objectid;
-
-       leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
-                                     BTRFS_TREE_LOG_OBJECTID,
-                                     trans->transid, 0, 0, 0);
-       if (IS_ERR(leaf)) {
-               ret = PTR_ERR(leaf);
-               return ret;
-       }
-
-       btrfs_set_header_nritems(leaf, 0);
-       btrfs_set_header_level(leaf, 0);
-       btrfs_set_header_bytenr(leaf, leaf->start);
-       btrfs_set_header_generation(leaf, trans->transid);
-       btrfs_set_header_owner(leaf, BTRFS_TREE_LOG_OBJECTID);
-
-       write_extent_buffer(leaf, root->fs_info->fsid,
-                           (unsigned long)btrfs_header_fsid(leaf),
-                           BTRFS_FSID_SIZE);
-       btrfs_mark_buffer_dirty(leaf);
-
-       inode_item = &root_item.inode;
-       memset(inode_item, 0, sizeof(*inode_item));
-       inode_item->generation = cpu_to_le64(1);
-       inode_item->size = cpu_to_le64(3);
-       inode_item->nlink = cpu_to_le32(1);
-       inode_item->nbytes = cpu_to_le64(root->leafsize);
-       inode_item->mode = cpu_to_le32(S_IFDIR | 0755);
-
-       btrfs_set_root_bytenr(&root_item, leaf->start);
-       btrfs_set_root_level(&root_item, 0);
-       btrfs_set_root_refs(&root_item, 0);
-       btrfs_set_root_used(&root_item, 0);
-
-       memset(&root_item.drop_progress, 0, sizeof(root_item.drop_progress));
-       root_item.drop_level = 0;
-
-       btrfs_tree_unlock(leaf);
-       free_extent_buffer(leaf);
-       leaf = NULL;
-
-       btrfs_set_root_dirid(&root_item, 0);
-
-       key.objectid = BTRFS_TREE_LOG_OBJECTID;
-       key.offset = objectid;
-       btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
-       ret = btrfs_insert_root(trans, root->fs_info->log_root_tree, &key,
-                               &root_item);
-       if (ret)
-               goto fail;
-
-       new_root = btrfs_read_fs_root_no_radix(root->fs_info->log_root_tree,
-                                              &key);
-       BUG_ON(!new_root);
-
-       WARN_ON(root->log_root);
-       root->log_root = new_root;
-
-       /*
-        * log trees do not get reference counted because they go away
-        * before a real commit is actually done.  They do store pointers
-        * to file data extents, and those reference counts still get
-        * updated (along with back refs to the log tree).
-        */
-       new_root->ref_cows = 0;
-       new_root->last_trans = trans->transid;
-fail:
-       return ret;
-}
-
-/*
  * start a sub transaction and setup the log tree
  * this increments the log tree writer count to make the people
  * syncing the tree wait for us to finish
@@ -166,6 +135,23 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root)
 {
        int ret;
+
+       mutex_lock(&root->log_mutex);
+       if (root->log_root) {
+               if (!root->log_start_pid) {
+                       root->log_start_pid = current->pid;
+                       root->log_multiple_pids = false;
+               } else if (root->log_start_pid != current->pid) {
+                       root->log_multiple_pids = true;
+               }
+
+               root->log_batch++;
+               atomic_inc(&root->log_writers);
+               mutex_unlock(&root->log_mutex);
+               return 0;
+       }
+       root->log_multiple_pids = false;
+       root->log_start_pid = current->pid;
        mutex_lock(&root->fs_info->tree_log_mutex);
        if (!root->fs_info->log_root_tree) {
                ret = btrfs_init_log_root_tree(trans, root->fs_info);
@@ -175,9 +161,10 @@ static int start_log_trans(struct btrfs_trans_handle *trans,
                ret = btrfs_add_log_tree(trans, root);
                BUG_ON(ret);
        }
-       atomic_inc(&root->fs_info->tree_log_writers);
-       root->fs_info->tree_log_batch++;
        mutex_unlock(&root->fs_info->tree_log_mutex);
+       root->log_batch++;
+       atomic_inc(&root->log_writers);
+       mutex_unlock(&root->log_mutex);
        return 0;
 }
 
@@ -194,13 +181,27 @@ static int join_running_log_trans(struct btrfs_root *root)
        if (!root->log_root)
                return -ENOENT;
 
-       mutex_lock(&root->fs_info->tree_log_mutex);
+       mutex_lock(&root->log_mutex);
        if (root->log_root) {
                ret = 0;
-               atomic_inc(&root->fs_info->tree_log_writers);
-               root->fs_info->tree_log_batch++;
+               atomic_inc(&root->log_writers);
        }
-       mutex_unlock(&root->fs_info->tree_log_mutex);
+       mutex_unlock(&root->log_mutex);
+       return ret;
+}
+
+/*
+ * This either makes the current running log transaction wait
+ * until you call btrfs_end_log_trans() or it makes any future
+ * log transactions wait until you call btrfs_end_log_trans()
+ */
+int btrfs_pin_log_trans(struct btrfs_root *root)
+{
+       int ret = -ENOENT;
+
+       mutex_lock(&root->log_mutex);
+       atomic_inc(&root->log_writers);
+       mutex_unlock(&root->log_mutex);
        return ret;
 }
 
@@ -208,12 +209,13 @@ static int join_running_log_trans(struct btrfs_root *root)
  * indicate we're done making changes to the log tree
  * and wake up anyone waiting to do a sync
  */
-static int end_log_trans(struct btrfs_root *root)
+int btrfs_end_log_trans(struct btrfs_root *root)
 {
-       atomic_dec(&root->fs_info->tree_log_writers);
-       smp_mb();
-       if (waitqueue_active(&root->fs_info->tree_log_wait))
-               wake_up(&root->fs_info->tree_log_wait);
+       if (atomic_dec_and_test(&root->log_writers)) {
+               smp_mb();
+               if (waitqueue_active(&root->log_writer_wait))
+                       wake_up(&root->log_writer_wait);
+       }
        return 0;
 }
 
@@ -270,12 +272,9 @@ static int process_one_buffer(struct btrfs_root *log,
                              struct extent_buffer *eb,
                              struct walk_control *wc, u64 gen)
 {
-       if (wc->pin) {
-               mutex_lock(&log->fs_info->pinned_mutex);
-               btrfs_update_pinned_extents(log->fs_info->extent_root,
-                                           eb->start, eb->len, 1);
-               mutex_unlock(&log->fs_info->pinned_mutex);
-       }
+       if (wc->pin)
+               btrfs_pin_extent(log->fs_info->extent_root,
+                                eb->start, eb->len, 0);
 
        if (btrfs_buffer_uptodate(eb, gen)) {
                if (wc->write)
@@ -372,13 +371,8 @@ insert:
                if (found_size > item_size) {
                        btrfs_truncate_item(trans, root, path, item_size, 1);
                } else if (found_size < item_size) {
-                       ret = btrfs_del_item(trans, root,
-                                            path);
-                       BUG_ON(ret);
-
-                       btrfs_release_path(root, path);
-                       ret = btrfs_insert_empty_item(trans,
-                                 root, path, key, item_size);
+                       ret = btrfs_extend_item(trans, root, path,
+                                               item_size - found_size);
                        BUG_ON(ret);
                }
        } else if (ret) {
@@ -433,48 +427,6 @@ insert:
                                                   trans->transid);
                }
        }
-
-       if (overwrite_root &&
-           key->type == BTRFS_EXTENT_DATA_KEY) {
-               int extent_type;
-               struct btrfs_file_extent_item *fi;
-
-               fi = (struct btrfs_file_extent_item *)dst_ptr;
-               extent_type = btrfs_file_extent_type(path->nodes[0], fi);
-               if (extent_type == BTRFS_FILE_EXTENT_REG) {
-                       struct btrfs_key ins;
-                       ins.objectid = btrfs_file_extent_disk_bytenr(
-                                                       path->nodes[0], fi);
-                       ins.offset = btrfs_file_extent_disk_num_bytes(
-                                                       path->nodes[0], fi);
-                       ins.type = BTRFS_EXTENT_ITEM_KEY;
-
-                       /*
-                        * is this extent already allocated in the extent
-                        * allocation tree?  If so, just add a reference
-                        */
-                       ret = btrfs_lookup_extent(root, ins.objectid,
-                                                 ins.offset);
-                       if (ret == 0) {
-                               ret = btrfs_inc_extent_ref(trans, root,
-                                               ins.objectid, ins.offset,
-                                               path->nodes[0]->start,
-                                               root->root_key.objectid,
-                                               trans->transid, key->objectid);
-                       } else {
-                               /*
-                                * insert the extent pointer in the extent
-                                * allocation tree
-                                */
-                               ret = btrfs_alloc_logged_extent(trans, root,
-                                               path->nodes[0]->start,
-                                               root->root_key.objectid,
-                                               trans->transid, key->objectid,
-                                               &ins);
-                               BUG_ON(ret);
-                       }
-               }
-       }
 no_copy:
        btrfs_mark_buffer_dirty(path->nodes[0]);
        btrfs_release_path(root, path);
@@ -488,18 +440,16 @@ no_copy:
 static noinline struct inode *read_one_inode(struct btrfs_root *root,
                                             u64 objectid)
 {
+       struct btrfs_key key;
        struct inode *inode;
-       inode = btrfs_iget_locked(root->fs_info->sb, objectid, root);
-       if (inode->i_state & I_NEW) {
-               BTRFS_I(inode)->root = root;
-               BTRFS_I(inode)->location.objectid = objectid;
-               BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
-               BTRFS_I(inode)->location.offset = 0;
-               btrfs_read_locked_inode(inode);
-               unlock_new_inode(inode);
 
-       }
-       if (is_bad_inode(inode)) {
+       key.objectid = objectid;
+       key.type = BTRFS_INODE_ITEM_KEY;
+       key.offset = 0;
+       inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
+       if (IS_ERR(inode)) {
+               inode = NULL;
+       } else if (is_bad_inode(inode)) {
                iput(inode);
                inode = NULL;
        }
@@ -529,6 +479,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
        u64 extent_end;
        u64 alloc_hint;
        u64 start = key->offset;
+       u64 saved_nbytes;
        struct btrfs_file_extent_item *item;
        struct inode *inode = NULL;
        unsigned long size;
@@ -537,7 +488,8 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
        item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
        found_type = btrfs_file_extent_type(eb, item);
 
-       if (found_type == BTRFS_FILE_EXTENT_REG)
+       if (found_type == BTRFS_FILE_EXTENT_REG ||
+           found_type == BTRFS_FILE_EXTENT_PREALLOC)
                extent_end = start + btrfs_file_extent_num_bytes(eb, item);
        else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
                size = btrfs_file_extent_inline_len(eb, item);
@@ -561,7 +513,9 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
        ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
                                       start, 0);
 
-       if (ret == 0 && found_type == BTRFS_FILE_EXTENT_REG) {
+       if (ret == 0 &&
+           (found_type == BTRFS_FILE_EXTENT_REG ||
+            found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
                struct btrfs_file_extent_item cmp1;
                struct btrfs_file_extent_item cmp2;
                struct btrfs_file_extent_item *existing;
@@ -587,17 +541,94 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
        }
        btrfs_release_path(root, path);
 
+       saved_nbytes = inode_get_bytes(inode);
        /* drop any overlapping extents */
-       ret = btrfs_drop_extents(trans, root, inode,
-                        start, extent_end, start, &alloc_hint);
+       ret = btrfs_drop_extents(trans, inode, start, extent_end,
+                                &alloc_hint, 1);
        BUG_ON(ret);
 
-       /* insert the extent */
-       ret = overwrite_item(trans, root, path, eb, slot, key);
-       BUG_ON(ret);
+       if (found_type == BTRFS_FILE_EXTENT_REG ||
+           found_type == BTRFS_FILE_EXTENT_PREALLOC) {
+               u64 offset;
+               unsigned long dest_offset;
+               struct btrfs_key ins;
+
+               ret = btrfs_insert_empty_item(trans, root, path, key,
+                                             sizeof(*item));
+               BUG_ON(ret);
+               dest_offset = btrfs_item_ptr_offset(path->nodes[0],
+                                                   path->slots[0]);
+               copy_extent_buffer(path->nodes[0], eb, dest_offset,
+                               (unsigned long)item,  sizeof(*item));
+
+               ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
+               ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
+               ins.type = BTRFS_EXTENT_ITEM_KEY;
+               offset = key->offset - btrfs_file_extent_offset(eb, item);
+
+               if (ins.objectid > 0) {
+                       u64 csum_start;
+                       u64 csum_end;
+                       LIST_HEAD(ordered_sums);
+                       /*
+                        * is this extent already allocated in the extent
+                        * allocation tree?  If so, just add a reference
+                        */
+                       ret = btrfs_lookup_extent(root, ins.objectid,
+                                               ins.offset);
+                       if (ret == 0) {
+                               ret = btrfs_inc_extent_ref(trans, root,
+                                               ins.objectid, ins.offset,
+                                               0, root->root_key.objectid,
+                                               key->objectid, offset);
+                       } else {
+                               /*
+                                * insert the extent pointer in the extent
+                                * allocation tree
+                                */
+                               ret = btrfs_alloc_logged_file_extent(trans,
+                                               root, root->root_key.objectid,
+                                               key->objectid, offset, &ins);
+                               BUG_ON(ret);
+                       }
+                       btrfs_release_path(root, path);
+
+                       if (btrfs_file_extent_compression(eb, item)) {
+                               csum_start = ins.objectid;
+                               csum_end = csum_start + ins.offset;
+                       } else {
+                               csum_start = ins.objectid +
+                                       btrfs_file_extent_offset(eb, item);
+                               csum_end = csum_start +
+                                       btrfs_file_extent_num_bytes(eb, item);
+                       }
 
-       /* btrfs_drop_extents changes i_bytes & i_blocks, update it here */
-       inode_add_bytes(inode, extent_end - start);
+                       ret = btrfs_lookup_csums_range(root->log_root,
+                                               csum_start, csum_end - 1,
+                                               &ordered_sums);
+                       BUG_ON(ret);
+                       while (!list_empty(&ordered_sums)) {
+                               struct btrfs_ordered_sum *sums;
+                               sums = list_entry(ordered_sums.next,
+                                               struct btrfs_ordered_sum,
+                                               list);
+                               ret = btrfs_csum_file_blocks(trans,
+                                               root->fs_info->csum_root,
+                                               sums);
+                               BUG_ON(ret);
+                               list_del(&sums->list);
+                               kfree(sums);
+                       }
+               } else {
+                       btrfs_release_path(root, path);
+               }
+       } else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
+               /* inline extents are easy, we just overwrite them */
+               ret = overwrite_item(trans, root, path, eb, slot, key);
+               BUG_ON(ret);
+       }
+
+       inode_set_bytes(inode, saved_nbytes);
        btrfs_update_inode(trans, root, inode);
 out:
        if (inode)
@@ -637,8 +668,11 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
        inode = read_one_inode(root, location.objectid);
        BUG_ON(!inode);
 
-       btrfs_inc_nlink(inode);
+       ret = link_to_fixup_dir(trans, root, path, location.objectid);
+       BUG_ON(ret);
+
        ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
+       BUG_ON(ret);
        kfree(name);
 
        iput(inode);
@@ -773,7 +807,7 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
                return -ENOENT;
 
        inode = read_one_inode(root, key->objectid);
-       BUG_ON(!dir);
+       BUG_ON(!inode);
 
        ref_ptr = btrfs_item_ptr_offset(eb, slot);
        ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);
@@ -823,7 +857,7 @@ conflict_again:
                 */
                ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
                ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
-               while(ptr < ptr_end) {
+               while (ptr < ptr_end) {
                        victim_ref = (struct btrfs_inode_ref *)ptr;
                        victim_name_len = btrfs_inode_ref_name_len(leaf,
                                                                   victim_ref);
@@ -838,6 +872,7 @@ conflict_again:
                                            victim_name_len)) {
                                btrfs_inc_nlink(inode);
                                btrfs_release_path(root, path);
+
                                ret = btrfs_unlink_inode(trans, root, dir,
                                                         inode, victim_name,
                                                         victim_name_len);
@@ -896,78 +931,17 @@ out_nowrite:
        return 0;
 }
 
-/*
- * replay one csum item from the log tree into the subvolume 'root'
- * eb, slot and key all refer to the log tree
- * path is for temp use by this function and should be released on return
- *
- * This copies the checksums out of the log tree and inserts them into
- * the subvolume.  Any existing checksums for this range in the file
- * are overwritten, and new items are added where required.
- *
- * We keep this simple by reusing the btrfs_ordered_sum code from
- * the data=ordered mode.  This basically means making a copy
- * of all the checksums in ram, which we have to do anyway for kmap
- * rules.
- *
- * The copy is then sent down to btrfs_csum_file_blocks, which
- * does all the hard work of finding existing items in the file
- * or adding new ones.
- */
-static noinline int replay_one_csum(struct btrfs_trans_handle *trans,
-                                     struct btrfs_root *root,
-                                     struct btrfs_path *path,
-                                     struct extent_buffer *eb, int slot,
-                                     struct btrfs_key *key)
+static int insert_orphan_item(struct btrfs_trans_handle *trans,
+                             struct btrfs_root *root, u64 offset)
 {
        int ret;
-       u32 item_size = btrfs_item_size_nr(eb, slot);
-       u64 cur_offset;
-       unsigned long file_bytes;
-       struct btrfs_ordered_sum *sums;
-       struct btrfs_sector_sum *sector_sum;
-       struct inode *inode;
-       unsigned long ptr;
-
-       file_bytes = (item_size / BTRFS_CRC32_SIZE) * root->sectorsize;
-       inode = read_one_inode(root, key->objectid);
-       if (!inode) {
-               return -EIO;
-       }
-
-       sums = kzalloc(btrfs_ordered_sum_size(root, file_bytes), GFP_NOFS);
-       if (!sums) {
-               iput(inode);
-               return -ENOMEM;
-       }
-
-       INIT_LIST_HEAD(&sums->list);
-       sums->len = file_bytes;
-       sums->file_offset = key->offset;
+       ret = btrfs_find_orphan_item(root, offset);
+       if (ret > 0)
+               ret = btrfs_insert_orphan_item(trans, root, offset);
+       return ret;
+}
 
-       /*
-        * copy all the sums into the ordered sum struct
-        */
-       sector_sum = sums->sums;
-       cur_offset = key->offset;
-       ptr = btrfs_item_ptr_offset(eb, slot);
-       while(item_size > 0) {
-               sector_sum->offset = cur_offset;
-               read_extent_buffer(eb, &sector_sum->sum, ptr, BTRFS_CRC32_SIZE);
-               sector_sum++;
-               item_size -= BTRFS_CRC32_SIZE;
-               ptr += BTRFS_CRC32_SIZE;
-               cur_offset += root->sectorsize;
-       }
 
-       /* let btrfs_csum_file_blocks add them into the file */
-       ret = btrfs_csum_file_blocks(trans, root, inode, sums);
-       BUG_ON(ret);
-       kfree(sums);
-       iput(inode);
-
-       return 0;
-}
 /*
  * There are a few corners where the link count of the file can't
  * be properly maintained during replay.  So, instead of adding
@@ -996,7 +970,7 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
 
        path = btrfs_alloc_path();
 
-       while(1) {
+       while (1) {
                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
                if (ret < 0)
                        break;
@@ -1013,7 +987,7 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
                ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
                ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
                                                   path->slots[0]);
-               while(ptr < ptr_end) {
+               while (ptr < ptr_end) {
                        struct btrfs_inode_ref *ref;
 
                        ref = (struct btrfs_inode_ref *)ptr;
@@ -1028,13 +1002,24 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
                key.offset--;
                btrfs_release_path(root, path);
        }
-       btrfs_free_path(path);
+       btrfs_release_path(root, path);
        if (nlink != inode->i_nlink) {
                inode->i_nlink = nlink;
                btrfs_update_inode(trans, root, inode);
        }
        BTRFS_I(inode)->index_cnt = (u64)-1;
 
+       if (inode->i_nlink == 0) {
+               if (S_ISDIR(inode->i_mode)) {
+                       ret = replay_dir_deletes(trans, root, NULL, path,
+                                                inode->i_ino, 1);
+                       BUG_ON(ret);
+               }
+               ret = insert_orphan_item(trans, root, inode->i_ino);
+               BUG_ON(ret);
+       }
+       btrfs_free_path(path);
+
        return 0;
 }
 
@@ -1049,7 +1034,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
        key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
        key.type = BTRFS_ORPHAN_ITEM_KEY;
        key.offset = (u64)-1;
-       while(1) {
+       while (1) {
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0)
                        break;
@@ -1077,9 +1062,12 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
 
                iput(inode);
 
-               if (key.offset == 0)
-                       break;
-               key.offset--;
+               /*
+                * fixup on a directory may create new entries,
+                * fixup on a directory may create new entries,
+                * offset
+                */
+               key.offset = (u64)-1;
        }
        btrfs_release_path(root, path);
        return 0;
@@ -1207,8 +1195,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
        if (key->type == BTRFS_DIR_ITEM_KEY) {
                dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
                                       name, name_len, 1);
-       }
-       else if (key->type == BTRFS_DIR_INDEX_KEY) {
+       } else if (key->type == BTRFS_DIR_INDEX_KEY) {
                dst_di = btrfs_lookup_dir_index_item(trans, root, path,
                                                     key->objectid,
                                                     key->offset, name,
@@ -1257,8 +1244,7 @@ insert:
        ret = insert_one_name(trans, root, path, key->objectid, key->offset,
                              name, name_len, log_type, &log_key);
 
-       if (ret && ret != -ENOENT)
-               BUG();
+       BUG_ON(ret && ret != -ENOENT);
        goto out;
 }
 
@@ -1283,7 +1269,7 @@ static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
 
        ptr = btrfs_item_ptr_offset(eb, slot);
        ptr_end = ptr + item_size;
-       while(ptr < ptr_end) {
+       while (ptr < ptr_end) {
                di = (struct btrfs_dir_item *)ptr;
                name_len = btrfs_dir_name_len(eb, di);
                ret = replay_one_name(trans, root, path, eb, di, key);
@@ -1409,7 +1395,7 @@ again:
        item_size = btrfs_item_size_nr(eb, slot);
        ptr = btrfs_item_ptr_offset(eb, slot);
        ptr_end = ptr + item_size;
-       while(ptr < ptr_end) {
+       while (ptr < ptr_end) {
                di = (struct btrfs_dir_item *)ptr;
                name_len = btrfs_dir_name_len(eb, di);
                name = kmalloc(name_len, GFP_NOFS);
@@ -1420,11 +1406,11 @@ again:
                read_extent_buffer(eb, name, (unsigned long)(di + 1),
                                  name_len);
                log_di = NULL;
-               if (dir_key->type == BTRFS_DIR_ITEM_KEY) {
+               if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
                        log_di = btrfs_lookup_dir_item(trans, log, log_path,
                                                       dir_key->objectid,
                                                       name, name_len, 0);
-               } else if (dir_key->type == BTRFS_DIR_INDEX_KEY) {
+               } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
                        log_di = btrfs_lookup_dir_index_item(trans, log,
                                                     log_path,
                                                     dir_key->objectid,
@@ -1485,7 +1471,7 @@ static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
                                       struct btrfs_root *root,
                                       struct btrfs_root *log,
                                       struct btrfs_path *path,
-                                      u64 dirid)
+                                      u64 dirid, int del_all)
 {
        u64 range_start;
        u64 range_end;
@@ -1514,14 +1500,18 @@ static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
 again:
        range_start = 0;
        range_end = 0;
-       while(1) {
-               ret = find_dir_range(log, path, dirid, key_type,
-                                    &range_start, &range_end);
-               if (ret != 0)
-                       break;
+       while (1) {
+               if (del_all)
+                       range_end = (u64)-1;
+               else {
+                       ret = find_dir_range(log, path, dirid, key_type,
+                                            &range_start, &range_end);
+                       if (ret != 0)
+                               break;
+               }
 
                dir_key.offset = range_start;
-               while(1) {
+               while (1) {
                        int nritems;
                        ret = btrfs_search_slot(NULL, root, &dir_key, path,
                                                0, 0);
@@ -1544,7 +1534,8 @@ again:
                                break;
 
                        ret = check_item_in_log(trans, root, log, path,
-                                               log_path, dir, &found_key);
+                                               log_path, dir,
+                                               &found_key);
                        BUG_ON(ret);
                        if (found_key.offset == (u64)-1)
                                break;
@@ -1612,7 +1603,6 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
                /* inode keys are done during the first stage */
                if (key.type == BTRFS_INODE_ITEM_KEY &&
                    wc->stage == LOG_WALK_REPLAY_INODES) {
-                       struct inode *inode;
                        struct btrfs_inode_item *inode_item;
                        u32 mode;
 
@@ -1621,27 +1611,23 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
                        mode = btrfs_inode_mode(eb, inode_item);
                        if (S_ISDIR(mode)) {
                                ret = replay_dir_deletes(wc->trans,
-                                        root, log, path, key.objectid);
+                                        root, log, path, key.objectid, 0);
                                BUG_ON(ret);
                        }
                        ret = overwrite_item(wc->trans, root, path,
                                             eb, i, &key);
                        BUG_ON(ret);
 
-                       /* for regular files, truncate away
-                        * extents past the new EOF
+                       /* for regular files, make sure corresponding
+                        * orphan item exists. extents past the new EOF
+                        * will be truncated later by orphan cleanup.
                         */
                        if (S_ISREG(mode)) {
-                               inode = read_one_inode(root,
-                                                      key.objectid);
-                               BUG_ON(!inode);
-
-                               ret = btrfs_truncate_inode_items(wc->trans,
-                                       root, inode, inode->i_size,
-                                       BTRFS_EXTENT_DATA_KEY);
+                               ret = insert_orphan_item(wc->trans, root,
+                                                        key.objectid);
                                BUG_ON(ret);
-                               iput(inode);
                        }
+
                        ret = link_to_fixup_dir(wc->trans, root,
                                                path, key.objectid);
                        BUG_ON(ret);
@@ -1662,10 +1648,6 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
                        ret = replay_one_extent(wc->trans, root, path,
                                                eb, i, &key);
                        BUG_ON(ret);
-               } else if (key.type == BTRFS_CSUM_ITEM_KEY) {
-                       ret = replay_one_csum(wc->trans, root, path,
-                                             eb, i, &key);
-                       BUG_ON(ret);
                } else if (key.type == BTRFS_DIR_ITEM_KEY ||
                           key.type == BTRFS_DIR_INDEX_KEY) {
                        ret = replay_one_dir_item(wc->trans, root, path,
@@ -1677,7 +1659,7 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
        return 0;
 }
 
-static int noinline walk_down_log_tree(struct btrfs_trans_handle *trans,
+static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct btrfs_path *path, int *level,
                                   struct walk_control *wc)
@@ -1695,7 +1677,7 @@ static int noinline walk_down_log_tree(struct btrfs_trans_handle *trans,
        WARN_ON(*level < 0);
        WARN_ON(*level >= BTRFS_MAX_LEVEL);
 
-       while(*level > 0) {
+       while (*level > 0) {
                WARN_ON(*level < 0);
                WARN_ON(*level >= BTRFS_MAX_LEVEL);
                cur = path->nodes[*level];
@@ -1726,12 +1708,10 @@ static int noinline walk_down_log_tree(struct btrfs_trans_handle *trans,
 
                                btrfs_tree_lock(next);
                                clean_tree_block(trans, root, next);
+                               btrfs_set_lock_blocking(next);
                                btrfs_wait_tree_block_writeback(next);
                                btrfs_tree_unlock(next);
 
-                               ret = btrfs_drop_leaf_ref(trans, root, next);
-                               BUG_ON(ret);
-
                                WARN_ON(root_owner !=
                                        BTRFS_TREE_LOG_OBJECTID);
                                ret = btrfs_free_reserved_extent(root,
@@ -1754,11 +1734,11 @@ static int noinline walk_down_log_tree(struct btrfs_trans_handle *trans,
        WARN_ON(*level < 0);
        WARN_ON(*level >= BTRFS_MAX_LEVEL);
 
-       if (path->nodes[*level] == root->node) {
+       if (path->nodes[*level] == root->node)
                parent = path->nodes[*level];
-       } else {
+       else
                parent = path->nodes[*level + 1];
-       }
+
        bytenr = path->nodes[*level]->start;
 
        blocksize = btrfs_level_size(root, *level);
@@ -1772,13 +1752,10 @@ static int noinline walk_down_log_tree(struct btrfs_trans_handle *trans,
                next = path->nodes[*level];
                btrfs_tree_lock(next);
                clean_tree_block(trans, root, next);
+               btrfs_set_lock_blocking(next);
                btrfs_wait_tree_block_writeback(next);
                btrfs_tree_unlock(next);
 
-               if (*level == 0) {
-                       ret = btrfs_drop_leaf_ref(trans, root, next);
-                       BUG_ON(ret);
-               }
                WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
                ret = btrfs_free_reserved_extent(root, bytenr, blocksize);
                BUG_ON(ret);
@@ -1791,7 +1768,7 @@ static int noinline walk_down_log_tree(struct btrfs_trans_handle *trans,
        return 0;
 }
 
-static int noinline walk_up_log_tree(struct btrfs_trans_handle *trans,
+static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path, int *level,
                                 struct walk_control *wc)
@@ -1802,7 +1779,7 @@ static int noinline walk_up_log_tree(struct btrfs_trans_handle *trans,
        int slot;
        int ret;
 
-       for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
+       for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
                slot = path->slots[i];
                if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
                        struct extent_buffer *node;
@@ -1829,15 +1806,10 @@ static int noinline walk_up_log_tree(struct btrfs_trans_handle *trans,
 
                                btrfs_tree_lock(next);
                                clean_tree_block(trans, root, next);
+                               btrfs_set_lock_blocking(next);
                                btrfs_wait_tree_block_writeback(next);
                                btrfs_tree_unlock(next);
 
-                               if (*level == 0) {
-                                       ret = btrfs_drop_leaf_ref(trans, root,
-                                                                 next);
-                                       BUG_ON(ret);
-                               }
-
                                WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
                                ret = btrfs_free_reserved_extent(root,
                                                path->nodes[*level]->start,
@@ -1876,7 +1848,7 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
        extent_buffer_get(log->node);
        path->slots[level] = 0;
 
-       while(1) {
+       while (1) {
                wret = walk_down_log_tree(trans, log, path, &level, wc);
                if (wret > 0)
                        break;
@@ -1901,14 +1873,10 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
 
                        btrfs_tree_lock(next);
                        clean_tree_block(trans, log, next);
+                       btrfs_set_lock_blocking(next);
                        btrfs_wait_tree_block_writeback(next);
                        btrfs_tree_unlock(next);
 
-                       if (orig_level == 0) {
-                               ret = btrfs_drop_leaf_ref(trans, log,
-                                                         next);
-                               BUG_ON(ret);
-                       }
                        WARN_ON(log->root_key.objectid !=
                                BTRFS_TREE_LOG_OBJECTID);
                        ret = btrfs_free_reserved_extent(log, next->start,
@@ -1924,93 +1892,245 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
                }
        }
        btrfs_free_path(path);
-       if (wc->free)
-               free_extent_buffer(log->node);
        return ret;
 }
 
-int wait_log_commit(struct btrfs_root *log)
+/*
+ * helper function to update the item for a given subvolumes log root
+ * in the tree of log roots
+ */
+static int update_log_root(struct btrfs_trans_handle *trans,
+                          struct btrfs_root *log)
+{
+       int ret;
+
+       if (log->log_transid == 1) {
+               /* insert root item on the first sync */
+               ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
+                               &log->root_key, &log->root_item);
+       } else {
+               ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
+                               &log->root_key, &log->root_item);
+       }
+       return ret;
+}
+
+static int wait_log_commit(struct btrfs_trans_handle *trans,
+                          struct btrfs_root *root, unsigned long transid)
 {
        DEFINE_WAIT(wait);
-       u64 transid = log->fs_info->tree_log_transid;
+       int index = transid % 2;
 
+       /*
+        * we only allow two pending log transactions at a time,
+        * so we know that if ours is more than 2 older than the
+        * current transaction, we're done
+        */
        do {
-               prepare_to_wait(&log->fs_info->tree_log_wait, &wait,
-                               TASK_UNINTERRUPTIBLE);
-               mutex_unlock(&log->fs_info->tree_log_mutex);
-               if (atomic_read(&log->fs_info->tree_log_commit))
+               prepare_to_wait(&root->log_commit_wait[index],
+                               &wait, TASK_UNINTERRUPTIBLE);
+               mutex_unlock(&root->log_mutex);
+
+               if (root->fs_info->last_trans_log_full_commit !=
+                   trans->transid && root->log_transid < transid + 2 &&
+                   atomic_read(&root->log_commit[index]))
                        schedule();
-               finish_wait(&log->fs_info->tree_log_wait, &wait);
-               mutex_lock(&log->fs_info->tree_log_mutex);
-       } while(transid == log->fs_info->tree_log_transid &&
-               atomic_read(&log->fs_info->tree_log_commit));
+
+               finish_wait(&root->log_commit_wait[index], &wait);
+               mutex_lock(&root->log_mutex);
+       } while (root->log_transid < transid + 2 &&
+                atomic_read(&root->log_commit[index]));
+       return 0;
+}
+
+static int wait_for_writer(struct btrfs_trans_handle *trans,
+                          struct btrfs_root *root)
+{
+       DEFINE_WAIT(wait);
+       while (atomic_read(&root->log_writers)) {
+               prepare_to_wait(&root->log_writer_wait,
+                               &wait, TASK_UNINTERRUPTIBLE);
+               mutex_unlock(&root->log_mutex);
+               if (root->fs_info->last_trans_log_full_commit !=
+                   trans->transid && atomic_read(&root->log_writers))
+                       schedule();
+               mutex_lock(&root->log_mutex);
+               finish_wait(&root->log_writer_wait, &wait);
+       }
        return 0;
 }
 
 /*
  * btrfs_sync_log does sends a given tree log down to the disk and
  * updates the super blocks to record it.  When this call is done,
- * you know that any inodes previously logged are safely on disk
+ * you know that any inodes previously logged are safely on disk only
+ * if it returns 0.
+ *
+ * Any other return value means you need to call btrfs_commit_transaction.
+ * Some of the edge cases for fsyncing directories that have had unlinks
+ * or renames done in the past mean that sometimes the only safe
+ * fsync is to commit the whole FS.  When btrfs_sync_log returns -EAGAIN,
+ * that has happened.
  */
 int btrfs_sync_log(struct btrfs_trans_handle *trans,
                   struct btrfs_root *root)
 {
+       int index1;
+       int index2;
+       int mark;
        int ret;
-       unsigned long batch;
        struct btrfs_root *log = root->log_root;
-
-       mutex_lock(&log->fs_info->tree_log_mutex);
-       if (atomic_read(&log->fs_info->tree_log_commit)) {
-               wait_log_commit(log);
-               goto out;
+       struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
+       unsigned long log_transid = 0;
+
+       mutex_lock(&root->log_mutex);
+       index1 = root->log_transid % 2;
+       if (atomic_read(&root->log_commit[index1])) {
+               wait_log_commit(trans, root, root->log_transid);
+               mutex_unlock(&root->log_mutex);
+               return 0;
        }
-       atomic_set(&log->fs_info->tree_log_commit, 1);
-
-       while(1) {
-               batch = log->fs_info->tree_log_batch;
-               mutex_unlock(&log->fs_info->tree_log_mutex);
-               schedule_timeout_uninterruptible(1);
-               mutex_lock(&log->fs_info->tree_log_mutex);
-
-               while(atomic_read(&log->fs_info->tree_log_writers)) {
-                       DEFINE_WAIT(wait);
-                       prepare_to_wait(&log->fs_info->tree_log_wait, &wait,
-                                       TASK_UNINTERRUPTIBLE);
-                       mutex_unlock(&log->fs_info->tree_log_mutex);
-                       if (atomic_read(&log->fs_info->tree_log_writers))
-                               schedule();
-                       mutex_lock(&log->fs_info->tree_log_mutex);
-                       finish_wait(&log->fs_info->tree_log_wait, &wait);
+       atomic_set(&root->log_commit[index1], 1);
+
+       /* wait for previous tree log sync to complete */
+       if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
+               wait_log_commit(trans, root, root->log_transid - 1);
+
+       while (1) {
+               unsigned long batch = root->log_batch;
+               if (root->log_multiple_pids) {
+                       mutex_unlock(&root->log_mutex);
+                       schedule_timeout_uninterruptible(1);
+                       mutex_lock(&root->log_mutex);
                }
-               if (batch == log->fs_info->tree_log_batch)
+               wait_for_writer(trans, root);
+               if (batch == root->log_batch)
                        break;
        }
 
-       ret = btrfs_write_and_wait_marked_extents(log, &log->dirty_log_pages);
+       /* bail out if we need to do a full commit */
+       if (root->fs_info->last_trans_log_full_commit == trans->transid) {
+               ret = -EAGAIN;
+               mutex_unlock(&root->log_mutex);
+               goto out;
+       }
+
+       log_transid = root->log_transid;
+       if (log_transid % 2 == 0)
+               mark = EXTENT_DIRTY;
+       else
+               mark = EXTENT_NEW;
+
+       /* we start IO on all the marked extents here, but we don't actually
+        * wait for them until later.
+        */
+       ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
        BUG_ON(ret);
-       ret = btrfs_write_and_wait_marked_extents(root->fs_info->log_root_tree,
-                              &root->fs_info->log_root_tree->dirty_log_pages);
+
+       btrfs_set_root_node(&log->root_item, log->node);
+
+       root->log_batch = 0;
+       root->log_transid++;
+       log->log_transid = root->log_transid;
+       root->log_start_pid = 0;
+       smp_mb();
+       /*
+        * IO has been started, blocks of the log tree have WRITTEN flag set
+        * in their headers. new modifications of the log will be written to
+        * new positions. so it's safe to allow log writers to go in.
+        */
+       mutex_unlock(&root->log_mutex);
+
+       mutex_lock(&log_root_tree->log_mutex);
+       log_root_tree->log_batch++;
+       atomic_inc(&log_root_tree->log_writers);
+       mutex_unlock(&log_root_tree->log_mutex);
+
+       ret = update_log_root(trans, log);
+       BUG_ON(ret);
+
+       mutex_lock(&log_root_tree->log_mutex);
+       if (atomic_dec_and_test(&log_root_tree->log_writers)) {
+               smp_mb();
+               if (waitqueue_active(&log_root_tree->log_writer_wait))
+                       wake_up(&log_root_tree->log_writer_wait);
+       }
+
+       index2 = log_root_tree->log_transid % 2;
+       if (atomic_read(&log_root_tree->log_commit[index2])) {
+               btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+               wait_log_commit(trans, log_root_tree,
+                               log_root_tree->log_transid);
+               mutex_unlock(&log_root_tree->log_mutex);
+               goto out;
+       }
+       atomic_set(&log_root_tree->log_commit[index2], 1);
+
+       if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
+               wait_log_commit(trans, log_root_tree,
+                               log_root_tree->log_transid - 1);
+       }
+
+       wait_for_writer(trans, log_root_tree);
+
+       /*
+        * now that we've moved on to the tree of log tree roots,
+        * check the full commit flag again
+        */
+       if (root->fs_info->last_trans_log_full_commit == trans->transid) {
+               btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
+               mutex_unlock(&log_root_tree->log_mutex);
+               ret = -EAGAIN;
+               goto out_wake_log_root;
+       }
+
+       ret = btrfs_write_and_wait_marked_extents(log_root_tree,
+                               &log_root_tree->dirty_log_pages,
+                               EXTENT_DIRTY | EXTENT_NEW);
        BUG_ON(ret);
+       btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
 
        btrfs_set_super_log_root(&root->fs_info->super_for_commit,
-                                log->fs_info->log_root_tree->node->start);
+                               log_root_tree->node->start);
        btrfs_set_super_log_root_level(&root->fs_info->super_for_commit,
-                      btrfs_header_level(log->fs_info->log_root_tree->node));
+                               btrfs_header_level(log_root_tree->node));
 
-       write_ctree_super(trans, log->fs_info->tree_root);
-       log->fs_info->tree_log_transid++;
-       log->fs_info->tree_log_batch = 0;
-       atomic_set(&log->fs_info->tree_log_commit, 0);
+       log_root_tree->log_batch = 0;
+       log_root_tree->log_transid++;
        smp_mb();
-       if (waitqueue_active(&log->fs_info->tree_log_wait))
-               wake_up(&log->fs_info->tree_log_wait);
+
+       mutex_unlock(&log_root_tree->log_mutex);
+
+       /*
+        * nobody else is going to jump in and write the ctree
+        * super here because the log_commit atomic below is protecting
+        * us.  We must be called with a transaction handle pinning
+        * the running transaction open, so a full commit can't hop
+        * in and cause problems either.
+        */
+       write_ctree_super(trans, root->fs_info->tree_root, 1);
+       ret = 0;
+
+       mutex_lock(&root->log_mutex);
+       if (root->last_log_commit < log_transid)
+               root->last_log_commit = log_transid;
+       mutex_unlock(&root->log_mutex);
+
+out_wake_log_root:
+       atomic_set(&log_root_tree->log_commit[index2], 0);
+       smp_mb();
+       if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
+               wake_up(&log_root_tree->log_commit_wait[index2]);
 out:
-       mutex_unlock(&log->fs_info->tree_log_mutex);
+       atomic_set(&root->log_commit[index1], 0);
+       smp_mb();
+       if (waitqueue_active(&root->log_commit_wait[index1]))
+               wake_up(&root->log_commit_wait[index1]);
        return 0;
-
 }
 
-/* * free all the extents used by the tree log.  This should be called
+/*
+ * free all the extents used by the tree log.  This should be called
  * at commit time of the full transaction
  */
 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
@@ -2025,54 +2145,35 @@ int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
                .process_func = process_one_buffer
        };
 
-       if (!root->log_root)
+       if (!root->log_root || root->fs_info->log_root_recovering)
                return 0;
 
        log = root->log_root;
        ret = walk_log_tree(trans, log, &wc);
        BUG_ON(ret);
 
-       while(1) {
+       while (1) {
                ret = find_first_extent_bit(&log->dirty_log_pages,
-                                   0, &start, &end, EXTENT_DIRTY);
+                               0, &start, &end, EXTENT_DIRTY | EXTENT_NEW);
                if (ret)
                        break;
 
-               clear_extent_dirty(&log->dirty_log_pages,
-                                  start, end, GFP_NOFS);
+               clear_extent_bits(&log->dirty_log_pages, start, end,
+                                 EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
        }
 
-       log = root->log_root;
-       ret = btrfs_del_root(trans, root->fs_info->log_root_tree,
-                            &log->root_key);
-       BUG_ON(ret);
+       if (log->log_transid > 0) {
+               ret = btrfs_del_root(trans, root->fs_info->log_root_tree,
+                                    &log->root_key);
+               BUG_ON(ret);
+       }
        root->log_root = NULL;
-       kfree(root->log_root);
+       free_extent_buffer(log->node);
+       kfree(log);
        return 0;
 }
 
 /*
- * helper function to update the item for a given subvolumes log root
- * in the tree of log roots
- */
-static int update_log_root(struct btrfs_trans_handle *trans,
-                          struct btrfs_root *log)
-{
-       u64 bytenr = btrfs_root_bytenr(&log->root_item);
-       int ret;
-
-       if (log->node->start == bytenr)
-               return 0;
-
-       btrfs_set_root_bytenr(&log->root_item, log->node->start);
-       btrfs_set_root_level(&log->root_item, btrfs_header_level(log->node));
-       ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
-                               &log->root_key, &log->root_item);
-       BUG_ON(ret);
-       return ret;
-}
-
-/*
  * If both a file and directory are logged, and unlinks or renames are
  * mixed in, we have a few interesting corners:
  *
@@ -2163,7 +2264,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
 
        btrfs_free_path(path);
        mutex_unlock(&BTRFS_I(dir)->log_mutex);
-       end_log_trans(root);
+       btrfs_end_log_trans(root);
 
        return 0;
 }
@@ -2190,7 +2291,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
        ret = btrfs_del_inode_ref(trans, log, name, name_len, inode->i_ino,
                                  dirid, &index);
        mutex_unlock(&BTRFS_I(inode)->log_mutex);
-       end_log_trans(root);
+       btrfs_end_log_trans(root);
 
        return ret;
 }
@@ -2288,9 +2389,8 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
                        struct btrfs_key tmp;
                        btrfs_item_key_to_cpu(path->nodes[0], &tmp,
                                              path->slots[0]);
-                       if (key_type == tmp.type) {
+                       if (key_type == tmp.type)
                                first_offset = max(min_offset, tmp.offset) + 1;
-                       }
                }
                goto done;
        }
@@ -2320,7 +2420,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans,
         * we have a block from this transaction, log every item in it
         * from our directory
         */
-       while(1) {
+       while (1) {
                struct btrfs_key tmp;
                src = path->nodes[0];
                nritems = btrfs_header_nritems(src);
@@ -2397,7 +2497,7 @@ static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
 again:
        min_key = 0;
        max_key = 0;
-       while(1) {
+       while (1) {
                ret = log_dir_items(trans, root, inode, path,
                                    dst_path, key_type, min_key,
                                    &max_key);
@@ -2433,7 +2533,7 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
        key.type = max_key_type;
        key.offset = (u64)-1;
 
-       while(1) {
+       while (1) {
                ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
 
                if (ret != 1)
@@ -2472,6 +2572,9 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
        u32 *ins_sizes;
        char *ins_data;
        int i;
+       struct list_head ordered_sums;
+
+       INIT_LIST_HEAD(&ordered_sums);
 
        ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
                           nr * sizeof(u32), GFP_NOFS);
@@ -2486,7 +2589,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
                                       ins_keys, ins_sizes, nr);
        BUG_ON(ret);
 
-       for (i = 0; i < nr; i++) {
+       for (i = 0; i < nr; i++, dst_path->slots[0]++) {
                dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
                                                   dst_path->slots[0]);
 
@@ -2520,29 +2623,52 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
                                                struct btrfs_file_extent_item);
 
                        found_type = btrfs_file_extent_type(src, extent);
-                       if (found_type == BTRFS_FILE_EXTENT_REG) {
-                               u64 ds = btrfs_file_extent_disk_bytenr(src,
-                                                                  extent);
-                               u64 dl = btrfs_file_extent_disk_num_bytes(src,
-                                                                     extent);
+                       if (found_type == BTRFS_FILE_EXTENT_REG ||
+                           found_type == BTRFS_FILE_EXTENT_PREALLOC) {
+                               u64 ds, dl, cs, cl;
+                               ds = btrfs_file_extent_disk_bytenr(src,
+                                                               extent);
                                /* ds == 0 is a hole */
-                               if (ds != 0) {
-                                       ret = btrfs_inc_extent_ref(trans, log,
-                                                  ds, dl,
-                                                  dst_path->nodes[0]->start,
-                                                  BTRFS_TREE_LOG_OBJECTID,
-                                                  trans->transid,
-                                                  ins_keys[i].objectid);
-                                       BUG_ON(ret);
+                               if (ds == 0)
+                                       continue;
+
+                               dl = btrfs_file_extent_disk_num_bytes(src,
+                                                               extent);
+                               cs = btrfs_file_extent_offset(src, extent);
+                               cl = btrfs_file_extent_num_bytes(src,
+                                                               extent);
+                               if (btrfs_file_extent_compression(src,
+                                                                 extent)) {
+                                       cs = 0;
+                                       cl = dl;
                                }
+
+                               ret = btrfs_lookup_csums_range(
+                                               log->fs_info->csum_root,
+                                               ds + cs, ds + cs + cl - 1,
+                                               &ordered_sums);
+                               BUG_ON(ret);
                        }
                }
-               dst_path->slots[0]++;
        }
 
        btrfs_mark_buffer_dirty(dst_path->nodes[0]);
        btrfs_release_path(log, dst_path);
        kfree(ins_data);
+
+       /*
+        * we have to do this after the loop above to avoid changing the
+        * log tree while trying to change the log tree.
+        */
+       while (!list_empty(&ordered_sums)) {
+               struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
+                                                  struct btrfs_ordered_sum,
+                                                  list);
+               ret = btrfs_csum_file_blocks(trans, log, sums);
+               BUG_ON(ret);
+               list_del(&sums->list);
+               kfree(sums);
+       }
        return 0;
 }
 
@@ -2560,7 +2686,7 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
  *
  * This handles both files and directories.
  */
-static int __btrfs_log_inode(struct btrfs_trans_handle *trans,
+static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, struct inode *inode,
                             int inode_only)
 {
@@ -2586,28 +2712,17 @@ static int __btrfs_log_inode(struct btrfs_trans_handle *trans,
        min_key.offset = 0;
 
        max_key.objectid = inode->i_ino;
+
+       /* today the code can only do partial logging of directories */
+       if (!S_ISDIR(inode->i_mode))
+           inode_only = LOG_INODE_ALL;
+
        if (inode_only == LOG_INODE_EXISTS || S_ISDIR(inode->i_mode))
                max_key.type = BTRFS_XATTR_ITEM_KEY;
        else
                max_key.type = (u8)-1;
        max_key.offset = (u64)-1;
 
-       /*
-        * if this inode has already been logged and we're in inode_only
-        * mode, we don't want to delete the things that have already
-        * been written to the log.
-        *
-        * But, if the inode has been through an inode_only log,
-        * the logged_trans field is not set.  This allows us to catch
-        * any new names for this inode in the backrefs by logging it
-        * again
-        */
-       if (inode_only == LOG_INODE_EXISTS &&
-           BTRFS_I(inode)->logged_trans == trans->transid) {
-               btrfs_free_path(path);
-               btrfs_free_path(dst_path);
-               goto out;
-       }
        mutex_lock(&BTRFS_I(inode)->log_mutex);
 
        /*
@@ -2627,7 +2742,7 @@ static int __btrfs_log_inode(struct btrfs_trans_handle *trans,
        BUG_ON(ret);
        path->keep_locks = 1;
 
-       while(1) {
+       while (1) {
                ins_nr = 0;
                ret = btrfs_search_forward(root, &min_key, &max_key,
                                           path, 0, trans->transid);
@@ -2694,7 +2809,6 @@ next_slot:
        if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
                btrfs_release_path(root, path);
                btrfs_release_path(log, dst_path);
-               BTRFS_I(inode)->log_dirty_trans = 0;
                ret = log_directory_changes(trans, root, inode, path, dst_path);
                BUG_ON(ret);
        }
@@ -2703,58 +2817,171 @@ next_slot:
 
        btrfs_free_path(path);
        btrfs_free_path(dst_path);
+       return 0;
+}
 
-       mutex_lock(&root->fs_info->tree_log_mutex);
-       ret = update_log_root(trans, log);
-       BUG_ON(ret);
-       mutex_unlock(&root->fs_info->tree_log_mutex);
+/*
+ * follow the dentry parent pointers up the chain and see if any
+ * of the directories in it require a full commit before they can
+ * be logged.  Returns zero if nothing special needs to be done or 1 if
+ * a full commit is required.
+ */
+static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
+                                              struct inode *inode,
+                                              struct dentry *parent,
+                                              struct super_block *sb,
+                                              u64 last_committed)
+{
+       int ret = 0;
+       struct btrfs_root *root;
+
+       /*
+        * for regular files, if its inode is already on disk, we don't
+        * have to worry about the parents at all.  This is because
+        * we can use the last_unlink_trans field to record renames
+        * and other fun in this file.
+        */
+       if (S_ISREG(inode->i_mode) &&
+           BTRFS_I(inode)->generation <= last_committed &&
+           BTRFS_I(inode)->last_unlink_trans <= last_committed)
+               goto out;
+
+       /* for non-directories, start the walk from the parent directory */
+       if (!S_ISDIR(inode->i_mode)) {
+               if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
+                       goto out;
+               inode = parent->d_inode;
+       }
+
+       /*
+        * walk up the dentry chain, marking each directory as logged in
+        * this transaction; a directory with an unlink or rename newer
+        * than the last commit (last_unlink_trans > last_committed)
+        * forces a full transaction commit.
+        */
+       while (1) {
+               BTRFS_I(inode)->logged_trans = trans->transid;
+               smp_mb();
+
+               if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
+                       root = BTRFS_I(inode)->root;
+
+                       /*
+                        * make sure any commits to the log are forced
+                        * to be full commits
+                        */
+                       root->fs_info->last_trans_log_full_commit =
+                               trans->transid;
+                       ret = 1;
+                       break;
+               }
+
+               if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
+                       break;
+
+               if (IS_ROOT(parent))
+                       break;
+
+               parent = parent->d_parent;
+               inode = parent->d_inode;
+
+       }
 out:
-       return 0;
+       return ret;
 }
 
-int btrfs_log_inode(struct btrfs_trans_handle *trans,
-                   struct btrfs_root *root, struct inode *inode,
-                   int inode_only)
+/*
+ * returns 1 when the inode was logged in the current transaction
+ * (logged_trans == trans->transid) and its latest sub-transaction has
+ * already reached a log commit (last_sub_trans <= last_log_commit),
+ * 0 otherwise.  root->log_mutex keeps the check stable against a
+ * racing log commit.
+ */
+static int inode_in_log(struct btrfs_trans_handle *trans,
+                struct inode *inode)
 {
-       int ret;
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+       int ret = 0;
 
-       start_log_trans(trans, root);
-       ret = __btrfs_log_inode(trans, root, inode, inode_only);
-       end_log_trans(root);
+       mutex_lock(&root->log_mutex);
+       if (BTRFS_I(inode)->logged_trans == trans->transid &&
+           BTRFS_I(inode)->last_sub_trans <= root->last_log_commit)
+               ret = 1;
+       mutex_unlock(&root->log_mutex);
        return ret;
 }
 
+
 /*
  * helper function around btrfs_log_inode to make sure newly created
  * parent directories also end up in the log.  A minimal inode and backref
  * only logging is done of any parent directories that are older than
  * the last committed transaction
  */
-int btrfs_log_dentry(struct btrfs_trans_handle *trans,
-                   struct btrfs_root *root, struct dentry *dentry)
+int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
+                   struct btrfs_root *root, struct inode *inode,
+                   struct dentry *parent, int exists_only)
 {
-       int inode_only = LOG_INODE_ALL;
+       int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
        struct super_block *sb;
-       int ret;
+       int ret = 0;
+       u64 last_committed = root->fs_info->last_trans_committed;
+
+       sb = inode->i_sb;
+
+       if (btrfs_test_opt(root, NOTREELOG)) {
+               ret = 1;
+               goto end_no_trans;
+       }
+
+       if (root->fs_info->last_trans_log_full_commit >
+           root->fs_info->last_trans_committed) {
+               ret = 1;
+               goto end_no_trans;
+       }
+
+       if (root != BTRFS_I(inode)->root ||
+           btrfs_root_refs(&root->root_item) == 0) {
+               ret = 1;
+               goto end_no_trans;
+       }
+
+       ret = check_parent_dirs_for_sync(trans, inode, parent,
+                                        sb, last_committed);
+       if (ret)
+               goto end_no_trans;
+
+       if (inode_in_log(trans, inode)) {
+               ret = BTRFS_NO_LOG_SYNC;
+               goto end_no_trans;
+       }
 
        start_log_trans(trans, root);
-       sb = dentry->d_inode->i_sb;
-       while(1) {
-               ret = __btrfs_log_inode(trans, root, dentry->d_inode,
-                                       inode_only);
-               BUG_ON(ret);
-               inode_only = LOG_INODE_EXISTS;
 
-               dentry = dentry->d_parent;
-               if (!dentry || !dentry->d_inode || sb != dentry->d_inode->i_sb)
+       ret = btrfs_log_inode(trans, root, inode, inode_only);
+       BUG_ON(ret);
+
+       /*
+        * for regular files, if its inode is already on disk, we don't
+        * have to worry about the parents at all.  This is because
+        * we can use the last_unlink_trans field to record renames
+        * and other fun in this file.
+        */
+       if (S_ISREG(inode->i_mode) &&
+           BTRFS_I(inode)->generation <= last_committed &&
+           BTRFS_I(inode)->last_unlink_trans <= last_committed)
+                       goto no_parent;
+
+       inode_only = LOG_INODE_EXISTS;
+       while (1) {
+               if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
                        break;
 
-               if (BTRFS_I(dentry->d_inode)->generation <=
-                   root->fs_info->last_trans_committed)
+               inode = parent->d_inode;
+               if (root != BTRFS_I(inode)->root)
+                       break;
+
+               if (BTRFS_I(inode)->generation >
+                   root->fs_info->last_trans_committed) {
+                       ret = btrfs_log_inode(trans, root, inode, inode_only);
+                       BUG_ON(ret);
+               }
+               if (IS_ROOT(parent))
                        break;
+
+               parent = parent->d_parent;
        }
-       end_log_trans(root);
-       return 0;
+no_parent:
+       ret = 0;
+       btrfs_end_log_trans(root);
+end_no_trans:
+       return ret;
 }
 
 /*
@@ -2766,12 +2993,8 @@ int btrfs_log_dentry(struct btrfs_trans_handle *trans,
 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root, struct dentry *dentry)
 {
-       u64 gen;
-       gen = root->fs_info->last_trans_new_blockgroup;
-       if (gen > root->fs_info->last_trans_committed)
-               return 1;
-       else
-               return btrfs_log_dentry(trans, root, dentry);
+       /* exists_only == 0: log dentry's inode fully (LOG_INODE_ALL) */
+       return btrfs_log_inode_parent(trans, root, dentry->d_inode,
+                                     dentry->d_parent, 0);
 }
 
 /*
@@ -2788,7 +3011,6 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
        struct btrfs_key tmp_key;
        struct btrfs_root *log;
        struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
-       u64 highest_inode;
        struct walk_control wc = {
                .process_func = process_one_buffer,
                .stage = 0,
@@ -2810,7 +3032,7 @@ again:
        key.offset = (u64)-1;
        btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
 
-       while(1) {
+       while (1) {
                ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
                if (ret < 0)
                        break;
@@ -2835,10 +3057,10 @@ again:
                tmp_key.offset = (u64)-1;
 
                wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
-
                BUG_ON(!wc.replay_dest);
 
-               btrfs_record_root_in_trans(wc.replay_dest);
+               wc.replay_dest->log_root = log;
+               btrfs_record_root_in_trans(trans, wc.replay_dest);
                ret = walk_log_tree(trans, log, &wc);
                BUG_ON(ret);
 
@@ -2847,14 +3069,11 @@ again:
                                                      path);
                        BUG_ON(ret);
                }
-               ret = btrfs_find_highest_inode(wc.replay_dest, &highest_inode);
-               if (ret == 0) {
-                       wc.replay_dest->highest_inode = highest_inode;
-                       wc.replay_dest->last_inode_alloc = highest_inode;
-               }
 
                key.offset = found_key.offset - 1;
+               wc.replay_dest->log_root = NULL;
                free_extent_buffer(log->node);
+               free_extent_buffer(log->commit_root);
                kfree(log);
 
                if (found_key.offset == 0)
@@ -2887,3 +3106,94 @@ again:
        kfree(log_root_tree);
        return 0;
 }
+
+/*
+ * there are some corner cases where we want to force a full
+ * commit instead of allowing a directory to be logged.
+ *
+ * They revolve around files that were unlinked from the directory, and
+ * this function updates the parent directory so that a full commit is
+ * properly done if it is fsync'd later after the unlinks are done.
+ */
+void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
+                            struct inode *dir, struct inode *inode,
+                            int for_rename)
+{
+       /*
+        * when we're logging a file, if it hasn't been renamed
+        * or unlinked, and its inode is fully committed on disk,
+        * we don't have to worry about walking up the directory chain
+        * to log its parents.
+        *
+        * So, we use the last_unlink_trans field to put this transid
+        * into the file.  When the file is logged we check it and
+        * don't log the parents if the file is fully on disk.
+        */
+       if (S_ISREG(inode->i_mode))
+               BTRFS_I(inode)->last_unlink_trans = trans->transid;
+
+       /*
+        * if this directory was already logged any new
+        * names for this file/dir will get recorded
+        */
+       smp_mb();
+       if (BTRFS_I(dir)->logged_trans == trans->transid)
+               return;
+
+       /*
+        * if the inode we're about to unlink was logged,
+        * the log will be properly updated for any new names
+        */
+       if (BTRFS_I(inode)->logged_trans == trans->transid)
+               return;
+
+       /*
+        * when renaming files across directories, if the directory
+        * we're unlinking from gets fsync'd later on, there's
+        * no way to find the destination directory later and fsync it
+        * properly.  So, we have to be conservative and force commits
+        * so the new name gets discovered.
+        */
+       if (for_rename)
+               goto record;
+
+       /* we can safely do the unlink without any special recording */
+       return;
+
+       /*
+        * NOTE(review): structured as a goto ladder, presumably so more
+        * record/return cases can be added above — confirm with author
+        */
+record:
+       BTRFS_I(dir)->last_unlink_trans = trans->transid;
+}
+
+/*
+ * Call this after adding a new name for a file and it will properly
+ * update the log to reflect the new name.
+ *
+ * It will return zero if all goes well, and it will return 1 if a
+ * full transaction commit is required.
+ */
+int btrfs_log_new_name(struct btrfs_trans_handle *trans,
+                       struct inode *inode, struct inode *old_dir,
+                       struct dentry *parent)
+{
+       struct btrfs_root *root = BTRFS_I(inode)->root;
+
+       /*
+        * this will force the logging code to walk the dentry chain
+        * up for the file
+        */
+       if (S_ISREG(inode->i_mode))
+               BTRFS_I(inode)->last_unlink_trans = trans->transid;
+
+       /*
+        * if this inode hasn't been logged and the directory we're renaming
+        * it from hasn't been logged, we don't need to log it
+        */
+       if (BTRFS_I(inode)->logged_trans <=
+           root->fs_info->last_trans_committed &&
+           (!old_dir || BTRFS_I(old_dir)->logged_trans <=
+                   root->fs_info->last_trans_committed))
+               return 0;
+
+       /* exists_only == 1: minimal logging, then walk the parents */
+       return btrfs_log_inode_parent(trans, root, inode, parent, 1);
+}
+