Btrfs: Fix infinite loop in btrfs_extent_post_op
[safe/jmp/linux-2.6] / fs / btrfs / volumes.c
index 501d23d..fd0bedb 100644 (file)
 #include <linux/blkdev.h>
 #include <linux/random.h>
 #include <asm/div64.h>
+#include "compat.h"
 #include "ctree.h"
 #include "extent_map.h"
 #include "disk-io.h"
 #include "transaction.h"
 #include "print-tree.h"
 #include "volumes.h"
+#include "async-thread.h"
 
 struct map_lookup {
        u64 type;
@@ -39,6 +41,11 @@ struct map_lookup {
        struct btrfs_bio_stripe stripes[];
 };
 
+static int init_first_rw_device(struct btrfs_trans_handle *trans,
+                               struct btrfs_root *root,
+                               struct btrfs_device *device);
+static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
+
 #define map_lookup_size(n) (sizeof(struct map_lookup) + \
                            (sizeof(struct btrfs_bio_stripe) * (n)))
 
@@ -55,40 +62,49 @@ void btrfs_unlock_volumes(void)
        mutex_unlock(&uuid_mutex);
 }
 
+static void lock_chunks(struct btrfs_root *root)
+{
+       mutex_lock(&root->fs_info->chunk_mutex);
+}
+
+static void unlock_chunks(struct btrfs_root *root)
+{
+       mutex_unlock(&root->fs_info->chunk_mutex);
+}
+
+static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
+{
+       struct btrfs_device *device;
+       WARN_ON(fs_devices->opened);
+       while (!list_empty(&fs_devices->devices)) {
+               device = list_entry(fs_devices->devices.next,
+                                   struct btrfs_device, dev_list);
+               list_del(&device->dev_list);
+               kfree(device->name);
+               kfree(device);
+       }
+       kfree(fs_devices);
+}
+
 int btrfs_cleanup_fs_uuids(void)
 {
        struct btrfs_fs_devices *fs_devices;
-       struct list_head *uuid_cur;
-       struct list_head *devices_cur;
-       struct btrfs_device *dev;
 
-       list_for_each(uuid_cur, &fs_uuids) {
-               fs_devices = list_entry(uuid_cur, struct btrfs_fs_devices,
-                                       list);
-               while(!list_empty(&fs_devices->devices)) {
-                       devices_cur = fs_devices->devices.next;
-                       dev = list_entry(devices_cur, struct btrfs_device,
-                                        dev_list);
-                       if (dev->bdev) {
-                               close_bdev_excl(dev->bdev);
-                               fs_devices->open_devices--;
-                       }
-                       list_del(&dev->dev_list);
-                       kfree(dev->name);
-                       kfree(dev);
-               }
+       while (!list_empty(&fs_uuids)) {
+               fs_devices = list_entry(fs_uuids.next,
+                                       struct btrfs_fs_devices, list);
+               list_del(&fs_devices->list);
+               free_fs_devices(fs_devices);
        }
        return 0;
 }
 
-static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
-                                         u8 *uuid)
+static noinline struct btrfs_device *__find_device(struct list_head *head,
+                                                  u64 devid, u8 *uuid)
 {
        struct btrfs_device *dev;
-       struct list_head *cur;
 
-       list_for_each(cur, head) {
-               dev = list_entry(cur, struct btrfs_device, dev_list);
+       list_for_each_entry(dev, head, dev_list) {
                if (dev->devid == devid &&
                    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
                        return dev;
@@ -97,20 +113,130 @@ static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
        return NULL;
 }
 
-static struct btrfs_fs_devices *find_fsid(u8 *fsid)
+static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
 {
-       struct list_head *cur;
        struct btrfs_fs_devices *fs_devices;
 
-       list_for_each(cur, &fs_uuids) {
-               fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
+       list_for_each_entry(fs_devices, &fs_uuids, list) {
                if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
                        return fs_devices;
        }
        return NULL;
 }
 
-static int device_list_add(const char *path,
+/*
+ * we try to collect pending bios for a device so we don't get a large
+ * number of procs sending bios down to the same device.  This greatly
+ * improves the scheduler's ability to collect and merge the bios.
+ *
+ * But, it also turns into a long list of bios to process and that is sure
+ * to eventually make the worker thread block.  The solution here is to
+ * make some progress and then put this work struct back at the end of
+ * the list if the block device is congested.  This way, multiple devices
+ * can make progress from a single worker thread.
+ */
+static noinline int run_scheduled_bios(struct btrfs_device *device)
+{
+       struct bio *pending;
+       struct backing_dev_info *bdi;
+       struct btrfs_fs_info *fs_info;
+       struct bio *tail;
+       struct bio *cur;
+       int again = 0;
+       unsigned long num_run = 0;
+       unsigned long limit;
+
+       bdi = device->bdev->bd_inode->i_mapping->backing_dev_info;
+       fs_info = device->dev_root->fs_info;
+       limit = btrfs_async_submit_limit(fs_info);
+       limit = limit * 2 / 3;
+
+loop:
+       spin_lock(&device->io_lock);
+
+       /* take all the bios off the list at once and process them
+        * later on (without the lock held).  But, remember the
+        * tail and other pointers so the bios can be properly reinserted
+        * into the list if we hit congestion
+        */
+       pending = device->pending_bios;
+       tail = device->pending_bio_tail;
+       WARN_ON(pending && !tail);
+       device->pending_bios = NULL;
+       device->pending_bio_tail = NULL;
+
+       /*
+        * if pending was null this time around, no bios need processing
+        * at all and we can stop.  Otherwise it'll loop back up again
+        * and do an additional check so no bios are missed.
+        *
+        * device->running_pending is used to synchronize with the
+        * schedule_bio code.
+        */
+       if (pending) {
+               again = 1;
+               device->running_pending = 1;
+       } else {
+               again = 0;
+               device->running_pending = 0;
+       }
+       spin_unlock(&device->io_lock);
+
+       while (pending) {
+               cur = pending;
+               pending = pending->bi_next;
+               cur->bi_next = NULL;
+               atomic_dec(&fs_info->nr_async_bios);
+
+               if (atomic_read(&fs_info->nr_async_bios) < limit &&
+                   waitqueue_active(&fs_info->async_submit_wait))
+                       wake_up(&fs_info->async_submit_wait);
+
+               BUG_ON(atomic_read(&cur->bi_cnt) == 0);
+               bio_get(cur);
+               submit_bio(cur->bi_rw, cur);
+               bio_put(cur);
+               num_run++;
+
+               /*
+                * we made progress, there is more work to do and the bdi
+                * is now congested.  Back off and let other work structs
+                * run instead
+                */
+               if (pending && bdi_write_congested(bdi) &&
+                   fs_info->fs_devices->open_devices > 1) {
+                       struct bio *old_head;
+
+                       spin_lock(&device->io_lock);
+
+                       old_head = device->pending_bios;
+                       device->pending_bios = pending;
+                       if (device->pending_bio_tail)
+                               tail->bi_next = old_head;
+                       else
+                               device->pending_bio_tail = tail;
+                       device->running_pending = 0;
+
+                       spin_unlock(&device->io_lock);
+                       btrfs_requeue_work(&device->work);
+                       goto done;
+               }
+       }
+       if (again)
+               goto loop;
+done:
+       return 0;
+}
+
+static void pending_bios_fn(struct btrfs_work *work)
+{
+       struct btrfs_device *device;
+
+       device = container_of(work, struct btrfs_device, work);
+       run_scheduled_bios(device);
+}
+
+static noinline int device_list_add(const char *path,
                           struct btrfs_super_block *disk_super,
                           u64 devid, struct btrfs_fs_devices **fs_devices_ret)
 {
@@ -120,7 +246,7 @@ static int device_list_add(const char *path,
 
        fs_devices = find_fsid(disk_super->fsid);
        if (!fs_devices) {
-               fs_devices = kmalloc(sizeof(*fs_devices), GFP_NOFS);
+               fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
                if (!fs_devices)
                        return -ENOMEM;
                INIT_LIST_HEAD(&fs_devices->devices);
@@ -129,19 +255,22 @@ static int device_list_add(const char *path,
                memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
-               fs_devices->num_devices = 0;
                device = NULL;
        } else {
                device = __find_device(&fs_devices->devices, devid,
                                       disk_super->dev_item.uuid);
        }
        if (!device) {
+               if (fs_devices->opened)
+                       return -EBUSY;
+
                device = kzalloc(sizeof(*device), GFP_NOFS);
                if (!device) {
                        /* we can safely leave the fs_devices entry around */
                        return -ENOMEM;
                }
                device->devid = devid;
+               device->work.func = pending_bios_fn;
                memcpy(device->uuid, disk_super->dev_item.uuid,
                       BTRFS_UUID_SIZE);
                device->barriers = 1;
@@ -151,8 +280,9 @@ static int device_list_add(const char *path,
                        kfree(device);
                        return -ENOMEM;
                }
+               INIT_LIST_HEAD(&device->dev_alloc_list);
                list_add(&device->dev_list, &fs_devices->devices);
-               list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
+               device->fs_devices = fs_devices;
                fs_devices->num_devices++;
        }
 
@@ -164,119 +294,207 @@ static int device_list_add(const char *path,
        return 0;
 }
 
-int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
+static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
 {
-       struct list_head *head = &fs_devices->devices;
-       struct list_head *cur;
+       struct btrfs_fs_devices *fs_devices;
        struct btrfs_device *device;
+       struct btrfs_device *orig_dev;
+
+       fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
+       if (!fs_devices)
+               return ERR_PTR(-ENOMEM);
+
+       INIT_LIST_HEAD(&fs_devices->devices);
+       INIT_LIST_HEAD(&fs_devices->alloc_list);
+       INIT_LIST_HEAD(&fs_devices->list);
+       fs_devices->latest_devid = orig->latest_devid;
+       fs_devices->latest_trans = orig->latest_trans;
+       memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
+
+       list_for_each_entry(orig_dev, &orig->devices, dev_list) {
+               device = kzalloc(sizeof(*device), GFP_NOFS);
+               if (!device)
+                       goto error;
+
+               device->name = kstrdup(orig_dev->name, GFP_NOFS);
+               if (!device->name)
+                       goto error;
+
+               device->devid = orig_dev->devid;
+               device->work.func = pending_bios_fn;
+               memcpy(device->uuid, orig_dev->uuid, sizeof(device->uuid));
+               device->barriers = 1;
+               spin_lock_init(&device->io_lock);
+               INIT_LIST_HEAD(&device->dev_list);
+               INIT_LIST_HEAD(&device->dev_alloc_list);
+
+               list_add(&device->dev_list, &fs_devices->devices);
+               device->fs_devices = fs_devices;
+               fs_devices->num_devices++;
+       }
+       return fs_devices;
+error:
+       free_fs_devices(fs_devices);
+       return ERR_PTR(-ENOMEM);
+}
+
+int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
+{
+       struct btrfs_device *device, *next;
 
        mutex_lock(&uuid_mutex);
 again:
-       list_for_each(cur, head) {
-               device = list_entry(cur, struct btrfs_device, dev_list);
-               if (!device->in_fs_metadata) {
-                       if (device->bdev) {
-                               close_bdev_excl(device->bdev);
-                               fs_devices->open_devices--;
-                       }
-                       list_del(&device->dev_list);
-                       list_del(&device->dev_alloc_list);
-                       fs_devices->num_devices--;
-                       kfree(device->name);
-                       kfree(device);
-                       goto again;
+       list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
+               if (device->in_fs_metadata)
+                       continue;
+
+               if (device->bdev) {
+                       close_bdev_exclusive(device->bdev, device->mode);
+                       device->bdev = NULL;
+                       fs_devices->open_devices--;
                }
+               if (device->writeable) {
+                       list_del_init(&device->dev_alloc_list);
+                       device->writeable = 0;
+                       fs_devices->rw_devices--;
+               }
+               list_del_init(&device->dev_list);
+               fs_devices->num_devices--;
+               kfree(device->name);
+               kfree(device);
+       }
+
+       if (fs_devices->seed) {
+               fs_devices = fs_devices->seed;
+               goto again;
        }
+
        mutex_unlock(&uuid_mutex);
        return 0;
 }
 
-int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
+static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
 {
-       struct list_head *head = &fs_devices->devices;
-       struct list_head *cur;
        struct btrfs_device *device;
 
-       mutex_lock(&uuid_mutex);
-       list_for_each(cur, head) {
-               device = list_entry(cur, struct btrfs_device, dev_list);
+       if (--fs_devices->opened > 0)
+               return 0;
+
+       list_for_each_entry(device, &fs_devices->devices, dev_list) {
                if (device->bdev) {
-                       close_bdev_excl(device->bdev);
+                       close_bdev_exclusive(device->bdev, device->mode);
                        fs_devices->open_devices--;
                }
+               if (device->writeable) {
+                       list_del_init(&device->dev_alloc_list);
+                       fs_devices->rw_devices--;
+               }
+
                device->bdev = NULL;
+               device->writeable = 0;
                device->in_fs_metadata = 0;
        }
-       fs_devices->mounted = 0;
-       mutex_unlock(&uuid_mutex);
+       WARN_ON(fs_devices->open_devices);
+       WARN_ON(fs_devices->rw_devices);
+       fs_devices->opened = 0;
+       fs_devices->seeding = 0;
+
        return 0;
 }
 
-int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
-                      int flags, void *holder)
+int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
+{
+       struct btrfs_fs_devices *seed_devices = NULL;
+       int ret;
+
+       mutex_lock(&uuid_mutex);
+       ret = __btrfs_close_devices(fs_devices);
+       if (!fs_devices->opened) {
+               seed_devices = fs_devices->seed;
+               fs_devices->seed = NULL;
+       }
+       mutex_unlock(&uuid_mutex);
+
+       while (seed_devices) {
+               fs_devices = seed_devices;
+               seed_devices = fs_devices->seed;
+               __btrfs_close_devices(fs_devices);
+               free_fs_devices(fs_devices);
+       }
+       return ret;
+}
+
+static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
+                               fmode_t flags, void *holder)
 {
        struct block_device *bdev;
        struct list_head *head = &fs_devices->devices;
-       struct list_head *cur;
        struct btrfs_device *device;
        struct block_device *latest_bdev = NULL;
        struct buffer_head *bh;
        struct btrfs_super_block *disk_super;
        u64 latest_devid = 0;
        u64 latest_transid = 0;
-       u64 transid;
        u64 devid;
+       int seeding = 1;
        int ret = 0;
 
-       mutex_lock(&uuid_mutex);
-       if (fs_devices->mounted)
-               goto out;
-
-       list_for_each(cur, head) {
-               device = list_entry(cur, struct btrfs_device, dev_list);
+       list_for_each_entry(device, head, dev_list) {
                if (device->bdev)
                        continue;
-
                if (!device->name)
                        continue;
 
-               bdev = open_bdev_excl(device->name, flags, holder);
-
+               bdev = open_bdev_exclusive(device->name, flags, holder);
                if (IS_ERR(bdev)) {
-                       printk("open %s failed\n", device->name);
+                       printk(KERN_INFO "open %s failed\n", device->name);
                        goto error;
                }
                set_blocksize(bdev, 4096);
 
-               bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
+               bh = btrfs_read_dev_super(bdev);
                if (!bh)
                        goto error_close;
 
                disk_super = (struct btrfs_super_block *)bh->b_data;
-               if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
-                   sizeof(disk_super->magic)))
-                       goto error_brelse;
-
                devid = le64_to_cpu(disk_super->dev_item.devid);
                if (devid != device->devid)
                        goto error_brelse;
 
-               transid = btrfs_super_generation(disk_super);
-               if (transid > latest_transid) {
+               if (memcmp(device->uuid, disk_super->dev_item.uuid,
+                          BTRFS_UUID_SIZE))
+                       goto error_brelse;
+
+               device->generation = btrfs_super_generation(disk_super);
+               if (!latest_transid || device->generation > latest_transid) {
                        latest_devid = devid;
-                       latest_transid = transid;
+                       latest_transid = device->generation;
                        latest_bdev = bdev;
                }
 
+               if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
+                       device->writeable = 0;
+               } else {
+                       device->writeable = !bdev_read_only(bdev);
+                       seeding = 0;
+               }
+
                device->bdev = bdev;
                device->in_fs_metadata = 0;
+               device->mode = flags;
+
                fs_devices->open_devices++;
+               if (device->writeable) {
+                       fs_devices->rw_devices++;
+                       list_add(&device->dev_alloc_list,
+                                &fs_devices->alloc_list);
+               }
                continue;
 
 error_brelse:
                brelse(bh);
 error_close:
-               close_bdev_excl(bdev);
+               close_bdev_exclusive(bdev, FMODE_READ);
 error:
                continue;
        }
@@ -284,16 +502,33 @@ error:
                ret = -EIO;
                goto out;
        }
-       fs_devices->mounted = 1;
+       fs_devices->seeding = seeding;
+       fs_devices->opened = 1;
        fs_devices->latest_bdev = latest_bdev;
        fs_devices->latest_devid = latest_devid;
        fs_devices->latest_trans = latest_transid;
+       fs_devices->total_rw_bytes = 0;
 out:
+       return ret;
+}
+
+int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
+                      fmode_t flags, void *holder)
+{
+       int ret;
+
+       mutex_lock(&uuid_mutex);
+       if (fs_devices->opened) {
+               fs_devices->opened++;
+               ret = 0;
+       } else {
+               ret = __btrfs_open_devices(fs_devices, flags, holder);
+       }
        mutex_unlock(&uuid_mutex);
        return ret;
 }
 
-int btrfs_scan_one_device(const char *path, int flags, void *holder,
+int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
                          struct btrfs_fs_devices **fs_devices_ret)
 {
        struct btrfs_super_block *disk_super;
@@ -305,7 +540,7 @@ int btrfs_scan_one_device(const char *path, int flags, void *holder,
 
        mutex_lock(&uuid_mutex);
 
-       bdev = open_bdev_excl(path, flags, holder);
+       bdev = open_bdev_exclusive(path, flags, holder);
 
        if (IS_ERR(bdev)) {
                ret = PTR_ERR(bdev);
@@ -315,34 +550,29 @@ int btrfs_scan_one_device(const char *path, int flags, void *holder,
        ret = set_blocksize(bdev, 4096);
        if (ret)
                goto error_close;
-       bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
+       bh = btrfs_read_dev_super(bdev);
        if (!bh) {
                ret = -EIO;
                goto error_close;
        }
        disk_super = (struct btrfs_super_block *)bh->b_data;
-       if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
-           sizeof(disk_super->magic))) {
-               ret = -EINVAL;
-               goto error_brelse;
-       }
        devid = le64_to_cpu(disk_super->dev_item.devid);
        transid = btrfs_super_generation(disk_super);
        if (disk_super->label[0])
-               printk("device label %s ", disk_super->label);
+               printk(KERN_INFO "device label %s ", disk_super->label);
        else {
                /* FIXME, make a readl uuid parser */
-               printk("device fsid %llx-%llx ",
+               printk(KERN_INFO "device fsid %llx-%llx ",
                       *(unsigned long long *)disk_super->fsid,
                       *(unsigned long long *)(disk_super->fsid + 8));
        }
-       printk("devid %Lu transid %Lu %s\n", devid, transid, path);
+       printk(KERN_CONT "devid %llu transid %llu %s\n",
+              (unsigned long long)devid, (unsigned long long)transid, path);
        ret = device_list_add(path, disk_super, devid, fs_devices_ret);
 
-error_brelse:
        brelse(bh);
 error_close:
-       close_bdev_excl(bdev);
+       close_bdev_exclusive(bdev, flags);
 error:
        mutex_unlock(&uuid_mutex);
        return ret;
@@ -353,14 +583,14 @@ error:
  * called very infrequently and that a given device has a small number
  * of extents
  */
-static int find_free_dev_extent(struct btrfs_trans_handle *trans,
-                               struct btrfs_device *device,
-                               struct btrfs_path *path,
-                               u64 num_bytes, u64 *start)
+static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
+                                        struct btrfs_device *device,
+                                        u64 num_bytes, u64 *start)
 {
        struct btrfs_key key;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *dev_extent = NULL;
+       struct btrfs_path *path;
        u64 hole_size = 0;
        u64 last_byte = 0;
        u64 search_start = 0;
@@ -370,8 +600,11 @@ static int find_free_dev_extent(struct btrfs_trans_handle *trans,
        int start_found;
        struct extent_buffer *l;
 
-       start_found = 0;
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
        path->reada = 2;
+       start_found = 0;
 
        /* FIXME use last free of some kind */
 
@@ -440,9 +673,8 @@ no_more_items:
                                goto check_pending;
                        }
                }
-               if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {
+               if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
                        goto next;
-               }
 
                start_found = 1;
                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
@@ -455,7 +687,6 @@ check_pending:
        /* we have to make sure we didn't find an extent that has already
         * been allocated by the map tree or the original allocation
         */
-       btrfs_release_path(root, path);
        BUG_ON(*start < search_start);
 
        if (*start + num_bytes > search_end) {
@@ -463,14 +694,14 @@ check_pending:
                goto error;
        }
        /* check for pending inserts here */
-       return 0;
+       ret = 0;
 
 error:
-       btrfs_release_path(root, path);
+       btrfs_free_path(path);
        return ret;
 }
 
-int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
+static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
                          struct btrfs_device *device,
                          u64 start)
 {
@@ -521,8 +752,7 @@ int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
 int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
                           struct btrfs_device *device,
                           u64 chunk_tree, u64 chunk_objectid,
-                          u64 chunk_offset,
-                          u64 num_bytes, u64 *start)
+                          u64 chunk_offset, u64 start, u64 num_bytes)
 {
        int ret;
        struct btrfs_path *path;
@@ -536,13 +766,8 @@ int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
        if (!path)
                return -ENOMEM;
 
-       ret = find_free_dev_extent(trans, device, path, num_bytes, start);
-       if (ret) {
-               goto err;
-       }
-
        key.objectid = device->devid;
-       key.offset = *start;
+       key.offset = start;
        key.type = BTRFS_DEV_EXTENT_KEY;
        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(*extent));
@@ -561,12 +786,12 @@ int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
 
        btrfs_set_dev_extent_length(leaf, extent, num_bytes);
        btrfs_mark_buffer_dirty(leaf);
-err:
        btrfs_free_path(path);
        return ret;
 }
 
-static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
+static noinline int find_next_chunk(struct btrfs_root *root,
+                                   u64 objectid, u64 *offset)
 {
        struct btrfs_path *path;
        int ret;
@@ -608,12 +833,18 @@ error:
        return ret;
 }
 
-static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
-                          u64 *objectid)
+static noinline int find_next_devid(struct btrfs_root *root, u64 *objectid)
 {
        int ret;
        struct btrfs_key key;
        struct btrfs_key found_key;
+       struct btrfs_path *path;
+
+       root = root->fs_info->chunk_root;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
 
        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
@@ -636,7 +867,7 @@ static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
        }
        ret = 0;
 error:
-       btrfs_release_path(root, path);
+       btrfs_free_path(path);
        return ret;
 }
 
@@ -654,7 +885,6 @@ int btrfs_add_device(struct btrfs_trans_handle *trans,
        struct extent_buffer *leaf;
        struct btrfs_key key;
        unsigned long ptr;
-       u64 free_devid = 0;
 
        root = root->fs_info->chunk_root;
 
@@ -662,13 +892,9 @@ int btrfs_add_device(struct btrfs_trans_handle *trans,
        if (!path)
                return -ENOMEM;
 
-       ret = find_next_devid(root, path, &free_devid);
-       if (ret)
-               goto out;
-
        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
-       key.offset = free_devid;
+       key.offset = device->devid;
 
        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(*dev_item));
@@ -678,8 +904,8 @@ int btrfs_add_device(struct btrfs_trans_handle *trans,
        leaf = path->nodes[0];
        dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
 
-       device->devid = free_devid;
        btrfs_set_device_id(leaf, dev_item, device->devid);
+       btrfs_set_device_generation(leaf, dev_item, 0);
        btrfs_set_device_type(leaf, dev_item, device->type);
        btrfs_set_device_io_align(leaf, dev_item, device->io_align);
        btrfs_set_device_io_width(leaf, dev_item, device->io_width);
@@ -689,12 +915,15 @@ int btrfs_add_device(struct btrfs_trans_handle *trans,
        btrfs_set_device_group(leaf, dev_item, 0);
        btrfs_set_device_seek_speed(leaf, dev_item, 0);
        btrfs_set_device_bandwidth(leaf, dev_item, 0);
+       btrfs_set_device_start_offset(leaf, dev_item, 0);
 
        ptr = (unsigned long)btrfs_device_uuid(dev_item);
        write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
+       ptr = (unsigned long)btrfs_device_fsid(dev_item);
+       write_extent_buffer(leaf, root->fs_info->fsid, ptr, BTRFS_UUID_SIZE);
        btrfs_mark_buffer_dirty(leaf);
-       ret = 0;
 
+       ret = 0;
 out:
        btrfs_free_path(path);
        return ret;
@@ -705,11 +934,7 @@ static int btrfs_rm_dev_item(struct btrfs_root *root,
 {
        int ret;
        struct btrfs_path *path;
-       struct block_device *bdev = device->bdev;
-       struct btrfs_device *next_dev;
        struct btrfs_key key;
-       u64 total_bytes;
-       struct btrfs_fs_devices *fs_devices;
        struct btrfs_trans_handle *trans;
 
        root = root->fs_info->chunk_root;
@@ -722,6 +947,7 @@ static int btrfs_rm_dev_item(struct btrfs_root *root,
        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;
+       lock_chunks(root);
 
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
@@ -735,31 +961,9 @@ static int btrfs_rm_dev_item(struct btrfs_root *root,
        ret = btrfs_del_item(trans, root, path);
        if (ret)
                goto out;
-
-       /*
-        * at this point, the device is zero sized.  We want to
-        * remove it from the devices list and zero out the old super
-        */
-       list_del_init(&device->dev_list);
-       list_del_init(&device->dev_alloc_list);
-       fs_devices = root->fs_info->fs_devices;
-
-       next_dev = list_entry(fs_devices->devices.next, struct btrfs_device,
-                             dev_list);
-       if (bdev == root->fs_info->sb->s_bdev)
-               root->fs_info->sb->s_bdev = next_dev->bdev;
-       if (bdev == fs_devices->latest_bdev)
-               fs_devices->latest_bdev = next_dev->bdev;
-
-       total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
-       btrfs_set_super_total_bytes(&root->fs_info->super_copy,
-                                   total_bytes - device->total_bytes);
-
-       total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
-       btrfs_set_super_num_devices(&root->fs_info->super_copy,
-                                   total_bytes - 1);
 out:
        btrfs_free_path(path);
+       unlock_chunks(root);
        btrfs_commit_transaction(trans, root);
        return ret;
 }
@@ -767,43 +971,46 @@ out:
 int btrfs_rm_device(struct btrfs_root *root, char *device_path)
 {
        struct btrfs_device *device;
+       struct btrfs_device *next_device;
        struct block_device *bdev;
        struct buffer_head *bh = NULL;
        struct btrfs_super_block *disk_super;
        u64 all_avail;
        u64 devid;
+       u64 num_devices;
+       u8 *dev_uuid;
        int ret = 0;
 
-       mutex_lock(&root->fs_info->fs_mutex);
        mutex_lock(&uuid_mutex);
+       mutex_lock(&root->fs_info->volume_mutex);
 
        all_avail = root->fs_info->avail_data_alloc_bits |
                root->fs_info->avail_system_alloc_bits |
                root->fs_info->avail_metadata_alloc_bits;
 
        if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
-           btrfs_super_num_devices(&root->fs_info->super_copy) <= 4) {
-               printk("btrfs: unable to go below four devices on raid10\n");
+           root->fs_info->fs_devices->rw_devices <= 4) {
+               printk(KERN_ERR "btrfs: unable to go below four devices "
+                      "on raid10\n");
                ret = -EINVAL;
                goto out;
        }
 
        if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
-           btrfs_super_num_devices(&root->fs_info->super_copy) <= 2) {
-               printk("btrfs: unable to go below two devices on raid1\n");
+           root->fs_info->fs_devices->rw_devices <= 2) {
+               printk(KERN_ERR "btrfs: unable to go below two "
+                      "devices on raid1\n");
                ret = -EINVAL;
                goto out;
        }
 
        if (strcmp(device_path, "missing") == 0) {
-               struct list_head *cur;
                struct list_head *devices;
                struct btrfs_device *tmp;
 
                device = NULL;
                devices = &root->fs_info->fs_devices->devices;
-               list_for_each(cur, devices) {
-                       tmp = list_entry(cur, struct btrfs_device, dev_list);
+               list_for_each_entry(tmp, devices, dev_list) {
                        if (tmp->in_fs_metadata && !tmp->bdev) {
                                device = tmp;
                                break;
@@ -813,141 +1020,335 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
                bh = NULL;
                disk_super = NULL;
                if (!device) {
-                       printk("btrfs: no missing devices found to remove\n");
+                       printk(KERN_ERR "btrfs: no missing devices found to "
+                              "remove\n");
                        goto out;
                }
-
        } else {
-               bdev = open_bdev_excl(device_path, 0,
+               bdev = open_bdev_exclusive(device_path, FMODE_READ,
                                      root->fs_info->bdev_holder);
                if (IS_ERR(bdev)) {
                        ret = PTR_ERR(bdev);
                        goto out;
                }
 
-               bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
+               set_blocksize(bdev, 4096);
+               bh = btrfs_read_dev_super(bdev);
                if (!bh) {
                        ret = -EIO;
                        goto error_close;
                }
                disk_super = (struct btrfs_super_block *)bh->b_data;
-               if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
-                   sizeof(disk_super->magic))) {
-                       ret = -ENOENT;
-                       goto error_brelse;
-               }
-               if (memcmp(disk_super->fsid, root->fs_info->fsid,
-                          BTRFS_FSID_SIZE)) {
-                       ret = -ENOENT;
-                       goto error_brelse;
-               }
                devid = le64_to_cpu(disk_super->dev_item.devid);
-               device = btrfs_find_device(root, devid, NULL);
+               dev_uuid = disk_super->dev_item.uuid;
+               device = btrfs_find_device(root, devid, dev_uuid,
+                                          disk_super->fsid);
                if (!device) {
                        ret = -ENOENT;
                        goto error_brelse;
                }
+       }
 
+       if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) {
+               printk(KERN_ERR "btrfs: unable to remove the only writeable "
+                      "device\n");
+               ret = -EINVAL;
+               goto error_brelse;
+       }
+
+       if (device->writeable) {
+               list_del_init(&device->dev_alloc_list);
+               root->fs_info->fs_devices->rw_devices--;
        }
-       root->fs_info->fs_devices->num_devices--;
 
        ret = btrfs_shrink_device(device, 0);
        if (ret)
                goto error_brelse;
 
-
        ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
        if (ret)
                goto error_brelse;
 
-       if (bh) {
+       device->in_fs_metadata = 0;
+       list_del_init(&device->dev_list);
+       device->fs_devices->num_devices--;
+
+       next_device = list_entry(root->fs_info->fs_devices->devices.next,
+                                struct btrfs_device, dev_list);
+       if (device->bdev == root->fs_info->sb->s_bdev)
+               root->fs_info->sb->s_bdev = next_device->bdev;
+       if (device->bdev == root->fs_info->fs_devices->latest_bdev)
+               root->fs_info->fs_devices->latest_bdev = next_device->bdev;
+
+       if (device->bdev) {
+               close_bdev_exclusive(device->bdev, device->mode);
+               device->bdev = NULL;
+               device->fs_devices->open_devices--;
+       }
+
+       num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1;
+       btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices);
+
+       if (device->fs_devices->open_devices == 0) {
+               struct btrfs_fs_devices *fs_devices;
+               fs_devices = root->fs_info->fs_devices;
+               while (fs_devices) {
+                       if (fs_devices->seed == device->fs_devices)
+                               break;
+                       fs_devices = fs_devices->seed;
+               }
+               fs_devices->seed = device->fs_devices->seed;
+               device->fs_devices->seed = NULL;
+               __btrfs_close_devices(device->fs_devices);
+               free_fs_devices(device->fs_devices);
+       }
+
+       /*
+        * at this point, the device is zero sized.  We want to
+        * remove it from the devices list and zero out the old super
+        */
+       if (device->writeable) {
                /* make sure this device isn't detected as part of
                 * the FS anymore
                 */
                memset(&disk_super->magic, 0, sizeof(disk_super->magic));
                set_buffer_dirty(bh);
                sync_dirty_buffer(bh);
-
-               brelse(bh);
        }
 
-       if (device->bdev) {
-               /* one close for the device struct or super_block */
-               close_bdev_excl(device->bdev);
-               root->fs_info->fs_devices->open_devices--;
-       }
-       if (bdev) {
-               /* one close for us */
-               close_bdev_excl(bdev);
-       }
        kfree(device->name);
        kfree(device);
        ret = 0;
-       goto out;
 
 error_brelse:
        brelse(bh);
 error_close:
        if (bdev)
-               close_bdev_excl(bdev);
+               close_bdev_exclusive(bdev, FMODE_READ);
 out:
+       mutex_unlock(&root->fs_info->volume_mutex);
        mutex_unlock(&uuid_mutex);
-       mutex_unlock(&root->fs_info->fs_mutex);
        return ret;
 }
 
-int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
+/*
+ * does all the dirty work required for changing file system's UUID.
+ *
+ * Every currently open device is handed over to a freshly allocated
+ * "seed" btrfs_fs_devices, a clone of the old device list stays
+ * registered in fs_uuids under the old fsid, and the mounted fs gets a
+ * brand new fsid with the SEEDING flag cleared in the super block.
+ * Caller must hold uuid_mutex.  Returns 0 or a negative errno.
+ */
+static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
+                               struct btrfs_root *root)
 {
-       struct btrfs_trans_handle *trans;
+       struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
+       struct btrfs_fs_devices *old_devices;
+       struct btrfs_fs_devices *seed_devices;
+       struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
        struct btrfs_device *device;
-       struct block_device *bdev;
-       struct list_head *cur;
-       struct list_head *devices;
-       u64 total_bytes;
-       int ret = 0;
+       u64 super_flags;
 
+       BUG_ON(!mutex_is_locked(&uuid_mutex));
+       /* sprouting only makes sense on a seeding filesystem */
+       if (!fs_devices->seeding)
+               return -EINVAL;
 
-       bdev = open_bdev_excl(device_path, 0, root->fs_info->bdev_holder);
-       if (!bdev) {
-               return -EIO;
-       }
-       mutex_lock(&root->fs_info->fs_mutex);
-       trans = btrfs_start_transaction(root, 1);
-       devices = &root->fs_info->fs_devices->devices;
-       list_for_each(cur, devices) {
-               device = list_entry(cur, struct btrfs_device, dev_list);
-               if (device->bdev == bdev) {
-                       ret = -EEXIST;
-                       goto out;
-               }
-       }
+       seed_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
+       if (!seed_devices)
+               return -ENOMEM;
 
-       device = kzalloc(sizeof(*device), GFP_NOFS);
-       if (!device) {
-               /* we can safely leave the fs_devices entry around */
-               ret = -ENOMEM;
-               goto out_close_bdev;
+       /*
+        * keep a copy of the old device list registered under the old
+        * fsid so the seed fs can still be found by later scans
+        */
+       old_devices = clone_fs_devices(fs_devices);
+       if (IS_ERR(old_devices)) {
+               kfree(seed_devices);
+               return PTR_ERR(old_devices);
        }
 
-       device->barriers = 1;
-       generate_random_uuid(device->uuid);
-       spin_lock_init(&device->io_lock);
-       device->name = kstrdup(device_path, GFP_NOFS);
-       if (!device->name) {
-               kfree(device);
-               goto out_close_bdev;
+       list_add(&old_devices->list, &fs_uuids);
+
+       /* move all currently open devices over to the seed list */
+       memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
+       seed_devices->opened = 1;
+       INIT_LIST_HEAD(&seed_devices->devices);
+       INIT_LIST_HEAD(&seed_devices->alloc_list);
+       list_splice_init(&fs_devices->devices, &seed_devices->devices);
+       list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
+       list_for_each_entry(device, &seed_devices->devices, dev_list) {
+               device->fs_devices = seed_devices;
        }
-       device->io_width = root->sectorsize;
-       device->io_align = root->sectorsize;
-       device->sector_size = root->sectorsize;
-       device->total_bytes = i_size_read(bdev->bd_inode);
-       device->dev_root = root->fs_info->dev_root;
-       device->bdev = bdev;
+
+       /* the mounted fs starts out with no devices of its own */
+       fs_devices->seeding = 0;
+       fs_devices->num_devices = 0;
+       fs_devices->open_devices = 0;
+       fs_devices->seed = seed_devices;
+
+       /* fresh fsid for the sprout; clear SEEDING in the super block */
+       generate_random_uuid(fs_devices->fsid);
+       memcpy(root->fs_info->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
+       memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
+       super_flags = btrfs_super_flags(disk_super) &
+                     ~BTRFS_SUPER_FLAG_SEEDING;
+       btrfs_set_super_flags(disk_super, super_flags);
+
+       return 0;
+}
+
+/*
+ * store the expected generation for seed devices in device items.
+ *
+ * Walks every DEV_ITEM in the chunk tree; items whose in-memory device
+ * belongs to a seeding fs_devices get the device's generation written
+ * back, recording which generation of the seed this fs expects.
+ * Returns 0 on success or a negative errno.
+ */
+static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
+                              struct btrfs_root *root)
+{
+       struct btrfs_path *path;
+       struct extent_buffer *leaf;
+       struct btrfs_dev_item *dev_item;
+       struct btrfs_device *device;
+       struct btrfs_key key;
+       u8 fs_uuid[BTRFS_UUID_SIZE];
+       u8 dev_uuid[BTRFS_UUID_SIZE];
+       u64 devid;
+       int ret;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+
+       /* device items live in the chunk tree */
+       root = root->fs_info->chunk_root;
+       key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
+       key.offset = 0;
+       key.type = BTRFS_DEV_ITEM_KEY;
+
+       while (1) {
+               ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
+               if (ret < 0)
+                       goto error;
+
+               leaf = path->nodes[0];
+next_slot:
+               if (path->slots[0] >= btrfs_header_nritems(leaf)) {
+                       ret = btrfs_next_leaf(root, path);
+                       if (ret > 0)
+                               break;
+                       if (ret < 0)
+                               goto error;
+                       leaf = path->nodes[0];
+                       btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+                       /* drop the path and re-search from the new key so
+                        * the next leaf is held with a writeable path */
+                       btrfs_release_path(root, path);
+                       continue;
+               }
+
+               btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
+               /* stop once we run past the device items */
+               if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
+                   key.type != BTRFS_DEV_ITEM_KEY)
+                       break;
+
+               dev_item = btrfs_item_ptr(leaf, path->slots[0],
+                                         struct btrfs_dev_item);
+               devid = btrfs_device_id(leaf, dev_item);
+               read_extent_buffer(leaf, dev_uuid,
+                                  (unsigned long)btrfs_device_uuid(dev_item),
+                                  BTRFS_UUID_SIZE);
+               read_extent_buffer(leaf, fs_uuid,
+                                  (unsigned long)btrfs_device_fsid(dev_item),
+                                  BTRFS_UUID_SIZE);
+               device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
+               /* every on-disk dev item must have an in-memory device */
+               BUG_ON(!device);
+
+               if (device->fs_devices->seeding) {
+                       btrfs_set_device_generation(leaf, dev_item,
+                                                   device->generation);
+                       btrfs_mark_buffer_dirty(leaf);
+               }
+
+               path->slots[0]++;
+               goto next_slot;
+       }
+       ret = 0;
+error:
+       btrfs_free_path(path);
+       return ret;
+}
+
+int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
+{
+       struct btrfs_trans_handle *trans;
+       struct btrfs_device *device;
+       struct block_device *bdev;
+       struct list_head *devices;
+       struct super_block *sb = root->fs_info->sb;
+       u64 total_bytes;
+       int seeding_dev = 0;
+       int ret = 0;
+
+       if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
+               return -EINVAL;
+
+       bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
+       if (!bdev)
+               return -EIO;
+
+       if (root->fs_info->fs_devices->seeding) {
+               seeding_dev = 1;
+               down_write(&sb->s_umount);
+               mutex_lock(&uuid_mutex);
+       }
+
+       filemap_write_and_wait(bdev->bd_inode->i_mapping);
+       mutex_lock(&root->fs_info->volume_mutex);
+
+       devices = &root->fs_info->fs_devices->devices;
+       list_for_each_entry(device, devices, dev_list) {
+               if (device->bdev == bdev) {
+                       ret = -EEXIST;
+                       goto error;
+               }
+       }
+
+       device = kzalloc(sizeof(*device), GFP_NOFS);
+       if (!device) {
+               /* we can safely leave the fs_devices entry around */
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       device->name = kstrdup(device_path, GFP_NOFS);
+       if (!device->name) {
+               kfree(device);
+               ret = -ENOMEM;
+               goto error;
+       }
+
+       ret = find_next_devid(root, &device->devid);
+       if (ret) {
+               kfree(device);
+               goto error;
+       }
+
+       trans = btrfs_start_transaction(root, 1);
+       lock_chunks(root);
+
+       device->barriers = 1;
+       device->writeable = 1;
+       device->work.func = pending_bios_fn;
+       generate_random_uuid(device->uuid);
+       spin_lock_init(&device->io_lock);
+       device->generation = trans->transid;
+       device->io_width = root->sectorsize;
+       device->io_align = root->sectorsize;
+       device->sector_size = root->sectorsize;
+       device->total_bytes = i_size_read(bdev->bd_inode);
+       device->dev_root = root->fs_info->dev_root;
+       device->bdev = bdev;
        device->in_fs_metadata = 1;
+       device->mode = 0;
+       set_blocksize(device->bdev, 4096);
 
-       ret = btrfs_add_device(trans, root, device);
-       if (ret)
-               goto out_close_bdev;
+       if (seeding_dev) {
+               sb->s_flags &= ~MS_RDONLY;
+               ret = btrfs_prepare_sprout(trans, root);
+               BUG_ON(ret);
+       }
+
+       device->fs_devices = root->fs_info->fs_devices;
+       list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
+       list_add(&device->dev_alloc_list,
+                &root->fs_info->fs_devices->alloc_list);
+       root->fs_info->fs_devices->num_devices++;
+       root->fs_info->fs_devices->open_devices++;
+       root->fs_info->fs_devices->rw_devices++;
+       root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
 
        total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
        btrfs_set_super_total_bytes(&root->fs_info->super_copy,
@@ -957,23 +1358,39 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        btrfs_set_super_num_devices(&root->fs_info->super_copy,
                                    total_bytes + 1);
 
-       list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
-       list_add(&device->dev_alloc_list,
-                &root->fs_info->fs_devices->alloc_list);
-       root->fs_info->fs_devices->num_devices++;
-       root->fs_info->fs_devices->open_devices++;
+       if (seeding_dev) {
+               ret = init_first_rw_device(trans, root, device);
+               BUG_ON(ret);
+               ret = btrfs_finish_sprout(trans, root);
+               BUG_ON(ret);
+       } else {
+               ret = btrfs_add_device(trans, root, device);
+       }
+
+       unlock_chunks(root);
+       btrfs_commit_transaction(trans, root);
+
+       if (seeding_dev) {
+               mutex_unlock(&uuid_mutex);
+               up_write(&sb->s_umount);
+
+               ret = btrfs_relocate_sys_chunks(root);
+               BUG_ON(ret);
+       }
 out:
-       btrfs_end_transaction(trans, root);
-       mutex_unlock(&root->fs_info->fs_mutex);
+       mutex_unlock(&root->fs_info->volume_mutex);
        return ret;
-
-out_close_bdev:
-       close_bdev_excl(bdev);
+error:
+       close_bdev_exclusive(bdev, 0);
+       if (seeding_dev) {
+               mutex_unlock(&uuid_mutex);
+               up_write(&sb->s_umount);
+       }
        goto out;
 }
 
-int btrfs_update_device(struct btrfs_trans_handle *trans,
-                       struct btrfs_device *device)
+static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
+                                       struct btrfs_device *device)
 {
        int ret;
        struct btrfs_path *path;
@@ -1018,7 +1435,7 @@ out:
        return ret;
 }
 
-int btrfs_grow_device(struct btrfs_trans_handle *trans,
+static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
                      struct btrfs_device *device, u64 new_size)
 {
        struct btrfs_super_block *super_copy =
@@ -1026,10 +1443,28 @@ int btrfs_grow_device(struct btrfs_trans_handle *trans,
        u64 old_total = btrfs_super_total_bytes(super_copy);
        u64 diff = new_size - device->total_bytes;
 
+       if (!device->writeable)
+               return -EACCES;
+       if (new_size <= device->total_bytes)
+               return -EINVAL;
+
        btrfs_set_super_total_bytes(super_copy, old_total + diff);
+       device->fs_devices->total_rw_bytes += diff;
+
+       device->total_bytes = new_size;
        return btrfs_update_device(trans, device);
 }
 
+/*
+ * grow a device to new_size.  Thin locked wrapper: takes the per-fs
+ * chunk mutex around __btrfs_grow_device so the size change is
+ * serialized against concurrent chunk operations.
+ */
+int btrfs_grow_device(struct btrfs_trans_handle *trans,
+                     struct btrfs_device *device, u64 new_size)
+{
+       int ret;
+       lock_chunks(device->dev_root);
+       ret = __btrfs_grow_device(trans, device, new_size);
+       unlock_chunks(device->dev_root);
+       return ret;
+}
+
 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root,
                            u64 chunk_tree, u64 chunk_objectid,
@@ -1058,7 +1493,7 @@ static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
        return 0;
 }
 
-int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
+static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
                        chunk_offset)
 {
        struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
@@ -1104,8 +1539,7 @@ int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64
        return ret;
 }
 
-
-int btrfs_relocate_chunk(struct btrfs_root *root,
+static int btrfs_relocate_chunk(struct btrfs_root *root,
                         u64 chunk_tree, u64 chunk_objectid,
                         u64 chunk_offset)
 {
@@ -1117,19 +1551,21 @@ int btrfs_relocate_chunk(struct btrfs_root *root,
        int ret;
        int i;
 
-       printk("btrfs relocating chunk %llu\n",
+       printk(KERN_INFO "btrfs relocating chunk %llu\n",
               (unsigned long long)chunk_offset);
        root = root->fs_info->chunk_root;
        extent_root = root->fs_info->extent_root;
        em_tree = &root->fs_info->mapping_tree.map_tree;
 
        /* step one, relocate all the extents inside this chunk */
-       ret = btrfs_shrink_extent_tree(extent_root, chunk_offset);
+       ret = btrfs_relocate_block_group(extent_root, chunk_offset);
        BUG_ON(ret);
 
        trans = btrfs_start_transaction(root, 1);
        BUG_ON(!trans);
 
+       lock_chunks(root);
+
        /*
         * step two, delete the device extents and the
         * chunk tree entries
@@ -1162,22 +1598,84 @@ int btrfs_relocate_chunk(struct btrfs_root *root,
                BUG_ON(ret);
        }
 
+       ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
+       BUG_ON(ret);
+
        spin_lock(&em_tree->lock);
        remove_extent_mapping(em_tree, em);
+       spin_unlock(&em_tree->lock);
+
        kfree(map);
        em->bdev = NULL;
 
        /* once for the tree */
        free_extent_map(em);
-       spin_unlock(&em_tree->lock);
-
        /* once for us */
        free_extent_map(em);
 
+       unlock_chunks(root);
        btrfs_end_transaction(trans, root);
        return 0;
 }
 
+/*
+ * relocate every SYSTEM chunk, walking the chunk tree from the highest
+ * offset downwards.  Called after a seed device has been sprouted (see
+ * btrfs_init_new_device) so system chunks get rewritten.
+ * Returns 0 on success or a negative errno.
+ */
+static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
+{
+       struct btrfs_root *chunk_root = root->fs_info->chunk_root;
+       struct btrfs_path *path;
+       struct extent_buffer *leaf;
+       struct btrfs_chunk *chunk;
+       struct btrfs_key key;
+       struct btrfs_key found_key;
+       u64 chunk_tree = chunk_root->root_key.objectid;
+       u64 chunk_type;
+       int ret;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+
+       /* start just past the last possible chunk and walk backwards */
+       key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
+       key.offset = (u64)-1;
+       key.type = BTRFS_CHUNK_ITEM_KEY;
+
+       while (1) {
+               ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
+               if (ret < 0)
+                       goto error;
+               /* offset (u64)-1 can never match an item exactly */
+               BUG_ON(ret == 0);
+
+               /* step back to the last chunk item at or below the key */
+               ret = btrfs_previous_item(chunk_root, path, key.objectid,
+                                         key.type);
+               if (ret < 0)
+                       goto error;
+               if (ret > 0)
+                       break;
+
+               leaf = path->nodes[0];
+               btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
+
+               chunk = btrfs_item_ptr(leaf, path->slots[0],
+                                      struct btrfs_chunk);
+               chunk_type = btrfs_chunk_type(leaf, chunk);
+               /* drop the path before relocating: that starts
+                * transactions and rewrites the chunk tree */
+               btrfs_release_path(chunk_root, path);
+
+               if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
+                       ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
+                                                  found_key.objectid,
+                                                  found_key.offset);
+                       BUG_ON(ret);
+               }
+
+               if (found_key.offset == 0)
+                       break;
+               key.offset = found_key.offset - 1;
+       }
+       ret = 0;
+error:
+       btrfs_free_path(path);
+       return ret;
+}
+
 static u64 div_factor(u64 num, int factor)
 {
        if (factor == 10)
@@ -1187,11 +1685,9 @@ static u64 div_factor(u64 num, int factor)
        return num;
 }
 
-
 int btrfs_balance(struct btrfs_root *dev_root)
 {
        int ret;
-       struct list_head *cur;
        struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
        struct btrfs_device *device;
        u64 old_size;
@@ -1203,17 +1699,19 @@ int btrfs_balance(struct btrfs_root *dev_root)
        struct btrfs_trans_handle *trans;
        struct btrfs_key found_key;
 
+       if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
+               return -EROFS;
 
+       mutex_lock(&dev_root->fs_info->volume_mutex);
        dev_root = dev_root->fs_info->dev_root;
 
-       mutex_lock(&dev_root->fs_info->fs_mutex);
        /* step one make some room on all the devices */
-       list_for_each(cur, devices) {
-               device = list_entry(cur, struct btrfs_device, dev_list);
+       list_for_each_entry(device, devices, dev_list) {
                old_size = device->total_bytes;
                size_to_free = div_factor(old_size, 1);
                size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
-               if (device->total_bytes - device->bytes_used > size_to_free)
+               if (!device->writeable ||
+                   device->total_bytes - device->bytes_used > size_to_free)
                        continue;
 
                ret = btrfs_shrink_device(device, old_size - size_to_free);
@@ -1236,7 +1734,7 @@ int btrfs_balance(struct btrfs_root *dev_root)
        key.offset = (u64)-1;
        key.type = BTRFS_CHUNK_ITEM_KEY;
 
-       while(1) {
+       while (1) {
                ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
                if (ret < 0)
                        goto error;
@@ -1250,13 +1748,14 @@ int btrfs_balance(struct btrfs_root *dev_root)
 
                ret = btrfs_previous_item(chunk_root, path, 0,
                                          BTRFS_CHUNK_ITEM_KEY);
-               if (ret) {
+               if (ret)
                        break;
-               }
+
                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
                if (found_key.objectid != key.objectid)
                        break;
+
                chunk = btrfs_item_ptr(path->nodes[0],
                                       path->slots[0],
                                       struct btrfs_chunk);
@@ -1265,17 +1764,17 @@ int btrfs_balance(struct btrfs_root *dev_root)
                if (key.offset == 0)
                        break;
 
+               btrfs_release_path(chunk_root, path);
                ret = btrfs_relocate_chunk(chunk_root,
                                           chunk_root->root_key.objectid,
                                           found_key.objectid,
                                           found_key.offset);
                BUG_ON(ret);
-               btrfs_release_path(chunk_root, path);
        }
        ret = 0;
 error:
        btrfs_free_path(path);
-       mutex_unlock(&dev_root->fs_info->fs_mutex);
+       mutex_unlock(&dev_root->fs_info->volume_mutex);
        return ret;
 }
 
@@ -1302,6 +1801,8 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
        u64 old_total = btrfs_super_total_bytes(super_copy);
        u64 diff = device->total_bytes - new_size;
 
+       if (new_size >= device->total_bytes)
+               return -EINVAL;
 
        path = btrfs_alloc_path();
        if (!path)
@@ -1315,14 +1816,20 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 
        path->reada = 2;
 
+       lock_chunks(root);
+
        device->total_bytes = new_size;
+       if (device->writeable)
+               device->fs_devices->total_rw_bytes -= diff;
        ret = btrfs_update_device(trans, device);
        if (ret) {
+               unlock_chunks(root);
                btrfs_end_transaction(trans, root);
                goto done;
        }
        WARN_ON(diff > old_total);
        btrfs_set_super_total_bytes(super_copy, old_total - diff);
+       unlock_chunks(root);
        btrfs_end_transaction(trans, root);
 
        key.objectid = device->devid;
@@ -1371,7 +1878,7 @@ done:
        return ret;
 }
 
-int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
+static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct btrfs_key *key,
                           struct btrfs_chunk *chunk, int item_size)
@@ -1395,8 +1902,8 @@ int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
        return 0;
 }
 
-static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
-                              int sub_stripes)
+static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
+                                       int num_stripes, int sub_stripes)
 {
        if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
                return calc_size;
@@ -1406,32 +1913,27 @@ static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
                return calc_size * num_stripes;
 }
 
-
-int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
-                     struct btrfs_root *extent_root, u64 *start,
-                     u64 *num_bytes, u64 type)
+static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
+                              struct btrfs_root *extent_root,
+                              struct map_lookup **map_ret,
+                              u64 *num_bytes, u64 *stripe_size,
+                              u64 start, u64 type)
 {
-       u64 dev_offset;
        struct btrfs_fs_info *info = extent_root->fs_info;
-       struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
-       struct btrfs_path *path;
-       struct btrfs_stripe *stripes;
        struct btrfs_device *device = NULL;
-       struct btrfs_chunk *chunk;
-       struct list_head private_devs;
-       struct list_head *dev_list;
+       struct btrfs_fs_devices *fs_devices = info->fs_devices;
        struct list_head *cur;
+       struct map_lookup *map = NULL;
        struct extent_map_tree *em_tree;
-       struct map_lookup *map;
        struct extent_map *em;
+       struct list_head private_devs;
        int min_stripe_size = 1 * 1024 * 1024;
-       u64 physical;
        u64 calc_size = 1024 * 1024 * 1024;
        u64 max_chunk_size = calc_size;
        u64 min_free;
        u64 avail;
        u64 max_avail = 0;
-       u64 percent_max;
+       u64 dev_offset;
        int num_stripes = 1;
        int min_stripes = 1;
        int sub_stripes = 0;
@@ -1439,19 +1941,17 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
        int ret;
        int index;
        int stripe_len = 64 * 1024;
-       struct btrfs_key key;
 
        if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
            (type & BTRFS_BLOCK_GROUP_DUP)) {
                WARN_ON(1);
                type &= ~BTRFS_BLOCK_GROUP_DUP;
        }
-       dev_list = &extent_root->fs_info->fs_devices->alloc_list;
-       if (list_empty(dev_list))
+       if (list_empty(&fs_devices->alloc_list))
                return -ENOSPC;
 
        if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
-               num_stripes = btrfs_super_num_devices(&info->super_copy);
+               num_stripes = fs_devices->rw_devices;
                min_stripes = 2;
        }
        if (type & (BTRFS_BLOCK_GROUP_DUP)) {
@@ -1459,14 +1959,13 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                min_stripes = 2;
        }
        if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
-               num_stripes = min_t(u64, 2,
-                                 btrfs_super_num_devices(&info->super_copy));
+               num_stripes = min_t(u64, 2, fs_devices->rw_devices);
                if (num_stripes < 2)
                        return -ENOSPC;
                min_stripes = 2;
        }
        if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
-               num_stripes = btrfs_super_num_devices(&info->super_copy);
+               num_stripes = fs_devices->rw_devices;
                if (num_stripes < 4)
                        return -ENOSPC;
                num_stripes &= ~(u32)1;
@@ -1486,15 +1985,19 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                min_stripe_size = 1 * 1024 * 1024;
        }
 
-       path = btrfs_alloc_path();
-       if (!path)
-               return -ENOMEM;
-
-       /* we don't want a chunk larger than 10% of the FS */
-       percent_max = div_factor(btrfs_super_total_bytes(&info->super_copy), 1);
-       max_chunk_size = min(percent_max, max_chunk_size);
+       /* we don't want a chunk larger than 10% of writeable space */
+       max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
+                            max_chunk_size);
 
 again:
+       if (!map || map->num_stripes != num_stripes) {
+               kfree(map);
+               map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
+               if (!map)
+                       return -ENOMEM;
+               map->num_stripes = num_stripes;
+       }
+
        if (calc_size * num_stripes > max_chunk_size) {
                calc_size = max_chunk_size;
                do_div(calc_size, num_stripes);
@@ -1507,8 +2010,7 @@ again:
        do_div(calc_size, stripe_len);
        calc_size *= stripe_len;
 
-       INIT_LIST_HEAD(&private_devs);
-       cur = dev_list->next;
+       cur = fs_devices->alloc_list.next;
        index = 0;
 
        if (type & BTRFS_BLOCK_GROUP_DUP)
@@ -1516,13 +2018,18 @@ again:
        else
                min_free = calc_size;
 
-       /* we add 1MB because we never use the first 1MB of the device */
-       min_free += 1024 * 1024;
+       /*
+        * we add 1MB because we never use the first 1MB of the device, unless
+        * we've looped, then we are likely allocating the maximum amount of
+        * space left already
+        */
+       if (!looped)
+               min_free += 1024 * 1024;
 
-       /* build a private list of devices we will allocate from */
-       while(index < num_stripes) {
+       INIT_LIST_HEAD(&private_devs);
+       while (index < num_stripes) {
                device = list_entry(cur, struct btrfs_device, dev_alloc_list);
-
+               BUG_ON(!device->writeable);
                if (device->total_bytes > device->bytes_used)
                        avail = device->total_bytes - device->bytes_used;
                else
@@ -1530,24 +2037,28 @@ again:
                cur = cur->next;
 
                if (device->in_fs_metadata && avail >= min_free) {
-                       u64 ignored_start = 0;
-                       ret = find_free_dev_extent(trans, device, path,
-                                                  min_free,
-                                                  &ignored_start);
+                       ret = find_free_dev_extent(trans, device,
+                                                  min_free, &dev_offset);
                        if (ret == 0) {
                                list_move_tail(&device->dev_alloc_list,
                                               &private_devs);
+                               map->stripes[index].dev = device;
+                               map->stripes[index].physical = dev_offset;
                                index++;
-                               if (type & BTRFS_BLOCK_GROUP_DUP)
+                               if (type & BTRFS_BLOCK_GROUP_DUP) {
+                                       map->stripes[index].dev = device;
+                                       map->stripes[index].physical =
+                                               dev_offset + calc_size;
                                        index++;
+                               }
                        }
                } else if (device->in_fs_metadata && avail > max_avail)
                        max_avail = avail;
-               if (cur == dev_list)
+               if (cur == &fs_devices->alloc_list)
                        break;
        }
+       list_splice(&private_devs, &fs_devices->alloc_list);
        if (index < num_stripes) {
-               list_splice(&private_devs, dev_list);
                if (index >= min_stripes) {
                        num_stripes = index;
                        if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
@@ -1562,114 +2073,246 @@ again:
                        calc_size = max_avail;
                        goto again;
                }
-               btrfs_free_path(path);
+               kfree(map);
                return -ENOSPC;
        }
-       key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
-       key.type = BTRFS_CHUNK_ITEM_KEY;
-       ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
-                             &key.offset);
-       if (ret) {
-               btrfs_free_path(path);
-               return ret;
-       }
+       map->sector_size = extent_root->sectorsize;
+       map->stripe_len = stripe_len;
+       map->io_align = stripe_len;
+       map->io_width = stripe_len;
+       map->type = type;
+       map->num_stripes = num_stripes;
+       map->sub_stripes = sub_stripes;
 
-       chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
-       if (!chunk) {
-               btrfs_free_path(path);
-               return -ENOMEM;
-       }
+       *map_ret = map;
+       *stripe_size = calc_size;
+       *num_bytes = chunk_bytes_by_type(type, calc_size,
+                                        num_stripes, sub_stripes);
 
-       map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
-       if (!map) {
-               kfree(chunk);
-               btrfs_free_path(path);
+       em = alloc_extent_map(GFP_NOFS);
+       if (!em) {
+               kfree(map);
                return -ENOMEM;
        }
-       btrfs_free_path(path);
-       path = NULL;
+       em->bdev = (struct block_device *)map;
+       em->start = start;
+       em->len = *num_bytes;
+       em->block_start = 0;
+       em->block_len = em->len;
 
-       stripes = &chunk->stripe;
-       *num_bytes = chunk_bytes_by_type(type, calc_size,
-                                        num_stripes, sub_stripes);
+       em_tree = &extent_root->fs_info->mapping_tree.map_tree;
+       spin_lock(&em_tree->lock);
+       ret = add_extent_mapping(em_tree, em);
+       spin_unlock(&em_tree->lock);
+       BUG_ON(ret);
+       free_extent_map(em);
 
-       index = 0;
-       while(index < num_stripes) {
-               struct btrfs_stripe *stripe;
-               BUG_ON(list_empty(&private_devs));
-               cur = private_devs.next;
-               device = list_entry(cur, struct btrfs_device, dev_alloc_list);
+       ret = btrfs_make_block_group(trans, extent_root, 0, type,
+                                    BTRFS_FIRST_CHUNK_TREE_OBJECTID,
+                                    start, *num_bytes);
+       BUG_ON(ret);
 
-               /* loop over this device again if we're doing a dup group */
-               if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
-                   (index == num_stripes - 1))
-                       list_move_tail(&device->dev_alloc_list, dev_list);
+       index = 0;
+       while (index < map->num_stripes) {
+               device = map->stripes[index].dev;
+               dev_offset = map->stripes[index].physical;
 
                ret = btrfs_alloc_dev_extent(trans, device,
-                            info->chunk_root->root_key.objectid,
-                            BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
-                            calc_size, &dev_offset);
+                               info->chunk_root->root_key.objectid,
+                               BTRFS_FIRST_CHUNK_TREE_OBJECTID,
+                               start, dev_offset, calc_size);
                BUG_ON(ret);
-               device->bytes_used += calc_size;
+               index++;
+       }
+
+       return 0;
+}
+
+static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
+                               struct btrfs_root *extent_root,
+                               struct map_lookup *map, u64 chunk_offset,
+                               u64 chunk_size, u64 stripe_size)
+{
+       u64 dev_offset;
+       struct btrfs_key key;
+       struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
+       struct btrfs_device *device;
+       struct btrfs_chunk *chunk;
+       struct btrfs_stripe *stripe;
+       size_t item_size = btrfs_chunk_item_size(map->num_stripes);
+       int index = 0;
+       int ret;
+
+       chunk = kzalloc(item_size, GFP_NOFS);
+       if (!chunk)
+               return -ENOMEM;
+
+       index = 0;
+       while (index < map->num_stripes) {
+               device = map->stripes[index].dev;
+               device->bytes_used += stripe_size;
                ret = btrfs_update_device(trans, device);
                BUG_ON(ret);
+               index++;
+       }
+
+       index = 0;
+       stripe = &chunk->stripe;
+       while (index < map->num_stripes) {
+               device = map->stripes[index].dev;
+               dev_offset = map->stripes[index].physical;
 
-               map->stripes[index].dev = device;
-               map->stripes[index].physical = dev_offset;
-               stripe = stripes + index;
                btrfs_set_stack_stripe_devid(stripe, device->devid);
                btrfs_set_stack_stripe_offset(stripe, dev_offset);
                memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
-               physical = dev_offset;
+               stripe++;
                index++;
        }
-       BUG_ON(!list_empty(&private_devs));
 
-       /* key was set above */
-       btrfs_set_stack_chunk_length(chunk, *num_bytes);
+       btrfs_set_stack_chunk_length(chunk, chunk_size);
        btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
-       btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
-       btrfs_set_stack_chunk_type(chunk, type);
-       btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
-       btrfs_set_stack_chunk_io_align(chunk, stripe_len);
-       btrfs_set_stack_chunk_io_width(chunk, stripe_len);
+       btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
+       btrfs_set_stack_chunk_type(chunk, map->type);
+       btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
+       btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
+       btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
        btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
-       btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
-       map->sector_size = extent_root->sectorsize;
-       map->stripe_len = stripe_len;
-       map->io_align = stripe_len;
-       map->io_width = stripe_len;
-       map->type = type;
-       map->num_stripes = num_stripes;
-       map->sub_stripes = sub_stripes;
+       btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
 
-       ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
-                               btrfs_chunk_item_size(num_stripes));
-       BUG_ON(ret);
-       *start = key.offset;;
+       key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
+       key.type = BTRFS_CHUNK_ITEM_KEY;
+       key.offset = chunk_offset;
 
-       em = alloc_extent_map(GFP_NOFS);
-       if (!em)
-               return -ENOMEM;
-       em->bdev = (struct block_device *)map;
-       em->start = key.offset;
-       em->len = *num_bytes;
-       em->block_start = 0;
+       ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
+       BUG_ON(ret);
 
-       if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
-               ret = btrfs_add_system_chunk(trans, chunk_root, &key,
-                                   chunk, btrfs_chunk_item_size(num_stripes));
+       if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
+               ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
+                                            item_size);
                BUG_ON(ret);
        }
        kfree(chunk);
+       return 0;
+}
 
-       em_tree = &extent_root->fs_info->mapping_tree.map_tree;
-       spin_lock(&em_tree->lock);
-       ret = add_extent_mapping(em_tree, em);
-       spin_unlock(&em_tree->lock);
+/*
+ * Chunk allocation falls into two parts. The first part does the work
+ * that makes the newly allocated chunk usable, but does not modify the
+ * chunk tree. The second part does the work that requires modifying
+ * the chunk tree. This division is important for the bootstrap process
+ * of adding storage to a seed btrfs filesystem.
+ */
+int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
+                     struct btrfs_root *extent_root, u64 type)
+{
+       u64 chunk_offset;
+       u64 chunk_size;
+       u64 stripe_size;
+       struct map_lookup *map;
+       struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
+       int ret;
+
+       ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
+                             &chunk_offset);
+       if (ret)
+               return ret;
+
+       ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
+                                 &stripe_size, chunk_offset, type);
+       if (ret)
+               return ret;
+
+       ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
+                                  chunk_size, stripe_size);
+       BUG_ON(ret);
+       return 0;
+}
+
+static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
+                                        struct btrfs_root *root,
+                                        struct btrfs_device *device)
+{
+       u64 chunk_offset;
+       u64 sys_chunk_offset;
+       u64 chunk_size;
+       u64 sys_chunk_size;
+       u64 stripe_size;
+       u64 sys_stripe_size;
+       u64 alloc_profile;
+       struct map_lookup *map;
+       struct map_lookup *sys_map;
+       struct btrfs_fs_info *fs_info = root->fs_info;
+       struct btrfs_root *extent_root = fs_info->extent_root;
+       int ret;
+
+       ret = find_next_chunk(fs_info->chunk_root,
+                             BTRFS_FIRST_CHUNK_TREE_OBJECTID, &chunk_offset);
        BUG_ON(ret);
+
+       alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
+                       (fs_info->metadata_alloc_profile &
+                        fs_info->avail_metadata_alloc_bits);
+       alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
+
+       ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
+                                 &stripe_size, chunk_offset, alloc_profile);
+       BUG_ON(ret);
+
+       sys_chunk_offset = chunk_offset + chunk_size;
+
+       alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
+                       (fs_info->system_alloc_profile &
+                        fs_info->avail_system_alloc_bits);
+       alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
+
+       ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
+                                 &sys_chunk_size, &sys_stripe_size,
+                                 sys_chunk_offset, alloc_profile);
+       BUG_ON(ret);
+
+       ret = btrfs_add_device(trans, fs_info->chunk_root, device);
+       BUG_ON(ret);
+
+       /*
+        * Modifying chunk tree needs allocating new blocks from both
+        * system block group and metadata block group. So we only can
+        * do operations require modifying the chunk tree after both
+        * block groups were created.
+        */
+       ret = __finish_chunk_alloc(trans, extent_root, map, chunk_offset,
+                                  chunk_size, stripe_size);
+       BUG_ON(ret);
+
+       ret = __finish_chunk_alloc(trans, extent_root, sys_map,
+                                  sys_chunk_offset, sys_chunk_size,
+                                  sys_stripe_size);
+       BUG_ON(ret);
+       return 0;
+}
+
+int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
+{
+       struct extent_map *em;
+       struct map_lookup *map;
+       struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
+       int readonly = 0;
+       int i;
+
+       spin_lock(&map_tree->map_tree.lock);
+       em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
+       spin_unlock(&map_tree->map_tree.lock);
+       if (!em)
+               return 1;
+
+       map = (struct map_lookup *)em->bdev;
+       for (i = 0; i < map->num_stripes; i++) {
+               if (!map->stripes[i].dev->writeable) {
+                       readonly = 1;
+                       break;
+               }
+       }
        free_extent_map(em);
-       return ret;
+       return readonly;
 }
 
 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
@@ -1681,7 +2324,7 @@ void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
 {
        struct extent_map *em;
 
-       while(1) {
+       while (1) {
                spin_lock(&tree->map_tree.lock);
                em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
                if (em)
@@ -1756,9 +2399,8 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
        int max_errors = 0;
        struct btrfs_multi_bio *multi = NULL;
 
-       if (multi_ret && !(rw & (1 << BIO_RW))) {
+       if (multi_ret && !(rw & (1 << BIO_RW)))
                stripes_allocated = 1;
-       }
 again:
        if (multi_ret) {
                multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
@@ -1777,7 +2419,9 @@ again:
                return 0;
 
        if (!em) {
-               printk("unable to find logical %Lu len %Lu\n", logical, *length);
+               printk(KERN_CRIT "unable to find logical %llu len %llu\n",
+                      (unsigned long long)logical,
+                      (unsigned long long)*length);
                BUG();
        }
 
@@ -1884,9 +2528,8 @@ again:
                        device = map->stripes[stripe_index].dev;
                        if (device->bdev) {
                                bdi = blk_get_backing_dev_info(device->bdev);
-                               if (bdi->unplug_io_fn) {
+                               if (bdi->unplug_io_fn)
                                        bdi->unplug_io_fn(bdi, unplug_page);
-                               }
                        }
                } else {
                        multi->stripes[i].physical =
@@ -1914,6 +2557,91 @@ int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
                                 mirror_num, NULL);
 }
 
+int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
+                    u64 chunk_start, u64 physical, u64 devid,
+                    u64 **logical, int *naddrs, int *stripe_len)
+{
+       struct extent_map_tree *em_tree = &map_tree->map_tree;
+       struct extent_map *em;
+       struct map_lookup *map;
+       u64 *buf;
+       u64 bytenr;
+       u64 length;
+       u64 stripe_nr;
+       int i, j, nr = 0;
+
+       spin_lock(&em_tree->lock);
+       em = lookup_extent_mapping(em_tree, chunk_start, 1);
+       spin_unlock(&em_tree->lock);
+
+       BUG_ON(!em || em->start != chunk_start);
+       map = (struct map_lookup *)em->bdev;
+
+       length = em->len;
+       if (map->type & BTRFS_BLOCK_GROUP_RAID10)
+               do_div(length, map->num_stripes / map->sub_stripes);
+       else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
+               do_div(length, map->num_stripes);
+
+       buf = kzalloc(sizeof(u64) * map->num_stripes, GFP_NOFS);
+       BUG_ON(!buf);
+
+       for (i = 0; i < map->num_stripes; i++) {
+               if (devid && map->stripes[i].dev->devid != devid)
+                       continue;
+               if (map->stripes[i].physical > physical ||
+                   map->stripes[i].physical + length <= physical)
+                       continue;
+
+               stripe_nr = physical - map->stripes[i].physical;
+               do_div(stripe_nr, map->stripe_len);
+
+               if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
+                       stripe_nr = stripe_nr * map->num_stripes + i;
+                       do_div(stripe_nr, map->sub_stripes);
+               } else if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
+                       stripe_nr = stripe_nr * map->num_stripes + i;
+               }
+               bytenr = chunk_start + stripe_nr * map->stripe_len;
+               WARN_ON(nr >= map->num_stripes);
+               for (j = 0; j < nr; j++) {
+                       if (buf[j] == bytenr)
+                               break;
+               }
+               if (j == nr) {
+                       WARN_ON(nr >= map->num_stripes);
+                       buf[nr++] = bytenr;
+               }
+       }
+
+       for (i = 0; i < nr; i++) {     /* was "i > nr": loop never ran */
+               struct btrfs_multi_bio *multi;
+               struct btrfs_bio_stripe *stripe;
+               int ret;
+
+               length = 1;
+               ret = btrfs_map_block(map_tree, WRITE, buf[i],
+                                     &length, &multi, 0);
+               BUG_ON(ret);
+
+               stripe = multi->stripes;
+               for (j = 0; j < multi->num_stripes; j++, stripe++) {
+                       if (physical >= stripe->physical &&
+                           physical < stripe->physical + length)
+                               break;
+               }
+               BUG_ON(j >= multi->num_stripes);
+               kfree(multi);
+       }
+
+       *logical = buf;
+       *naddrs = nr;
+       *stripe_len = map->stripe_len;
+
+       free_extent_map(em);
+       return 0;
+}
+
 int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
                      u64 logical, struct page *page)
 {
@@ -1922,24 +2650,22 @@ int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
                                 NULL, 0, page);
 }
 
-
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
 static void end_bio_multi_stripe(struct bio *bio, int err)
-#else
-static int end_bio_multi_stripe(struct bio *bio,
-                                  unsigned int bytes_done, int err)
-#endif
 {
        struct btrfs_multi_bio *multi = bio->bi_private;
+       int is_orig_bio = 0;
 
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-       if (bio->bi_size)
-               return 1;
-#endif
        if (err)
                atomic_inc(&multi->error);
 
+       if (bio == multi->orig_bio)
+               is_orig_bio = 1;
+
        if (atomic_dec_and_test(&multi->stripes_pending)) {
+               if (!is_orig_bio) {
+                       bio_put(bio);
+                       bio = multi->orig_bio;
+               }
                bio->bi_private = multi->private;
                bio->bi_end_io = multi->end_io;
                /* only send an error to the higher layers if it is
@@ -1957,26 +2683,77 @@ static int end_bio_multi_stripe(struct bio *bio,
                }
                kfree(multi);
 
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-               bio_endio(bio, bio->bi_size, err);
-#else
                bio_endio(bio, err);
-#endif
-       } else {
+       } else if (!is_orig_bio) {
                bio_put(bio);
        }
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
+}
+
+struct async_sched {
+       struct bio *bio;
+       int rw;
+       struct btrfs_fs_info *info;
+       struct btrfs_work work;
+};
+
+/*
+ * see run_scheduled_bios for a description of why bios are collected for
+ * async submit.
+ *
+ * This will add one bio to the pending list for a device and make sure
+ * the work struct is scheduled.
+ */
+static noinline int schedule_bio(struct btrfs_root *root,
+                                struct btrfs_device *device,
+                                int rw, struct bio *bio)
+{
+       int should_queue = 1;
+
+       /* don't bother with additional async steps for reads, right now */
+       if (!(rw & (1 << BIO_RW))) {
+               bio_get(bio);
+               submit_bio(rw, bio);
+               bio_put(bio);
+               return 0;
+       }
+
+       /*
+        * nr_async_bios allows us to reliably return congestion to the
+        * higher layers.  Otherwise, the async bio makes it appear we have
+        * made progress against dirty pages when we've really just put it
+        * on a queue for later
+        */
+       atomic_inc(&root->fs_info->nr_async_bios);
+       WARN_ON(bio->bi_next);
+       bio->bi_next = NULL;
+       bio->bi_rw |= rw;
+
+       spin_lock(&device->io_lock);
+
+       if (device->pending_bio_tail)
+               device->pending_bio_tail->bi_next = bio;
+
+       device->pending_bio_tail = bio;
+       if (!device->pending_bios)
+               device->pending_bios = bio;
+       if (device->running_pending)
+               should_queue = 0;
+
+       spin_unlock(&device->io_lock);
+
+       if (should_queue)
+               btrfs_queue_worker(&root->fs_info->submit_workers,
+                                  &device->work);
        return 0;
-#endif
 }
 
 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
-                 int mirror_num)
+                 int mirror_num, int async_submit)
 {
        struct btrfs_mapping_tree *map_tree;
        struct btrfs_device *dev;
        struct bio *first_bio = bio;
-       u64 logical = bio->bi_sector << 9;
+       u64 logical = (u64)bio->bi_sector << 9;
        u64 length = 0;
        u64 map_length;
        struct btrfs_multi_bio *multi = NULL;
@@ -1994,15 +2771,18 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 
        total_devs = multi->num_stripes;
        if (map_length < length) {
-               printk("mapping failed logical %Lu bio len %Lu "
-                      "len %Lu\n", logical, length, map_length);
+               printk(KERN_CRIT "mapping failed logical %llu bio len %llu "
+                      "len %llu\n", (unsigned long long)logical,
+                      (unsigned long long)length,
+                      (unsigned long long)map_length);
                BUG();
        }
        multi->end_io = first_bio->bi_end_io;
        multi->private = first_bio->bi_private;
+       multi->orig_bio = first_bio;
        atomic_set(&multi->stripes_pending, multi->num_stripes);
 
-       while(dev_nr < total_devs) {
+       while (dev_nr < total_devs) {
                if (total_devs > 1) {
                        if (dev_nr < total_devs - 1) {
                                bio = bio_clone(first_bio, GFP_NOFS);
@@ -2015,20 +2795,17 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
                }
                bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
                dev = multi->stripes[dev_nr].dev;
+               BUG_ON(rw == WRITE && !dev->writeable);
                if (dev && dev->bdev) {
                        bio->bi_bdev = dev->bdev;
-                       spin_lock(&dev->io_lock);
-                       dev->total_ios++;
-                       spin_unlock(&dev->io_lock);
-                       submit_bio(rw, bio);
+                       if (async_submit)
+                               schedule_bio(root, dev, rw, bio);
+                       else
+                               submit_bio(rw, bio);
                } else {
                        bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
                        bio->bi_sector = logical >> 9;
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
-                       bio_endio(bio, bio->bi_size, -EIO);
-#else
                        bio_endio(bio, -EIO);
-#endif
                }
                dev_nr++;
        }
@@ -2038,11 +2815,23 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 }
 
 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
-                                      u8 *uuid)
+                                      u8 *uuid, u8 *fsid)
 {
-       struct list_head *head = &root->fs_info->fs_devices->devices;
-
-       return __find_device(head, devid, uuid);
+       struct btrfs_device *device;
+       struct btrfs_fs_devices *cur_devices;
+
+       cur_devices = root->fs_info->fs_devices;
+       while (cur_devices) {
+               if (!fsid ||
+                   !memcmp(cur_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
+                       device = __find_device(&cur_devices->devices,
+                                              devid, uuid);
+                       if (device)
+                               return device;
+               }
+               cur_devices = cur_devices->seed;
+       }
+       return NULL;
 }
 
 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
@@ -2052,20 +2841,22 @@ static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
 
        device = kzalloc(sizeof(*device), GFP_NOFS);
+       if (!device)
+               return NULL;
        list_add(&device->dev_list,
                 &fs_devices->devices);
-       list_add(&device->dev_alloc_list,
-                &fs_devices->alloc_list);
        device->barriers = 1;
        device->dev_root = root->fs_info->dev_root;
        device->devid = devid;
+       device->work.func = pending_bios_fn;
+       device->fs_devices = fs_devices;
        fs_devices->num_devices++;
        spin_lock_init(&device->io_lock);
+       INIT_LIST_HEAD(&device->dev_alloc_list);
        memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
        return device;
 }
 
-
 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
                          struct extent_buffer *leaf,
                          struct btrfs_chunk *chunk)
@@ -2114,6 +2905,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
        em->start = logical;
        em->len = length;
        em->block_start = 0;
+       em->block_len = em->len;
 
        map->num_stripes = num_stripes;
        map->io_width = btrfs_chunk_io_width(leaf, chunk);
@@ -2129,8 +2921,8 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
                read_extent_buffer(leaf, uuid, (unsigned long)
                                   btrfs_stripe_dev_uuid_nr(chunk, i),
                                   BTRFS_UUID_SIZE);
-               map->stripes[i].dev = btrfs_find_device(root, devid, uuid);
-
+               map->stripes[i].dev = btrfs_find_device(root, devid, uuid,
+                                                       NULL);
                if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
                        kfree(map);
                        free_extent_map(em);
@@ -2177,6 +2969,53 @@ static int fill_device_from_item(struct extent_buffer *leaf,
        return 0;
 }
 
+/*
+ * Make the seed filesystem identified by @fsid available to this fs.
+ * Returns 0 if that fsid is already chained on fs_devices->seed (or
+ * after successfully cloning and opening it), -ENOENT if no scanned
+ * filesystem matches the fsid, -EINVAL if the match is not actually a
+ * seeding filesystem, or the error from __btrfs_open_devices().
+ */
+static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
+{
+       struct btrfs_fs_devices *fs_devices;
+       int ret;
+
+       mutex_lock(&uuid_mutex);
+
+       /* walk the chain of already-attached seed filesystems first */
+       fs_devices = root->fs_info->fs_devices->seed;
+       while (fs_devices) {
+               if (!memcmp(fs_devices->fsid, fsid, BTRFS_UUID_SIZE)) {
+                       ret = 0;
+                       goto out;
+               }
+               fs_devices = fs_devices->seed;
+       }
+
+       fs_devices = find_fsid(fsid);
+       if (!fs_devices) {
+               ret = -ENOENT;
+               goto out;
+       }
+
+       /*
+        * work on a clone so the global scanned list is left untouched
+        * (NOTE(review): exact clone semantics live in clone_fs_devices)
+        */
+       fs_devices = clone_fs_devices(fs_devices);
+       if (IS_ERR(fs_devices)) {
+               ret = PTR_ERR(fs_devices);
+               goto out;
+       }
+
+       ret = __btrfs_open_devices(fs_devices, FMODE_READ,
+                                  root->fs_info->bdev_holder);
+       if (ret)
+               goto out;
+
+       /* only a filesystem marked as seeding may be used as a seed */
+       if (!fs_devices->seeding) {
+               __btrfs_close_devices(fs_devices);
+               free_fs_devices(fs_devices);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* push the clone onto the front of the seed chain */
+       fs_devices->seed = root->fs_info->fs_devices->seed;
+       root->fs_info->fs_devices->seed = fs_devices;
+out:
+       mutex_unlock(&uuid_mutex);
+       return ret;
+}
+
 /*
  * Read one DEV_ITEM out of a chunk-tree leaf and bind it to the
  * matching in-memory btrfs_device, pulling in its seed filesystem if
  * the item carries a foreign fsid.  With -o degraded a missing device
  * is replaced by a placeholder; otherwise it is an error.
  */
 static int read_one_dev(struct btrfs_root *root,
                        struct extent_buffer *leaf,
                        struct btrfs_dev_item *dev_item)
@@ -2184,30 +3023,50 @@ static int read_one_dev(struct btrfs_root *root,
        struct btrfs_device *device;
        u64 devid;
        int ret;
+       u8 fs_uuid[BTRFS_UUID_SIZE];
        u8 dev_uuid[BTRFS_UUID_SIZE];
 
        devid = btrfs_device_id(leaf, dev_item);
        read_extent_buffer(leaf, dev_uuid,
                           (unsigned long)btrfs_device_uuid(dev_item),
                           BTRFS_UUID_SIZE);
-       device = btrfs_find_device(root, devid, dev_uuid);
-       if (!device) {
-               printk("warning devid %Lu missing\n", devid);
-               device = add_missing_dev(root, devid, dev_uuid);
-               if (!device)
-                       return -ENOMEM;
+       read_extent_buffer(leaf, fs_uuid,
+                          (unsigned long)btrfs_device_fsid(dev_item),
+                          BTRFS_UUID_SIZE);
+
+       /* a foreign fsid means the device belongs to a seed filesystem */
+       if (memcmp(fs_uuid, root->fs_info->fsid, BTRFS_UUID_SIZE)) {
+               ret = open_seed_devices(root, fs_uuid);
+               if (ret && !btrfs_test_opt(root, DEGRADED))
+                       return ret;
+       }
+
+       device = btrfs_find_device(root, devid, dev_uuid, fs_uuid);
+       if (!device || !device->bdev) {
+               /* without -o degraded a missing/closed device is fatal */
+               if (!btrfs_test_opt(root, DEGRADED))
+                       return -EIO;
+
+               if (!device) {
+                       printk(KERN_WARNING "warning devid %llu missing\n",
+                              (unsigned long long)devid);
+                       device = add_missing_dev(root, devid, dev_uuid);
+                       if (!device)
+                               return -ENOMEM;
+               }
+       }
+
+       /*
+        * devices from another fs_devices (a seed) must be read-only and
+        * must match the generation recorded in the item
+        */
+       if (device->fs_devices != root->fs_info->fs_devices) {
+               BUG_ON(device->writeable);
+               if (device->generation !=
+                   btrfs_device_generation(leaf, dev_item))
+                       return -EINVAL;
        }
 
        fill_device_from_item(leaf, dev_item, device);
        device->dev_root = root->fs_info->dev_root;
        device->in_fs_metadata = 1;
+       /* writable devices contribute their capacity to total_rw_bytes */
+       if (device->writeable)
+               device->fs_devices->total_rw_bytes += device->total_bytes;
        ret = 0;
-#if 0
-       ret = btrfs_open_device(device);
-       if (ret) {
-               kfree(device);
-       }
-#endif
        return ret;
 }
 
@@ -2298,7 +3157,7 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
        key.type = 0;
 again:
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
-       while(1) {
+       while (1) {
                leaf = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(leaf)) {
@@ -2318,12 +3177,15 @@ again:
                                dev_item = btrfs_item_ptr(leaf, slot,
                                                  struct btrfs_dev_item);
                                ret = read_one_dev(root, leaf, dev_item);
-                               BUG_ON(ret);
+                               if (ret)
+                                       goto error;
                        }
                } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
                        struct btrfs_chunk *chunk;
                        chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
                        ret = read_one_chunk(root, &found_key, leaf, chunk);
+                       if (ret)
+                               goto error;
                }
                path->slots[0]++;
        }
@@ -2332,10 +3194,8 @@ again:
                btrfs_release_path(root, path);
                goto again;
        }
-
-       btrfs_free_path(path);
        ret = 0;
 error:
+       btrfs_free_path(path);
        return ret;
 }
-