Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 3451e1c..d6e3af8 100644
  */
 #include <linux/sched.h>
 #include <linux/bio.h>
+#include <linux/slab.h>
 #include <linux/buffer_head.h>
 #include <linux/blkdev.h>
 #include <linux/random.h>
-#include <linux/version.h>
+#include <linux/iocontext.h>
 #include <asm/div64.h>
 #include "compat.h"
 #include "ctree.h"
@@ -104,10 +105,8 @@ static noinline struct btrfs_device *__find_device(struct list_head *head,
                                                   u64 devid, u8 *uuid)
 {
        struct btrfs_device *dev;
-       struct list_head *cur;
 
-       list_for_each(cur, head) {
-               dev = list_entry(cur, struct btrfs_device, dev_list);
+       list_for_each_entry(dev, head, dev_list) {
                if (dev->devid == devid &&
                    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
                        return dev;
@@ -118,17 +117,29 @@ static noinline struct btrfs_device *__find_device(struct list_head *head,
 
 static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
 {
-       struct list_head *cur;
        struct btrfs_fs_devices *fs_devices;
 
-       list_for_each(cur, &fs_uuids) {
-               fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
+       list_for_each_entry(fs_devices, &fs_uuids, list) {
                if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
                        return fs_devices;
        }
        return NULL;
 }
 
+static void requeue_list(struct btrfs_pending_bios *pending_bios,
+                       struct bio *head, struct bio *tail)
+{
+
+       struct bio *old_head;
+
+       old_head = pending_bios->head;
+       pending_bios->head = head;
+       if (pending_bios->tail)
+               tail->bi_next = old_head;
+       else
+               pending_bios->tail = tail;
+}
+
 /*
  * we try to collect pending bios for a device so we don't get a large
  * number of procs sending bios down to the same device.  This greatly
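
The requeue_list() helper added above splices an unsubmitted bio chain
back onto the front of a head/tail singly-linked list. A minimal
userspace model of the splice (the structs are stand-ins, not the
kernel's struct bio):

    struct xbio { struct xbio *bi_next; };
    struct pending { struct xbio *head; struct xbio *tail; };

    /* Put chain [head..tail] back at the front: if the list still has
     * entries, link the requeued tail ahead of the old head; if it was
     * drained meanwhile, the requeued tail becomes the list tail. */
    static void requeue(struct pending *p, struct xbio *head,
                        struct xbio *tail)
    {
            struct xbio *old_head = p->head;

            p->head = head;
            if (p->tail)
                    tail->bi_next = old_head;
            else
                    p->tail = tail;
    }
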
@@ -145,30 +156,49 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
        struct bio *pending;
        struct backing_dev_info *bdi;
        struct btrfs_fs_info *fs_info;
+       struct btrfs_pending_bios *pending_bios;
        struct bio *tail;
        struct bio *cur;
        int again = 0;
-       unsigned long num_run = 0;
+       unsigned long num_run;
+       unsigned long num_sync_run;
+       unsigned long batch_run = 0;
        unsigned long limit;
+       unsigned long last_waited = 0;
+       int force_reg = 0;
 
-       bdi = device->bdev->bd_inode->i_mapping->backing_dev_info;
+       bdi = blk_get_backing_dev_info(device->bdev);
        fs_info = device->dev_root->fs_info;
        limit = btrfs_async_submit_limit(fs_info);
        limit = limit * 2 / 3;
 
+       /* we want to make sure that every time we switch from the sync
+        * list to the normal list, we unplug
+        */
+       num_sync_run = 0;
+
 loop:
        spin_lock(&device->io_lock);
 
+loop_lock:
+       num_run = 0;
+
        /* take all the bios off the list at once and process them
         * later on (without the lock held).  But, remember the
         * tail and other pointers so the bios can be properly reinserted
         * into the list if we hit congestion
         */
-       pending = device->pending_bios;
-       tail = device->pending_bio_tail;
+       if (!force_reg && device->pending_sync_bios.head) {
+               pending_bios = &device->pending_sync_bios;
+               force_reg = 1;
+       } else {
+               pending_bios = &device->pending_bios;
+               force_reg = 0;
+       }
+
+       pending = pending_bios->head;
+       tail = pending_bios->tail;
        WARN_ON(pending && !tail);
-       device->pending_bios = NULL;
-       device->pending_bio_tail = NULL;
 
        /*
         * if pending was null this time around, no bios need processing
@@ -178,16 +208,45 @@ loop:
         * device->running_pending is used to synchronize with the
         * schedule_bio code.
         */
-       if (pending) {
-               again = 1;
-               device->running_pending = 1;
-       } else {
+       if (device->pending_sync_bios.head == NULL &&
+           device->pending_bios.head == NULL) {
                again = 0;
                device->running_pending = 0;
+       } else {
+               again = 1;
+               device->running_pending = 1;
        }
+
+       pending_bios->head = NULL;
+       pending_bios->tail = NULL;
+
        spin_unlock(&device->io_lock);
 
+       /*
+        * if we're doing the regular priority list, make sure we unplug
+        * for any high prio bios we've sent down
+        */
+       if (pending_bios == &device->pending_bios && num_sync_run > 0) {
+               num_sync_run = 0;
+               blk_run_backing_dev(bdi, NULL);
+       }
+
        while (pending) {
+
+               rmb();
+               /* we want to work on both lists, but do more bios on the
+                * sync list than the regular list
+                */
+               if ((num_run > 32 &&
+                   pending_bios != &device->pending_sync_bios &&
+                   device->pending_sync_bios.head) ||
+                  (num_run > 64 && pending_bios == &device->pending_sync_bios &&
+                   device->pending_bios.head)) {
+                       spin_lock(&device->io_lock);
+                       requeue_list(pending_bios, pending, tail);
+                       goto loop_lock;
+               }
+
                cur = pending;
                pending = pending->bi_next;
                cur->bi_next = NULL;
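
run_scheduled_bios() now drains two queues, and the thresholds above keep
either one from starving the other: after 32 bios off the regular list
(or 64 off the sync list) while the other list has work, the remainder is
requeued and the loop restarts under the lock. The policy in isolation
(the constants mirror the hunk; names are stand-ins):

    enum { REG_BATCH = 32, SYNC_BATCH = 64 };

    /* Return 1 when the drain loop should requeue what is left and
     * switch lists; mirrors the num_run checks above. */
    static int should_switch(unsigned long num_run, int on_sync_list,
                             int sync_has_work, int reg_has_work)
    {
            if (!on_sync_list && num_run > REG_BATCH && sync_has_work)
                    return 1;   /* give the sync list a turn */
            if (on_sync_list && num_run > SYNC_BATCH && reg_has_work)
                    return 1;   /* don't starve the regular list */
            return 0;
    }
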
@@ -198,37 +257,96 @@ loop:
                        wake_up(&fs_info->async_submit_wait);
 
                BUG_ON(atomic_read(&cur->bi_cnt) == 0);
-               bio_get(cur);
+
+               if (bio_rw_flagged(cur, BIO_RW_SYNCIO))
+                       num_sync_run++;
+
                submit_bio(cur->bi_rw, cur);
-               bio_put(cur);
                num_run++;
+               batch_run++;
+               if (need_resched()) {
+                       if (num_sync_run) {
+                               blk_run_backing_dev(bdi, NULL);
+                               num_sync_run = 0;
+                       }
+                       cond_resched();
+               }
 
                /*
                 * we made progress, there is more work to do and the bdi
                 * is now congested.  Back off and let other work structs
                 * run instead
                 */
-               if (pending && bdi_write_congested(bdi) &&
+               if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
                    fs_info->fs_devices->open_devices > 1) {
-                       struct bio *old_head;
+                       struct io_context *ioc;
 
-                       spin_lock(&device->io_lock);
+                       ioc = current->io_context;
 
-                       old_head = device->pending_bios;
-                       device->pending_bios = pending;
-                       if (device->pending_bio_tail)
-                               tail->bi_next = old_head;
-                       else
-                               device->pending_bio_tail = tail;
-                       device->running_pending = 0;
+                       /*
+                        * the main goal here is that we don't want to
+                        * block if we're going to be able to submit
+                        * more requests without blocking.
+                        *
+                        * This code does two great things, it pokes into
+                        * the elevator code from a filesystem _and_
+                        * it makes assumptions about how batching works.
+                        */
+                       if (ioc && ioc->nr_batch_requests > 0 &&
+                           time_before(jiffies, ioc->last_waited + HZ/50UL) &&
+                           (last_waited == 0 ||
+                            ioc->last_waited == last_waited)) {
+                               /*
+                                * we want to go through our batch of
+                                * requests and stop.  So, we copy out
+                                * the ioc->last_waited time and test
+                                * against it before looping
+                                */
+                               last_waited = ioc->last_waited;
+                               if (need_resched()) {
+                                       if (num_sync_run) {
+                                               blk_run_backing_dev(bdi, NULL);
+                                               num_sync_run = 0;
+                                       }
+                                       cond_resched();
+                               }
+                               continue;
+                       }
+                       spin_lock(&device->io_lock);
+                       requeue_list(pending_bios, pending, tail);
+                       device->running_pending = 1;
 
                        spin_unlock(&device->io_lock);
                        btrfs_requeue_work(&device->work);
                        goto done;
                }
        }
+
+       if (num_sync_run) {
+               num_sync_run = 0;
+               blk_run_backing_dev(bdi, NULL);
+       }
+       /*
+        * IO has already been through a long path to get here.  Checksumming,
+        * async helper threads, perhaps compression.  We've done a pretty
+        * good job of collecting a batch of IO and should just unplug
+        * the device right away.
+        *
+        * This will help anyone who is waiting on the IO, they might have
+        * already unplugged, but managed to do so before the bio they
+        * cared about found its way down here.
+        */
+       blk_run_backing_dev(bdi, NULL);
+
+       cond_resched();
        if (again)
                goto loop;
+
+       spin_lock(&device->io_lock);
+       if (device->pending_bios.head || device->pending_sync_bios.head)
+               goto loop_lock;
+       spin_unlock(&device->io_lock);
+
 done:
        return 0;
 }
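
The congestion branch no longer requeues unconditionally: if the current
task still holds elevator batch slots (ioc->nr_batch_requests) and its
last wait was under HZ/50 jiffies (20ms) ago, it keeps submitting. The
predicate, pulled out into plain C (the jiffies-wraparound handling that
time_before() provides is elided):

    /* Keep batching while the task owns batch slots, the last wait was
     * recent, and last_waited hasn't moved since we sampled it. */
    static int keep_batching(unsigned long now, unsigned long hz,
                             unsigned long ioc_last_waited,
                             int nr_batch_requests,
                             unsigned long *sampled)
    {
            if (nr_batch_requests <= 0)
                    return 0;
            if (now >= ioc_last_waited + hz / 50)
                    return 0;   /* stand-in for time_before() */
            if (*sampled && ioc_last_waited != *sampled)
                    return 0;   /* a new wait happened: stop after this batch */
            *sampled = ioc_last_waited;
            return 1;
    }
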
@@ -248,6 +366,7 @@ static noinline int device_list_add(const char *path,
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices;
        u64 found_transid = btrfs_super_generation(disk_super);
+       char *name;
 
        fs_devices = find_fsid(disk_super->fsid);
        if (!fs_devices) {
@@ -260,6 +379,7 @@ static noinline int device_list_add(const char *path,
                memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
+               mutex_init(&fs_devices->device_list_mutex);
                device = NULL;
        } else {
                device = __find_device(&fs_devices->devices, devid,
@@ -286,9 +406,19 @@ static noinline int device_list_add(const char *path,
                        return -ENOMEM;
                }
                INIT_LIST_HEAD(&device->dev_alloc_list);
+
+               mutex_lock(&fs_devices->device_list_mutex);
                list_add(&device->dev_list, &fs_devices->devices);
+               mutex_unlock(&fs_devices->device_list_mutex);
+
                device->fs_devices = fs_devices;
                fs_devices->num_devices++;
+       } else if (strcmp(device->name, path)) {
+               name = kstrdup(path, GFP_NOFS);
+               if (!name)
+                       return -ENOMEM;
+               kfree(device->name);
+               device->name = name;
        }
 
        if (found_transid > fs_devices->latest_trans) {
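
The new else-if arm handles a known device reappearing under a different
path: the replacement name is allocated before the old one is freed, so
an allocation failure leaves the device with a valid name. The same
pattern in userspace C:

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    /* Allocate-then-swap, as the kstrdup()/kfree() pair above does:
     * on -ENOMEM the caller still holds the old string. */
    static int replace_name(char **slot, const char *path)
    {
            char *name = strdup(path);

            if (!name)
                    return -ENOMEM;
            free(*slot);
            *slot = name;
            return 0;
    }
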
@@ -312,18 +442,22 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
        INIT_LIST_HEAD(&fs_devices->devices);
        INIT_LIST_HEAD(&fs_devices->alloc_list);
        INIT_LIST_HEAD(&fs_devices->list);
+       mutex_init(&fs_devices->device_list_mutex);
        fs_devices->latest_devid = orig->latest_devid;
        fs_devices->latest_trans = orig->latest_trans;
        memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
 
+       mutex_lock(&orig->device_list_mutex);
        list_for_each_entry(orig_dev, &orig->devices, dev_list) {
                device = kzalloc(sizeof(*device), GFP_NOFS);
                if (!device)
                        goto error;
 
                device->name = kstrdup(orig_dev->name, GFP_NOFS);
-               if (!device->name)
+               if (!device->name) {
+                       kfree(device);
                        goto error;
+               }
 
                device->devid = orig_dev->devid;
                device->work.func = pending_bios_fn;
@@ -337,22 +471,22 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
                device->fs_devices = fs_devices;
                fs_devices->num_devices++;
        }
+       mutex_unlock(&orig->device_list_mutex);
        return fs_devices;
 error:
+       mutex_unlock(&orig->device_list_mutex);
        free_fs_devices(fs_devices);
        return ERR_PTR(-ENOMEM);
 }
 
 int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
 {
-       struct list_head *tmp;
-       struct list_head *cur;
-       struct btrfs_device *device;
+       struct btrfs_device *device, *next;
 
        mutex_lock(&uuid_mutex);
 again:
-       list_for_each_safe(cur, tmp, &fs_devices->devices) {
-               device = list_entry(cur, struct btrfs_device, dev_list);
+       mutex_lock(&fs_devices->device_list_mutex);
+       list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
                if (device->in_fs_metadata)
                        continue;
 
@@ -371,6 +505,7 @@ again:
                kfree(device->name);
                kfree(device);
        }
+       mutex_unlock(&fs_devices->device_list_mutex);
 
        if (fs_devices->seed) {
                fs_devices = fs_devices->seed;
@@ -383,14 +518,12 @@ again:
 
 static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
 {
-       struct list_head *cur;
        struct btrfs_device *device;
 
        if (--fs_devices->opened > 0)
                return 0;
 
-       list_for_each(cur, &fs_devices->devices) {
-               device = list_entry(cur, struct btrfs_device, dev_list);
+       list_for_each_entry(device, &fs_devices->devices, dev_list) {
                if (device->bdev) {
                        close_bdev_exclusive(device->bdev, device->mode);
                        fs_devices->open_devices--;
@@ -439,7 +572,6 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
 {
        struct block_device *bdev;
        struct list_head *head = &fs_devices->devices;
-       struct list_head *cur;
        struct btrfs_device *device;
        struct block_device *latest_bdev = NULL;
        struct buffer_head *bh;
@@ -450,8 +582,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
        int seeding = 1;
        int ret = 0;
 
-       list_for_each(cur, head) {
-               device = list_entry(cur, struct btrfs_device, dev_list);
+       list_for_each_entry(device, head, dev_list) {
                if (device->bdev)
                        continue;
                if (!device->name)
@@ -469,7 +600,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                        goto error_close;
 
                disk_super = (struct btrfs_super_block *)bh->b_data;
-               devid = le64_to_cpu(disk_super->dev_item.devid);
+               devid = btrfs_stack_device_id(&disk_super->dev_item);
                if (devid != device->devid)
                        goto error_brelse;
 
@@ -495,6 +626,9 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                device->in_fs_metadata = 0;
                device->mode = flags;
 
+               if (!blk_queue_nonrot(bdev_get_queue(bdev)))
+                       fs_devices->rotating = 1;
+
                fs_devices->open_devices++;
                if (device->writeable) {
                        fs_devices->rw_devices++;
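
Each opened device is now checked with blk_queue_nonrot(); a single
rotating device sets fs_devices->rotating, which the mount code can use
for SSD heuristics. The same flag is visible from userspace via sysfs
(the device path below is an assumption for the example):

    #include <stdio.h>

    int main(void)
    {
            /* 1 = rotating (spinning disk), 0 = non-rotating (SSD) */
            FILE *f = fopen("/sys/block/sda/queue/rotational", "r");
            int rot = 1;    /* assume rotating if the flag is unreadable */

            if (f) {
                    if (fscanf(f, "%d", &rot) != 1)
                            rot = 1;
                    fclose(f);
            }
            printf("rotational: %d\n", rot);
            return 0;
    }
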
@@ -568,7 +702,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
                goto error_close;
        }
        disk_super = (struct btrfs_super_block *)bh->b_data;
-       devid = le64_to_cpu(disk_super->dev_item.devid);
+       devid = btrfs_stack_device_id(&disk_super->dev_item);
        transid = btrfs_super_generation(disk_super);
        if (disk_super->label[0])
                printk(KERN_INFO "device label %s ", disk_super->label);
@@ -578,7 +712,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
                       *(unsigned long long *)disk_super->fsid,
                       *(unsigned long long *)(disk_super->fsid + 8));
        }
-       printk(KERN_INFO "devid %llu transid %llu %s\n",
+       printk(KERN_CONT "devid %llu transid %llu %s\n",
               (unsigned long long)devid, (unsigned long long)transid, path);
        ret = device_list_add(path, disk_super, devid, fs_devices_ret);
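
The level change is a log-output fix: the devid/transid fragment finishes
the line started by the "device label"/"device fsid" printk just above
it, and a second KERN_INFO would begin a new log record instead. In
context (lifted from the hunk):

    printk(KERN_INFO "device label %s ", disk_super->label);
    /* same logical line, so append rather than start a new record: */
    printk(KERN_CONT "devid %llu transid %llu %s\n",
           (unsigned long long)devid, (unsigned long long)transid, path);
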
 
@@ -595,9 +729,9 @@ error:
  * called very infrequently and that a given device has a small number
  * of extents
  */
-static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
-                                        struct btrfs_device *device,
-                                        u64 num_bytes, u64 *start)
+int find_free_dev_extent(struct btrfs_trans_handle *trans,
+                        struct btrfs_device *device, u64 num_bytes,
+                        u64 *start, u64 *max_avail)
 {
        struct btrfs_key key;
        struct btrfs_root *root = device->dev_root;
@@ -634,9 +768,13 @@ static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
        ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;
-       ret = btrfs_previous_item(root, path, 0, key.type);
-       if (ret < 0)
-               goto error;
+       if (ret > 0) {
+               ret = btrfs_previous_item(root, path, key.objectid, key.type);
+               if (ret < 0)
+                       goto error;
+               if (ret > 0)
+                       start_found = 1;
+       }
        l = path->nodes[0];
        btrfs_item_key_to_cpu(l, &key, path->slots[0]);
        while (1) {
@@ -679,6 +817,10 @@ no_more_items:
                        if (last_byte < search_start)
                                last_byte = search_start;
                        hole_size = key.offset - last_byte;
+
+                       if (hole_size > *max_avail)
+                               *max_avail = hole_size;
+
                        if (key.offset > last_byte &&
                            hole_size >= num_bytes) {
                                *start = last_byte;
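
find_free_dev_extent() is exported and gains a max_avail out-parameter:
even when no hole fits num_bytes, the caller learns the largest hole
seen, so the chunk allocator can shrink its request and retry rather than
fail blind. The scan shape, over a sorted, non-overlapping extent list
(the real function walks dev extent items in the tree and also considers
the hole at the end of the device):

    typedef unsigned long long u64;

    /* off[i]/len[i] describe allocated extents in offset order. Find a
     * hole >= need; record the biggest hole either way. Returns 0 and
     * sets *start on success, -1 when nothing fits. */
    static int find_hole(const u64 *off, const u64 *len, int n,
                         u64 need, u64 *start, u64 *max_avail)
    {
            u64 last = 0;
            int i;

            for (i = 0; i < n; i++) {
                    u64 hole = off[i] > last ? off[i] - last : 0;

                    if (hole > *max_avail)
                            *max_avail = hole;
                    if (hole >= need) {
                            *start = last;
                            return 0;
                    }
                    last = off[i] + len[i];
            }
            return -1;      /* caller may retry with a smaller need */
    }
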
@@ -955,7 +1097,7 @@ static int btrfs_rm_dev_item(struct btrfs_root *root,
        if (!path)
                return -ENOMEM;
 
-       trans = btrfs_start_transaction(root, 1);
+       trans = btrfs_start_transaction(root, 0);
        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;
@@ -1001,7 +1143,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
                root->fs_info->avail_metadata_alloc_bits;
 
        if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
-           root->fs_info->fs_devices->rw_devices <= 4) {
+           root->fs_info->fs_devices->num_devices <= 4) {
                printk(KERN_ERR "btrfs: unable to go below four devices "
                       "on raid10\n");
                ret = -EINVAL;
@@ -1009,7 +1151,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
        }
 
        if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
-           root->fs_info->fs_devices->rw_devices <= 2) {
+           root->fs_info->fs_devices->num_devices <= 2) {
                printk(KERN_ERR "btrfs: unable to go below two "
                       "devices on raid1\n");
                ret = -EINVAL;
@@ -1017,19 +1159,19 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
        }
 
        if (strcmp(device_path, "missing") == 0) {
-               struct list_head *cur;
                struct list_head *devices;
                struct btrfs_device *tmp;
 
                device = NULL;
                devices = &root->fs_info->fs_devices->devices;
-               list_for_each(cur, devices) {
-                       tmp = list_entry(cur, struct btrfs_device, dev_list);
+               mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
+               list_for_each_entry(tmp, devices, dev_list) {
                        if (tmp->in_fs_metadata && !tmp->bdev) {
                                device = tmp;
                                break;
                        }
                }
+               mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
                bdev = NULL;
                bh = NULL;
                disk_super = NULL;
@@ -1053,7 +1195,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
                        goto error_close;
                }
                disk_super = (struct btrfs_super_block *)bh->b_data;
-               devid = le64_to_cpu(disk_super->dev_item.devid);
+               devid = btrfs_stack_device_id(&disk_super->dev_item);
                dev_uuid = disk_super->dev_item.uuid;
                device = btrfs_find_device(root, devid, dev_uuid,
                                           disk_super->fsid);
@@ -1084,7 +1226,16 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
                goto error_brelse;
 
        device->in_fs_metadata = 0;
+
+       /*
+        * the device list mutex makes sure that we don't change
+        * the device list while someone else is writing out all
+        * the device supers.
+        */
+       mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
        list_del_init(&device->dev_list);
+       mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+
        device->fs_devices->num_devices--;
 
        next_device = list_entry(root->fs_info->fs_devices->devices.next,
@@ -1178,6 +1329,7 @@ static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
        seed_devices->opened = 1;
        INIT_LIST_HEAD(&seed_devices->devices);
        INIT_LIST_HEAD(&seed_devices->alloc_list);
+       mutex_init(&seed_devices->device_list_mutex);
        list_splice_init(&fs_devices->devices, &seed_devices->devices);
        list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
        list_for_each_entry(device, &seed_devices->devices, dev_list) {
@@ -1280,7 +1432,6 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        struct btrfs_trans_handle *trans;
        struct btrfs_device *device;
        struct block_device *bdev;
-       struct list_head *cur;
        struct list_head *devices;
        struct super_block *sb = root->fs_info->sb;
        u64 total_bytes;
@@ -1291,8 +1442,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
                return -EINVAL;
 
        bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
-       if (!bdev)
-               return -EIO;
+       if (IS_ERR(bdev))
+               return PTR_ERR(bdev);
 
        if (root->fs_info->fs_devices->seeding) {
                seeding_dev = 1;
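
open_bdev_exclusive() reports failure with ERR_PTR(-errno), never NULL,
so the old "if (!bdev)" test let error pointers escape; PTR_ERR() now
propagates the real errno. The convention, modeled in userspace C (the
kernel caps encoded errors at MAX_ERRNO = 4095):

    /* Error codes ride in the top page of pointer values. */
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-4095;
    }
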
@@ -1304,8 +1455,11 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        mutex_lock(&root->fs_info->volume_mutex);
 
        devices = &root->fs_info->fs_devices->devices;
-       list_for_each(cur, devices) {
-               device = list_entry(cur, struct btrfs_device, dev_list);
+       /*
+        * we have the volume lock, so we don't need the extra
+        * device list mutex while reading the list here.
+        */
+       list_for_each_entry(device, devices, dev_list) {
                if (device->bdev == bdev) {
                        ret = -EEXIST;
                        goto error;
@@ -1332,7 +1486,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
                goto error;
        }
 
-       trans = btrfs_start_transaction(root, 1);
+       trans = btrfs_start_transaction(root, 0);
        lock_chunks(root);
 
        device->barriers = 1;
@@ -1345,6 +1499,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        device->io_align = root->sectorsize;
        device->sector_size = root->sectorsize;
        device->total_bytes = i_size_read(bdev->bd_inode);
+       device->disk_total_bytes = device->total_bytes;
        device->dev_root = root->fs_info->dev_root;
        device->bdev = bdev;
        device->in_fs_metadata = 1;
@@ -1358,6 +1513,12 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        }
 
        device->fs_devices = root->fs_info->fs_devices;
+
+       /*
+        * we don't want write_supers to jump in here with our device
+        * half setup
+        */
+       mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
        list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
        list_add(&device->dev_alloc_list,
                 &root->fs_info->fs_devices->alloc_list);
@@ -1366,6 +1527,9 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        root->fs_info->fs_devices->rw_devices++;
        root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
 
+       if (!blk_queue_nonrot(bdev_get_queue(bdev)))
+               root->fs_info->fs_devices->rotating = 1;
+
        total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
        btrfs_set_super_total_bytes(&root->fs_info->super_copy,
                                    total_bytes + device->total_bytes);
@@ -1373,6 +1537,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
        btrfs_set_super_num_devices(&root->fs_info->super_copy,
                                    total_bytes + 1);
+       mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
 
        if (seeding_dev) {
                ret = init_first_rw_device(trans, root, device);
@@ -1383,6 +1548,12 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
                ret = btrfs_add_device(trans, root, device);
        }
 
+       /*
+        * we've got more storage, clear any full flags on the space
+        * infos
+        */
+       btrfs_clear_space_info_full(root->fs_info);
+
        unlock_chunks(root);
        btrfs_commit_transaction(trans, root);
 
@@ -1442,7 +1613,7 @@ static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
        btrfs_set_device_io_align(leaf, dev_item, device->io_align);
        btrfs_set_device_io_width(leaf, dev_item, device->io_width);
        btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
-       btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
+       btrfs_set_device_total_bytes(leaf, dev_item, device->disk_total_bytes);
        btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
        btrfs_mark_buffer_dirty(leaf);
 
@@ -1468,6 +1639,9 @@ static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
        device->fs_devices->total_rw_bytes += diff;
 
        device->total_bytes = new_size;
+       device->disk_total_bytes = new_size;
+       btrfs_clear_space_info_full(device->dev_root->fs_info);
+
        return btrfs_update_device(trans, device);
 }
 
@@ -1567,17 +1741,20 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
        int ret;
        int i;
 
-       printk(KERN_INFO "btrfs relocating chunk %llu\n",
-              (unsigned long long)chunk_offset);
        root = root->fs_info->chunk_root;
        extent_root = root->fs_info->extent_root;
        em_tree = &root->fs_info->mapping_tree.map_tree;
 
+       ret = btrfs_can_relocate(extent_root, chunk_offset);
+       if (ret)
+               return -ENOSPC;
+
        /* step one, relocate all the extents inside this chunk */
        ret = btrfs_relocate_block_group(extent_root, chunk_offset);
-       BUG_ON(ret);
+       if (ret)
+               return ret;
 
-       trans = btrfs_start_transaction(root, 1);
+       trans = btrfs_start_transaction(root, 0);
        BUG_ON(!trans);
 
        lock_chunks(root);
@@ -1586,9 +1763,9 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
         * step two, delete the device extents and the
         * chunk tree entries
         */
-       spin_lock(&em_tree->lock);
+       read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, chunk_offset, 1);
-       spin_unlock(&em_tree->lock);
+       read_unlock(&em_tree->lock);
 
        BUG_ON(em->start > chunk_offset ||
               em->start + em->len < chunk_offset);
@@ -1617,9 +1794,9 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
        ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
        BUG_ON(ret);
 
-       spin_lock(&em_tree->lock);
+       write_lock(&em_tree->lock);
        remove_extent_mapping(em_tree, em);
-       spin_unlock(&em_tree->lock);
+       write_unlock(&em_tree->lock);
 
        kfree(map);
        em->bdev = NULL;
@@ -1644,12 +1821,15 @@ static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
        struct btrfs_key found_key;
        u64 chunk_tree = chunk_root->root_key.objectid;
        u64 chunk_type;
+       bool retried = false;
+       int failed = 0;
        int ret;
 
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
 
+again:
        key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
        key.offset = (u64)-1;
        key.type = BTRFS_CHUNK_ITEM_KEY;
@@ -1679,7 +1859,10 @@ static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
                        ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
                                                   found_key.objectid,
                                                   found_key.offset);
-                       BUG_ON(ret);
+                       if (ret == -ENOSPC)
+                               failed++;
+                       else if (ret)
+                               BUG();
                }
 
                if (found_key.offset == 0)
@@ -1687,6 +1870,14 @@ static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
                key.offset = found_key.offset - 1;
        }
        ret = 0;
+       if (failed && !retried) {
+               failed = 0;
+               retried = true;
+               goto again;
+       } else if (failed && retried) {
+               WARN_ON(1);
+               ret = -ENOSPC;
+       }
 error:
        btrfs_free_path(path);
        return ret;
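
System-chunk relocation now tolerates transient -ENOSPC: failures are
counted, the whole walk is retried once (earlier relocations may have
freed room), and only a second full pass of failures is fatal. The same
pattern reappears in btrfs_shrink_device() below. Reduced to its shape:

    #include <errno.h>

    /* one_chunk() is a stand-in for btrfs_relocate_chunk(). */
    static int relocate_all(int (*one_chunk)(int idx), int nr)
    {
            int failed, retried = 0, i;
    again:
            failed = 0;
            for (i = 0; i < nr; i++) {
                    int ret = one_chunk(i);

                    if (ret == -ENOSPC)
                            failed++;
                    else if (ret)
                            return ret;
            }
            if (failed && !retried) {
                    retried = 1;
                    goto again;
            }
            return failed ? -ENOSPC : 0;
    }
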
@@ -1704,7 +1895,6 @@ static u64 div_factor(u64 num, int factor)
 int btrfs_balance(struct btrfs_root *dev_root)
 {
        int ret;
-       struct list_head *cur;
        struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
        struct btrfs_device *device;
        u64 old_size;
@@ -1723,8 +1913,7 @@ int btrfs_balance(struct btrfs_root *dev_root)
        dev_root = dev_root->fs_info->dev_root;
 
        /* step one make some room on all the devices */
-       list_for_each(cur, devices) {
-               device = list_entry(cur, struct btrfs_device, dev_list);
+       list_for_each_entry(device, devices, dev_list) {
                old_size = device->total_bytes;
                size_to_free = div_factor(old_size, 1);
                size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
@@ -1733,9 +1922,11 @@ int btrfs_balance(struct btrfs_root *dev_root)
                        continue;
 
                ret = btrfs_shrink_device(device, old_size - size_to_free);
+               if (ret == -ENOSPC)
+                       break;
                BUG_ON(ret);
 
-               trans = btrfs_start_transaction(dev_root, 1);
+               trans = btrfs_start_transaction(dev_root, 0);
                BUG_ON(!trans);
 
                ret = btrfs_grow_device(trans, device, old_size);
@@ -1777,9 +1968,8 @@ int btrfs_balance(struct btrfs_root *dev_root)
                chunk = btrfs_item_ptr(path->nodes[0],
                                       path->slots[0],
                                       struct btrfs_chunk);
-               key.offset = found_key.offset;
                /* chunk zero is special */
-               if (key.offset == 0)
+               if (found_key.offset == 0)
                        break;
 
                btrfs_release_path(chunk_root, path);
@@ -1787,7 +1977,8 @@ int btrfs_balance(struct btrfs_root *dev_root)
                                           chunk_root->root_key.objectid,
                                           found_key.objectid,
                                           found_key.offset);
-               BUG_ON(ret);
+               BUG_ON(ret && ret != -ENOSPC);
+               key.offset = found_key.offset - 1;
        }
        ret = 0;
 error:
@@ -1813,10 +2004,13 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
        u64 chunk_offset;
        int ret;
        int slot;
+       int failed = 0;
+       bool retried = false;
        struct extent_buffer *l;
        struct btrfs_key key;
        struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
        u64 old_total = btrfs_super_total_bytes(super_copy);
+       u64 old_size = device->total_bytes;
        u64 diff = device->total_bytes - new_size;
 
        if (new_size >= device->total_bytes)
@@ -1826,12 +2020,6 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
        if (!path)
                return -ENOMEM;
 
-       trans = btrfs_start_transaction(root, 1);
-       if (!trans) {
-               ret = -ENOMEM;
-               goto done;
-       }
-
        path->reada = 2;
 
        lock_chunks(root);
@@ -1839,17 +2027,9 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
        device->total_bytes = new_size;
        if (device->writeable)
                device->fs_devices->total_rw_bytes -= diff;
-       ret = btrfs_update_device(trans, device);
-       if (ret) {
-               unlock_chunks(root);
-               btrfs_end_transaction(trans, root);
-               goto done;
-       }
-       WARN_ON(diff > old_total);
-       btrfs_set_super_total_bytes(super_copy, old_total - diff);
        unlock_chunks(root);
-       btrfs_end_transaction(trans, root);
 
+again:
        key.objectid = device->devid;
        key.offset = (u64)-1;
        key.type = BTRFS_DEV_EXTENT_KEY;
@@ -1864,21 +2044,26 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
                        goto done;
                if (ret) {
                        ret = 0;
-                       goto done;
+                       btrfs_release_path(root, path);
+                       break;
                }
 
                l = path->nodes[0];
                slot = path->slots[0];
                btrfs_item_key_to_cpu(l, &key, path->slots[0]);
 
-               if (key.objectid != device->devid)
-                       goto done;
+               if (key.objectid != device->devid) {
+                       btrfs_release_path(root, path);
+                       break;
+               }
 
                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                length = btrfs_dev_extent_length(l, dev_extent);
 
-               if (key.offset + length <= new_size)
-                       goto done;
+               if (key.offset + length <= new_size) {
+                       btrfs_release_path(root, path);
+                       break;
+               }
 
                chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
                chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
@@ -1887,10 +2072,44 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 
                ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
                                           chunk_offset);
-               if (ret)
+               if (ret && ret != -ENOSPC)
                        goto done;
+               if (ret == -ENOSPC)
+                       failed++;
+               key.offset -= 1;
        }
 
+       if (failed && !retried) {
+               failed = 0;
+               retried = true;
+               goto again;
+       } else if (failed && retried) {
+               ret = -ENOSPC;
+               lock_chunks(root);
+
+               device->total_bytes = old_size;
+               if (device->writeable)
+                       device->fs_devices->total_rw_bytes += diff;
+               unlock_chunks(root);
+               goto done;
+       }
+
+       /* Shrinking succeeded, else we would be at "done". */
+       trans = btrfs_start_transaction(root, 0);
+       lock_chunks(root);
+
+       device->disk_total_bytes = new_size;
+       /* Now btrfs_update_device() will change the on-disk size. */
+       ret = btrfs_update_device(trans, device);
+       if (ret) {
+               unlock_chunks(root);
+               btrfs_end_transaction(trans, root);
+               goto done;
+       }
+       WARN_ON(diff > old_total);
+       btrfs_set_super_total_bytes(super_copy, old_total - diff);
+       unlock_chunks(root);
+       btrfs_end_transaction(trans, root);
 done:
        btrfs_free_path(path);
        return ret;
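
The shrink path is reordered so the on-disk device item is updated only
after every dev extent past new_size has been relocated; previously the
smaller size was committed up front, and a failed relocation left the
disk disagreeing with reality. On persistent -ENOSPC the in-memory size
is rolled back. The ordering as a sketch (types and helpers are
stand-ins):

    typedef unsigned long long u64;
    struct dev_sz { u64 total_bytes; u64 disk_total_bytes; };

    static int relocate_tail(struct dev_sz *d, u64 ns)
    { (void)d; (void)ns; return 0; }
    static int commit_size(struct dev_sz *d) { (void)d; return 0; }

    static int shrink(struct dev_sz *d, u64 new_size)
    {
            u64 old_size = d->total_bytes;

            d->total_bytes = new_size;         /* 1: fence off the tail */
            if (relocate_tail(d, new_size)) {  /* 2: move extents (+1 retry) */
                    d->total_bytes = old_size; /* rollback, disk untouched */
                    return -1;
            }
            d->disk_total_bytes = new_size;    /* 3: only now persist */
            return commit_size(d);
    }
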
@@ -1977,9 +2196,9 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                min_stripes = 2;
        }
        if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
-               num_stripes = min_t(u64, 2, fs_devices->rw_devices);
-               if (num_stripes < 2)
+               if (fs_devices->rw_devices < 2)
                        return -ENOSPC;
+               num_stripes = 2;
                min_stripes = 2;
        }
        if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
@@ -1995,7 +2214,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                max_chunk_size = 10 * calc_size;
                min_stripe_size = 64 * 1024 * 1024;
        } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
-               max_chunk_size = 4 * calc_size;
+               max_chunk_size = 256 * 1024 * 1024;
                min_stripe_size = 32 * 1024 * 1024;
        } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
                calc_size = 8 * 1024 * 1024;
@@ -2008,6 +2227,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                             max_chunk_size);
 
 again:
+       max_avail = 0;
        if (!map || map->num_stripes != num_stripes) {
                kfree(map);
                map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
@@ -2022,8 +2242,16 @@ again:
                do_div(calc_size, stripe_len);
                calc_size *= stripe_len;
        }
+
        /* we don't want tiny stripes */
-       calc_size = max_t(u64, min_stripe_size, calc_size);
+       if (!looped)
+               calc_size = max_t(u64, min_stripe_size, calc_size);
+
+       /*
+        * we're about to do_div by the stripe_len so lets make sure
+        * we end up with something bigger than a stripe
+        */
+       calc_size = max_t(u64, calc_size, stripe_len * 4);
 
        do_div(calc_size, stripe_len);
        calc_size *= stripe_len;
@@ -2056,7 +2284,8 @@ again:
 
                if (device->in_fs_metadata && avail >= min_free) {
                        ret = find_free_dev_extent(trans, device,
-                                                  min_free, &dev_offset);
+                                                  min_free, &dev_offset,
+                                                  &max_avail);
                        if (ret == 0) {
                                list_move_tail(&device->dev_alloc_list,
                                               &private_devs);
@@ -2119,9 +2348,9 @@ again:
        em->block_len = em->len;
 
        em_tree = &extent_root->fs_info->mapping_tree.map_tree;
-       spin_lock(&em_tree->lock);
+       write_lock(&em_tree->lock);
        ret = add_extent_mapping(em_tree, em);
-       spin_unlock(&em_tree->lock);
+       write_unlock(&em_tree->lock);
        BUG_ON(ret);
        free_extent_map(em);
 
@@ -2316,12 +2545,17 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
        int readonly = 0;
        int i;
 
-       spin_lock(&map_tree->map_tree.lock);
+       read_lock(&map_tree->map_tree.lock);
        em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
-       spin_unlock(&map_tree->map_tree.lock);
+       read_unlock(&map_tree->map_tree.lock);
        if (!em)
                return 1;
 
+       if (btrfs_test_opt(root, DEGRADED)) {
+               free_extent_map(em);
+               return 0;
+       }
+
        map = (struct map_lookup *)em->bdev;
        for (i = 0; i < map->num_stripes; i++) {
                if (!map->stripes[i].dev->writeable) {
@@ -2343,11 +2577,11 @@ void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
        struct extent_map *em;
 
        while (1) {
-               spin_lock(&tree->map_tree.lock);
+               write_lock(&tree->map_tree.lock);
                em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
                if (em)
                        remove_extent_mapping(&tree->map_tree, em);
-               spin_unlock(&tree->map_tree.lock);
+               write_unlock(&tree->map_tree.lock);
                if (!em)
                        break;
                kfree(em->bdev);
@@ -2365,9 +2599,9 @@ int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
        struct extent_map_tree *em_tree = &map_tree->map_tree;
        int ret;
 
-       spin_lock(&em_tree->lock);
+       read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, logical, len);
-       spin_unlock(&em_tree->lock);
+       read_unlock(&em_tree->lock);
        BUG_ON(!em);
 
        BUG_ON(em->start > logical || em->start + em->len < logical);
@@ -2429,12 +2663,14 @@ again:
                atomic_set(&multi->error, 0);
        }
 
-       spin_lock(&em_tree->lock);
+       read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, logical, *length);
-       spin_unlock(&em_tree->lock);
+       read_unlock(&em_tree->lock);
 
-       if (!em && unplug_page)
+       if (!em && unplug_page) {
+               kfree(multi);
                return 0;
+       }
 
        if (!em) {
                printk(KERN_CRIT "unable to find logical %llu len %llu\n",
@@ -2461,7 +2697,7 @@ again:
                        max_errors = 1;
                }
        }
-       if (multi_ret && rw == WRITE &&
+       if (multi_ret && (rw & (1 << BIO_RW)) &&
            stripes_allocated < stripes_required) {
                stripes_allocated = map->num_stripes;
                free_extent_map(em);
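
Two small fixes in the last two hunks. First, the unplug_page early
return now frees the multi buffer allocated earlier in the function; in
general form:

    #include <stdlib.h>

    /* A buffer allocated up front for an out parameter must be freed
     * on every early return, including the "nothing to do" one. */
    static int map_block(void **out, int nothing_to_do)
    {
            void *multi = malloc(64);

            if (!multi)
                    return -1;
            if (nothing_to_do) {
                    free(multi);    /* the previously missing kfree() */
                    return 0;
            }
            *out = multi;
            return 0;
    }

Second, the stripe-count check now tests the request's write bit with
"rw & (1 << BIO_RW)" rather than "rw == WRITE", since rw can carry flag
bits (such as BIO_RW_SYNCIO) beyond the bare WRITE value.
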
@@ -2588,9 +2824,9 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
        u64 stripe_nr;
        int i, j, nr = 0;
 
-       spin_lock(&em_tree->lock);
+       read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, chunk_start, 1);
-       spin_unlock(&em_tree->lock);
+       read_unlock(&em_tree->lock);
 
        BUG_ON(!em || em->start != chunk_start);
        map = (struct map_lookup *)em->bdev;
@@ -2632,26 +2868,6 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
                }
        }
 
-       for (i = 0; i > nr; i++) {
-               struct btrfs_multi_bio *multi;
-               struct btrfs_bio_stripe *stripe;
-               int ret;
-
-               length = 1;
-               ret = btrfs_map_block(map_tree, WRITE, buf[i],
-                                     &length, &multi, 0);
-               BUG_ON(ret);
-
-               stripe = multi->stripes;
-               for (j = 0; j < multi->num_stripes; j++) {
-                       if (stripe->physical >= physical &&
-                           physical < stripe->physical + length)
-                               break;
-               }
-               BUG_ON(j >= multi->num_stripes);
-               kfree(multi);
-       }
-
        *logical = buf;
        *naddrs = nr;
        *stripe_len = map->stripe_len;
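
The deleted verification loop was dead code: its guard was inverted, so
the body could never execute and removing it changes no behavior. A
short demonstration:

    #include <assert.h>

    int main(void)
    {
            int i, nr = 3, ran = 0;

            for (i = 0; i > nr; i++)    /* 0 > 3 is false: never entered */
                    ran = 1;
            assert(ran == 0);           /* holds for any nr >= 0 */
            return 0;
    }
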
@@ -2726,6 +2942,7 @@ static noinline int schedule_bio(struct btrfs_root *root,
                                 int rw, struct bio *bio)
 {
        int should_queue = 1;
+       struct btrfs_pending_bios *pending_bios;
 
        /* don't bother with additional async steps for reads, right now */
        if (!(rw & (1 << BIO_RW))) {
@@ -2747,13 +2964,17 @@ static noinline int schedule_bio(struct btrfs_root *root,
        bio->bi_rw |= rw;
 
        spin_lock(&device->io_lock);
+       if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
+               pending_bios = &device->pending_sync_bios;
+       else
+               pending_bios = &device->pending_bios;
 
-       if (device->pending_bio_tail)
-               device->pending_bio_tail->bi_next = bio;
+       if (pending_bios->tail)
+               pending_bios->tail->bi_next = bio;
 
-       device->pending_bio_tail = bio;
-       if (!device->pending_bios)
-               device->pending_bios = bio;
+       pending_bios->tail = bio;
+       if (!pending_bios->head)
+               pending_bios->head = bio;
        if (device->running_pending)
                should_queue = 0;
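
schedule_bio() now sorts incoming bios onto two queues: BIO_RW_SYNCIO
ones onto pending_sync_bios, the rest onto pending_bios, both with the
same tail-append discipline that run_scheduled_bios() drains. In
isolation (stand-in types):

    struct xbio { struct xbio *bi_next; };
    struct pending { struct xbio *head; struct xbio *tail; };
    struct xdev { struct pending pending_bios, pending_sync_bios; };

    static void enqueue(struct xdev *d, struct xbio *bio, int sync)
    {
            struct pending *q = sync ? &d->pending_sync_bios
                                     : &d->pending_bios;

            bio->bi_next = NULL;
            if (q->tail)
                    q->tail->bi_next = bio;
            q->tail = bio;
            if (!q->head)
                    q->head = bio;
    }
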
 
@@ -2893,9 +3114,9 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
        logical = key->offset;
        length = btrfs_chunk_length(leaf, chunk);
 
-       spin_lock(&map_tree->map_tree.lock);
+       read_lock(&map_tree->map_tree.lock);
        em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
-       spin_unlock(&map_tree->map_tree.lock);
+       read_unlock(&map_tree->map_tree.lock);
 
        /* already mapped? */
        if (em && em->start <= logical && em->start + em->len > logical) {
@@ -2905,10 +3126,6 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
                free_extent_map(em);
        }
 
-       map = kzalloc(sizeof(*map), GFP_NOFS);
-       if (!map)
-               return -ENOMEM;
-
        em = alloc_extent_map(GFP_NOFS);
        if (!em)
                return -ENOMEM;
@@ -2958,9 +3175,9 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
                map->stripes[i].dev->in_fs_metadata = 1;
        }
 
-       spin_lock(&map_tree->map_tree.lock);
+       write_lock(&map_tree->map_tree.lock);
        ret = add_extent_mapping(&map_tree->map_tree, em);
-       spin_unlock(&map_tree->map_tree.lock);
+       write_unlock(&map_tree->map_tree.lock);
        BUG_ON(ret);
        free_extent_map(em);
 
@@ -2974,7 +3191,8 @@ static int fill_device_from_item(struct extent_buffer *leaf,
        unsigned long ptr;
 
        device->devid = btrfs_device_id(leaf, dev_item);
-       device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
+       device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
+       device->total_bytes = device->disk_total_bytes;
        device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
        device->type = btrfs_device_type(leaf, dev_item);
        device->io_align = btrfs_device_io_align(leaf, dev_item);
@@ -3117,6 +3335,8 @@ int btrfs_read_sys_array(struct btrfs_root *root)
        if (!sb)
                return -ENOMEM;
        btrfs_set_buffer_uptodate(sb);
+       btrfs_set_buffer_lockdep_class(sb, 0);
+
        write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
        array_size = btrfs_super_sys_array_size(super_copy);
 
@@ -3175,6 +3395,8 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
        key.type = 0;
 again:
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+       if (ret < 0)
+               goto error;
        while (1) {
                leaf = path->nodes[0];
                slot = path->slots[0];