Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index a6d35b0..7eda483 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -161,8 +161,10 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
        int again = 0;
        unsigned long num_run;
        unsigned long num_sync_run;
+       unsigned long batch_run = 0;
        unsigned long limit;
        unsigned long last_waited = 0;
+       int force_reg = 0;
 
        bdi = blk_get_backing_dev_info(device->bdev);
        fs_info = device->dev_root->fs_info;
@@ -176,19 +178,22 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
 
 loop:
        spin_lock(&device->io_lock);
-       num_run = 0;
 
 loop_lock:
+       num_run = 0;
 
        /* take all the bios off the list at once and process them
         * later on (without the lock held).  But, remember the
         * tail and other pointers so the bios can be properly reinserted
         * into the list if we hit congestion
         */
-       if (device->pending_sync_bios.head)
+       if (!force_reg && device->pending_sync_bios.head) {
                pending_bios = &device->pending_sync_bios;
-       else
+               force_reg = 1;
+       } else {
                pending_bios = &device->pending_bios;
+               force_reg = 0;
+       }
 
        pending = pending_bios->head;
        tail = pending_bios->tail;
@@ -228,10 +233,14 @@ loop_lock:
        while (pending) {
 
                rmb();
-               if (pending_bios != &device->pending_sync_bios &&
-                   device->pending_sync_bios.head &&
-                   num_run > 16) {
-                       cond_resched();
+               /* we want to work on both lists, but do more bios on the
+                * sync list than the regular list
+                */
+               if ((num_run > 32 &&
+                   pending_bios != &device->pending_sync_bios &&
+                   device->pending_sync_bios.head) ||
+                  (num_run > 64 && pending_bios == &device->pending_sync_bios &&
+                   device->pending_bios.head)) {
                        spin_lock(&device->io_lock);
                        requeue_list(pending_bios, pending, tail);
                        goto loop_lock;
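
Note on the scheduling change above: the hunks let the worker alternate between the sync and regular bio lists instead of letting one starve the other, allowing a longer batch on the sync list (64) than on the regular list (32) before switching, with force_reg remembering that the sync list was just serviced. Below is a minimal userspace sketch of that alternation policy only, assuming two plain counters can stand in for the two pending lists; the names, queue sizes and the omitted congestion backoff are illustrative, not the kernel structures.

/* Toy model of the two-list draining policy: drain one list, back off once
 * a batch limit is hit while the other list has work, and remember
 * (force_reg-style) that the sync list was serviced so the next pass picks
 * the regular list. */
#include <stdbool.h>
#include <stdio.h>

#define SYNC_BATCH 64	/* mirrors the num_run > 64 check in the hunk */
#define REG_BATCH  32	/* mirrors the num_run > 32 check in the hunk */

int main(void)
{
	int sync_left = 200, reg_left = 200;	/* pretend pending bios */
	bool force_reg = false;

	while (sync_left || reg_left) {
		bool on_sync = !force_reg && sync_left;
		int *cur = on_sync ? &sync_left : &reg_left;
		int *other = on_sync ? &reg_left : &sync_left;
		int limit = on_sync ? SYNC_BATCH : REG_BATCH;
		int num_run = 0;

		/* next pass prefers the other list if we ran the sync one */
		force_reg = on_sync;

		while (*cur) {
			(*cur)--;		/* "submit" one bio */
			num_run++;
			if (num_run > limit && *other)
				break;		/* requeue and switch lists */
		}
		printf("ran %d from the %s list\n", num_run,
		       on_sync ? "sync" : "regular");
	}
	return 0;
}
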
@@ -249,7 +258,9 @@ loop_lock:
                BUG_ON(atomic_read(&cur->bi_cnt) == 0);
                submit_bio(cur->bi_rw, cur);
                num_run++;
-               if (bio_sync(cur))
+               batch_run++;
+
+               if (bio_rw_flagged(cur, BIO_RW_SYNCIO))
                        num_sync_run++;
 
                if (need_resched()) {
@@ -265,7 +276,7 @@ loop_lock:
                 * is now congested.  Back off and let other work structs
                 * run instead
                 */
-               if (pending && bdi_write_congested(bdi) && num_run > 16 &&
+               if (pending && bdi_write_congested(bdi) && batch_run > 8 &&
                    fs_info->fs_devices->open_devices > 1) {
                        struct io_context *ioc;
 
@@ -366,6 +377,7 @@ static noinline int device_list_add(const char *path,
                memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
+               mutex_init(&fs_devices->device_list_mutex);
                device = NULL;
        } else {
                device = __find_device(&fs_devices->devices, devid,
@@ -392,7 +404,11 @@ static noinline int device_list_add(const char *path,
                        return -ENOMEM;
                }
                INIT_LIST_HEAD(&device->dev_alloc_list);
+
+               mutex_lock(&fs_devices->device_list_mutex);
                list_add(&device->dev_list, &fs_devices->devices);
+               mutex_unlock(&fs_devices->device_list_mutex);
+
                device->fs_devices = fs_devices;
                fs_devices->num_devices++;
        }
@@ -418,18 +434,22 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
        INIT_LIST_HEAD(&fs_devices->devices);
        INIT_LIST_HEAD(&fs_devices->alloc_list);
        INIT_LIST_HEAD(&fs_devices->list);
+       mutex_init(&fs_devices->device_list_mutex);
        fs_devices->latest_devid = orig->latest_devid;
        fs_devices->latest_trans = orig->latest_trans;
        memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid));
 
+       mutex_lock(&orig->device_list_mutex);
        list_for_each_entry(orig_dev, &orig->devices, dev_list) {
                device = kzalloc(sizeof(*device), GFP_NOFS);
                if (!device)
                        goto error;
 
                device->name = kstrdup(orig_dev->name, GFP_NOFS);
-               if (!device->name)
+               if (!device->name) {
+                       kfree(device);
                        goto error;
+               }
 
                device->devid = orig_dev->devid;
                device->work.func = pending_bios_fn;
@@ -443,8 +463,10 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
                device->fs_devices = fs_devices;
                fs_devices->num_devices++;
        }
+       mutex_unlock(&orig->device_list_mutex);
        return fs_devices;
 error:
+       mutex_unlock(&orig->device_list_mutex);
        free_fs_devices(fs_devices);
        return ERR_PTR(-ENOMEM);
 }
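
The added kfree(device) in the hunk above plugs a leak on the error path of clone_fs_devices(): if the name copy fails, the partially built device was previously abandoned without being freed. A minimal sketch of the same cleanup pattern, with generic names in place of the btrfs ones.

#include <stdlib.h>
#include <string.h>

struct device { char *name; };

static struct device *clone_device(const char *name)
{
	struct device *d = calloc(1, sizeof(*d));

	if (!d)
		return NULL;
	d->name = strdup(name);
	if (!d->name) {
		free(d);	/* the role of the added kfree(device) */
		return NULL;
	}
	return d;
}

int main(void)
{
	struct device *d = clone_device("sda");

	if (d) {
		free(d->name);
		free(d);
	}
	return 0;
}
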
@@ -455,6 +477,7 @@ int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
 
        mutex_lock(&uuid_mutex);
 again:
+       mutex_lock(&fs_devices->device_list_mutex);
        list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
                if (device->in_fs_metadata)
                        continue;
@@ -474,6 +497,7 @@ again:
                kfree(device->name);
                kfree(device);
        }
+       mutex_unlock(&fs_devices->device_list_mutex);
 
        if (fs_devices->seed) {
                fs_devices = fs_devices->seed;
@@ -594,6 +618,9 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                device->in_fs_metadata = 0;
                device->mode = flags;
 
+               if (!blk_queue_nonrot(bdev_get_queue(bdev)))
+                       fs_devices->rotating = 1;
+
                fs_devices->open_devices++;
                if (device->writeable) {
                        fs_devices->rw_devices++;
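
The blk_queue_nonrot() check added above records whether any opened member device sits on a rotational queue, so that fs_devices->rotating can steer later allocator heuristics. A rough userspace analogue, assuming the usual sysfs "rotational" queue attribute; the helper and device names are made up for this sketch.

#include <stdio.h>

static int device_is_rotational(const char *blockdev)	/* e.g. "sda" */
{
	char path[256];
	int val = 1;	/* assume rotational if we cannot tell */
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/block/%s/queue/rotational", blockdev);
	f = fopen(path, "r");
	if (f) {
		if (fscanf(f, "%d", &val) != 1)
			val = 1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	/* mirrors "if any member device rotates, mark the pool rotating" */
	const char *members[] = { "sda", "sdb" };
	int rotating = 0;

	for (unsigned i = 0; i < sizeof(members) / sizeof(members[0]); i++)
		if (device_is_rotational(members[i]))
			rotating = 1;

	printf("fs_devices->rotating would be %d\n", rotating);
	return 0;
}
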
@@ -694,9 +721,9 @@ error:
  * called very infrequently and that a given device has a small number
  * of extents
  */
-static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
-                                        struct btrfs_device *device,
-                                        u64 num_bytes, u64 *start)
+int find_free_dev_extent(struct btrfs_trans_handle *trans,
+                        struct btrfs_device *device, u64 num_bytes,
+                        u64 *start, u64 *max_avail)
 {
        struct btrfs_key key;
        struct btrfs_root *root = device->dev_root;
@@ -733,9 +760,13 @@ static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
        ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;
-       ret = btrfs_previous_item(root, path, 0, key.type);
-       if (ret < 0)
-               goto error;
+       if (ret > 0) {
+               ret = btrfs_previous_item(root, path, key.objectid, key.type);
+               if (ret < 0)
+                       goto error;
+               if (ret > 0)
+                       start_found = 1;
+       }
        l = path->nodes[0];
        btrfs_item_key_to_cpu(l, &key, path->slots[0]);
        while (1) {
@@ -778,6 +809,10 @@ no_more_items:
                        if (last_byte < search_start)
                                last_byte = search_start;
                        hole_size = key.offset - last_byte;
+
+                       if (hole_size > *max_avail)
+                               *max_avail = hole_size;
+
                        if (key.offset > last_byte &&
                            hole_size >= num_bytes) {
                                *start = last_byte;
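
The new *max_avail out-parameter of find_free_dev_extent() lets callers learn the largest free hole on the device even when no hole is big enough for the current request. A minimal sketch of that scan over a plain sorted array standing in for the dev-extent btree walk; all names, sizes and the -1 error code are illustrative.

#include <stdio.h>

struct extent { unsigned long long start, len; };

static int find_hole(const struct extent *ext, int n,
		     unsigned long long dev_size,
		     unsigned long long num_bytes,
		     unsigned long long *start,
		     unsigned long long *max_avail)
{
	unsigned long long last_end = 0;
	int found = 0;

	for (int i = 0; i <= n; i++) {
		unsigned long long next = (i < n) ? ext[i].start : dev_size;
		unsigned long long hole = next > last_end ? next - last_end : 0;

		if (hole > *max_avail)		/* track the biggest hole even */
			*max_avail = hole;	/* if nothing fits */
		if (!found && hole >= num_bytes) {
			*start = last_end;
			found = 1;
		}
		if (i < n)
			last_end = ext[i].start + ext[i].len;
	}
	return found ? 0 : -1;	/* -1 plays the role of -ENOSPC */
}

int main(void)
{
	struct extent used[] = { { 0, 16 }, { 32, 8 }, { 64, 16 } };
	unsigned long long start = 0, max_avail = 0;

	if (find_hole(used, 3, 128, 24, &start, &max_avail) == 0)
		printf("hole at %llu, largest hole %llu\n", start, max_avail);
	return 0;
}
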
@@ -1121,12 +1156,14 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
 
                device = NULL;
                devices = &root->fs_info->fs_devices->devices;
+               mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
                list_for_each_entry(tmp, devices, dev_list) {
                        if (tmp->in_fs_metadata && !tmp->bdev) {
                                device = tmp;
                                break;
                        }
                }
+               mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
                bdev = NULL;
                bh = NULL;
                disk_super = NULL;
@@ -1181,7 +1218,16 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
                goto error_brelse;
 
        device->in_fs_metadata = 0;
+
+       /*
+        * the device list mutex makes sure that we don't change
+        * the device list while someone else is writing out all
+        * the device supers.
+        */
+       mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
        list_del_init(&device->dev_list);
+       mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
+
        device->fs_devices->num_devices--;
 
        next_device = list_entry(root->fs_info->fs_devices->devices.next,
@@ -1275,6 +1321,7 @@ static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
        seed_devices->opened = 1;
        INIT_LIST_HEAD(&seed_devices->devices);
        INIT_LIST_HEAD(&seed_devices->alloc_list);
+       mutex_init(&seed_devices->device_list_mutex);
        list_splice_init(&fs_devices->devices, &seed_devices->devices);
        list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list);
        list_for_each_entry(device, &seed_devices->devices, dev_list) {
@@ -1400,6 +1447,10 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        mutex_lock(&root->fs_info->volume_mutex);
 
        devices = &root->fs_info->fs_devices->devices;
+       /*
+        * we have the volume lock, so we don't need the extra
+        * device list mutex while reading the list here.
+        */
        list_for_each_entry(device, devices, dev_list) {
                if (device->bdev == bdev) {
                        ret = -EEXIST;
@@ -1454,6 +1505,12 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        }
 
        device->fs_devices = root->fs_info->fs_devices;
+
+       /*
+        * we don't want write_supers to jump in here with our device
+        * half setup
+        */
+       mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
        list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
        list_add(&device->dev_alloc_list,
                 &root->fs_info->fs_devices->alloc_list);
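
The device_list_mutex taken in the btrfs_rm_device() and btrfs_init_new_device() hunks serializes additions and removals on fs_devices->devices against code, such as super-block writeout, that walks the same list. A minimal pthread sketch of that pattern, with a toy linked list in place of the btrfs structures; build with cc -pthread.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct dev { int id; struct dev *next; };

static struct dev *devices;
static pthread_mutex_t device_list_mutex = PTHREAD_MUTEX_INITIALIZER;

static void add_device(int id)
{
	struct dev *d = malloc(sizeof(*d));

	if (!d)
		return;
	d->id = id;
	pthread_mutex_lock(&device_list_mutex);	/* writers take the mutex */
	d->next = devices;
	devices = d;
	pthread_mutex_unlock(&device_list_mutex);
}

static void *write_supers(void *arg)	/* stands in for the super writeout */
{
	(void)arg;
	pthread_mutex_lock(&device_list_mutex);	/* walkers take the same mutex */
	for (struct dev *d = devices; d; d = d->next)
		printf("writing super to device %d\n", d->id);
	pthread_mutex_unlock(&device_list_mutex);
	return NULL;
}

int main(void)
{
	pthread_t t;

	add_device(1);
	add_device(2);
	pthread_create(&t, NULL, write_supers, NULL);
	add_device(3);		/* safe: never seen half-linked by the walker */
	pthread_join(t, NULL);
	return 0;
}
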
@@ -1462,6 +1519,9 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        root->fs_info->fs_devices->rw_devices++;
        root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
 
+       if (!blk_queue_nonrot(bdev_get_queue(bdev)))
+               root->fs_info->fs_devices->rotating = 1;
+
        total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
        btrfs_set_super_total_bytes(&root->fs_info->super_copy,
                                    total_bytes + device->total_bytes);
@@ -1469,6 +1529,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
        btrfs_set_super_num_devices(&root->fs_info->super_copy,
                                    total_bytes + 1);
+       mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
 
        if (seeding_dev) {
                ret = init_first_rw_device(trans, root, device);
@@ -1570,6 +1631,7 @@ static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
        device->fs_devices->total_rw_bytes += diff;
 
        device->total_bytes = new_size;
+       device->disk_total_bytes = new_size;
        btrfs_clear_space_info_full(device->dev_root->fs_info);
 
        return btrfs_update_device(trans, device);
@@ -1671,12 +1733,14 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
        int ret;
        int i;
 
-       printk(KERN_INFO "btrfs relocating chunk %llu\n",
-              (unsigned long long)chunk_offset);
        root = root->fs_info->chunk_root;
        extent_root = root->fs_info->extent_root;
        em_tree = &root->fs_info->mapping_tree.map_tree;
 
+       ret = btrfs_can_relocate(extent_root, chunk_offset);
+       if (ret)
+               return -ENOSPC;
+
        /* step one, relocate all the extents inside this chunk */
        ret = btrfs_relocate_block_group(extent_root, chunk_offset);
        BUG_ON(ret);
@@ -1690,9 +1754,9 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
         * step two, delete the device extents and the
         * chunk tree entries
         */
-       spin_lock(&em_tree->lock);
+       read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, chunk_offset, 1);
-       spin_unlock(&em_tree->lock);
+       read_unlock(&em_tree->lock);
 
        BUG_ON(em->start > chunk_offset ||
               em->start + em->len < chunk_offset);
@@ -1721,9 +1785,9 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
        ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
        BUG_ON(ret);
 
-       spin_lock(&em_tree->lock);
+       write_lock(&em_tree->lock);
        remove_extent_mapping(em_tree, em);
-       spin_unlock(&em_tree->lock);
+       write_unlock(&em_tree->lock);
 
        kfree(map);
        em->bdev = NULL;
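
The spin_lock to read_lock/write_lock changes in this and the following hunks are part of converting the extent map tree's lock to a reader/writer lock: lookups take it shared, add/remove take it exclusive. A small pthread sketch of that split; the function names only mirror the call sites above and the "tree" is reduced to a counter.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t map_tree_lock = PTHREAD_RWLOCK_INITIALIZER;
static int nr_mappings;			/* stand-in for the extent map tree */

static int lookup_mapping(void)		/* read side: may run concurrently */
{
	int n;

	pthread_rwlock_rdlock(&map_tree_lock);	/* like read_lock(&em_tree->lock) */
	n = nr_mappings;
	pthread_rwlock_unlock(&map_tree_lock);
	return n;
}

static void add_mapping(void)		/* write side: exclusive */
{
	pthread_rwlock_wrlock(&map_tree_lock);	/* like write_lock(&em_tree->lock) */
	nr_mappings++;
	pthread_rwlock_unlock(&map_tree_lock);
}

int main(void)
{
	add_mapping();
	printf("mappings: %d\n", lookup_mapping());
	return 0;
}
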
@@ -1748,12 +1812,15 @@ static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
        struct btrfs_key found_key;
        u64 chunk_tree = chunk_root->root_key.objectid;
        u64 chunk_type;
+       bool retried = false;
+       int failed = 0;
        int ret;
 
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
 
+again:
        key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
        key.offset = (u64)-1;
        key.type = BTRFS_CHUNK_ITEM_KEY;
@@ -1783,7 +1850,10 @@ static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
                        ret = btrfs_relocate_chunk(chunk_root, chunk_tree,
                                                   found_key.objectid,
                                                   found_key.offset);
-                       BUG_ON(ret);
+                       if (ret == -ENOSPC)
+                               failed++;
+                       else if (ret)
+                               BUG();
                }
 
                if (found_key.offset == 0)
@@ -1791,6 +1861,14 @@ static int btrfs_relocate_sys_chunks(struct btrfs_root *root)
                key.offset = found_key.offset - 1;
        }
        ret = 0;
+       if (failed && !retried) {
+               failed = 0;
+               retried = true;
+               goto again;
+       } else if (failed && retried) {
+               WARN_ON(1);
+               ret = -ENOSPC;
+       }
 error:
        btrfs_free_path(path);
        return ret;
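
The failed/retried pair added to btrfs_relocate_sys_chunks() implements a retry-once policy: chunks that fail with -ENOSPC on the first pass are attempted again after the others have been relocated (which may have freed space), and only a failure on the second pass is reported. A compact sketch of that control flow, with a made-up relocate_one() standing in for btrfs_relocate_chunk().

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* pretend chunk 2 only fits once the first pass has freed some space */
static int relocate_one(int chunk, int pass)
{
	if (chunk == 2 && pass == 0)
		return -ENOSPC;
	return 0;
}

int main(void)
{
	bool retried = false;
	int failed = 0, ret = 0, pass = 0;

again:
	for (int chunk = 0; chunk < 4; chunk++) {
		int err = relocate_one(chunk, pass);

		if (err == -ENOSPC)
			failed++;	/* remember it, keep going */
		else if (err)
			return 1;	/* anything else is fatal right away */
	}
	if (failed && !retried) {
		failed = 0;
		retried = true;
		pass++;
		goto again;		/* one more pass over the failures */
	} else if (failed && retried) {
		ret = -ENOSPC;		/* still stuck after the retry */
	}
	printf("result: %d\n", ret);
	return ret ? 1 : 0;
}
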
@@ -1835,6 +1913,8 @@ int btrfs_balance(struct btrfs_root *dev_root)
                        continue;
 
                ret = btrfs_shrink_device(device, old_size - size_to_free);
+               if (ret == -ENOSPC)
+                       break;
                BUG_ON(ret);
 
                trans = btrfs_start_transaction(dev_root, 1);
@@ -1879,9 +1959,8 @@ int btrfs_balance(struct btrfs_root *dev_root)
                chunk = btrfs_item_ptr(path->nodes[0],
                                       path->slots[0],
                                       struct btrfs_chunk);
-               key.offset = found_key.offset;
                /* chunk zero is special */
-               if (key.offset == 0)
+               if (found_key.offset == 0)
                        break;
 
                btrfs_release_path(chunk_root, path);
@@ -1889,7 +1968,8 @@ int btrfs_balance(struct btrfs_root *dev_root)
                                           chunk_root->root_key.objectid,
                                           found_key.objectid,
                                           found_key.offset);
-               BUG_ON(ret);
+               BUG_ON(ret && ret != -ENOSPC);
+               key.offset = found_key.offset - 1;
        }
        ret = 0;
 error:
@@ -1915,10 +1995,13 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
        u64 chunk_offset;
        int ret;
        int slot;
+       int failed = 0;
+       bool retried = false;
        struct extent_buffer *l;
        struct btrfs_key key;
        struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
        u64 old_total = btrfs_super_total_bytes(super_copy);
+       u64 old_size = device->total_bytes;
        u64 diff = device->total_bytes - new_size;
 
        if (new_size >= device->total_bytes)
@@ -1928,12 +2011,6 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
        if (!path)
                return -ENOMEM;
 
-       trans = btrfs_start_transaction(root, 1);
-       if (!trans) {
-               ret = -ENOMEM;
-               goto done;
-       }
-
        path->reada = 2;
 
        lock_chunks(root);
@@ -1942,8 +2019,8 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
        if (device->writeable)
                device->fs_devices->total_rw_bytes -= diff;
        unlock_chunks(root);
-       btrfs_end_transaction(trans, root);
 
+again:
        key.objectid = device->devid;
        key.offset = (u64)-1;
        key.type = BTRFS_DEV_EXTENT_KEY;
@@ -1958,21 +2035,26 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
                        goto done;
                if (ret) {
                        ret = 0;
-                       goto done;
+                       btrfs_release_path(root, path);
+                       break;
                }
 
                l = path->nodes[0];
                slot = path->slots[0];
                btrfs_item_key_to_cpu(l, &key, path->slots[0]);
 
-               if (key.objectid != device->devid)
-                       goto done;
+               if (key.objectid != device->devid) {
+                       btrfs_release_path(root, path);
+                       break;
+               }
 
                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                length = btrfs_dev_extent_length(l, dev_extent);
 
-               if (key.offset + length <= new_size)
+               if (key.offset + length <= new_size) {
+                       btrfs_release_path(root, path);
                        break;
+               }
 
                chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
                chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
@@ -1981,8 +2063,26 @@ int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
 
                ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
                                           chunk_offset);
-               if (ret)
+               if (ret && ret != -ENOSPC)
                        goto done;
+               if (ret == -ENOSPC)
+                       failed++;
+               key.offset -= 1;
+       }
+
+       if (failed && !retried) {
+               failed = 0;
+               retried = true;
+               goto again;
+       } else if (failed && retried) {
+               ret = -ENOSPC;
+               lock_chunks(root);
+
+               device->total_bytes = old_size;
+               if (device->writeable)
+                       device->fs_devices->total_rw_bytes += diff;
+               unlock_chunks(root);
+               goto done;
        }
 
        /* Shrinking succeeded, else we would be at "done". */
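
If the retry above still cannot move every dev extent, the shrink is abandoned and the size accounting that was lowered optimistically before the walk is restored under the chunk lock. A tiny sketch of that bookkeeping rollback with illustrative numbers; the variables are stand-ins, not the btrfs fields.

#include <stdio.h>

int main(void)
{
	unsigned long long total_bytes = 100, total_rw_bytes = 100;
	unsigned long long new_size = 60;
	unsigned long long old_size = total_bytes;
	unsigned long long diff = total_bytes - new_size;
	int relocation_failed = 1;	/* pretend the retry still failed */

	/* optimistic update, as done before the dev-extent walk */
	total_bytes = new_size;
	total_rw_bytes -= diff;

	if (relocation_failed) {
		/* undo, as in the "failed && retried" branch above */
		total_bytes = old_size;
		total_rw_bytes += diff;
	}
	printf("total=%llu rw=%llu\n", total_bytes, total_rw_bytes);
	return 0;
}
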
@@ -2122,6 +2222,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                             max_chunk_size);
 
 again:
+       max_avail = 0;
        if (!map || map->num_stripes != num_stripes) {
                kfree(map);
                map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
@@ -2170,7 +2271,8 @@ again:
 
                if (device->in_fs_metadata && avail >= min_free) {
                        ret = find_free_dev_extent(trans, device,
-                                                  min_free, &dev_offset);
+                                                  min_free, &dev_offset,
+                                                  &max_avail);
                        if (ret == 0) {
                                list_move_tail(&device->dev_alloc_list,
                                               &private_devs);
@@ -2233,9 +2335,9 @@ again:
        em->block_len = em->len;
 
        em_tree = &extent_root->fs_info->mapping_tree.map_tree;
-       spin_lock(&em_tree->lock);
+       write_lock(&em_tree->lock);
        ret = add_extent_mapping(em_tree, em);
-       spin_unlock(&em_tree->lock);
+       write_unlock(&em_tree->lock);
        BUG_ON(ret);
        free_extent_map(em);
 
@@ -2430,9 +2532,9 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
        int readonly = 0;
        int i;
 
-       spin_lock(&map_tree->map_tree.lock);
+       read_lock(&map_tree->map_tree.lock);
        em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
-       spin_unlock(&map_tree->map_tree.lock);
+       read_unlock(&map_tree->map_tree.lock);
        if (!em)
                return 1;
 
@@ -2457,11 +2559,11 @@ void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
        struct extent_map *em;
 
        while (1) {
-               spin_lock(&tree->map_tree.lock);
+               write_lock(&tree->map_tree.lock);
                em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
                if (em)
                        remove_extent_mapping(&tree->map_tree, em);
-               spin_unlock(&tree->map_tree.lock);
+               write_unlock(&tree->map_tree.lock);
                if (!em)
                        break;
                kfree(em->bdev);
@@ -2479,9 +2581,9 @@ int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
        struct extent_map_tree *em_tree = &map_tree->map_tree;
        int ret;
 
-       spin_lock(&em_tree->lock);
+       read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, logical, len);
-       spin_unlock(&em_tree->lock);
+       read_unlock(&em_tree->lock);
        BUG_ON(!em);
 
        BUG_ON(em->start > logical || em->start + em->len < logical);
@@ -2543,9 +2645,9 @@ again:
                atomic_set(&multi->error, 0);
        }
 
-       spin_lock(&em_tree->lock);
+       read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, logical, *length);
-       spin_unlock(&em_tree->lock);
+       read_unlock(&em_tree->lock);
 
        if (!em && unplug_page)
                return 0;
@@ -2702,9 +2804,9 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
        u64 stripe_nr;
        int i, j, nr = 0;
 
-       spin_lock(&em_tree->lock);
+       read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, chunk_start, 1);
-       spin_unlock(&em_tree->lock);
+       read_unlock(&em_tree->lock);
 
        BUG_ON(!em || em->start != chunk_start);
        map = (struct map_lookup *)em->bdev;
@@ -2746,26 +2848,6 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
                }
        }
 
-       for (i = 0; i > nr; i++) {
-               struct btrfs_multi_bio *multi;
-               struct btrfs_bio_stripe *stripe;
-               int ret;
-
-               length = 1;
-               ret = btrfs_map_block(map_tree, WRITE, buf[i],
-                                     &length, &multi, 0);
-               BUG_ON(ret);
-
-               stripe = multi->stripes;
-               for (j = 0; j < multi->num_stripes; j++) {
-                       if (stripe->physical >= physical &&
-                           physical < stripe->physical + length)
-                               break;
-               }
-               BUG_ON(j >= multi->num_stripes);
-               kfree(multi);
-       }
-
        *logical = buf;
        *naddrs = nr;
        *stripe_len = map->stripe_len;
@@ -2862,7 +2944,7 @@ static noinline int schedule_bio(struct btrfs_root *root,
        bio->bi_rw |= rw;
 
        spin_lock(&device->io_lock);
-       if (bio_sync(bio))
+       if (bio_rw_flagged(bio, BIO_RW_SYNCIO))
                pending_bios = &device->pending_sync_bios;
        else
                pending_bios = &device->pending_bios;
@@ -3012,9 +3094,9 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
        logical = key->offset;
        length = btrfs_chunk_length(leaf, chunk);
 
-       spin_lock(&map_tree->map_tree.lock);
+       read_lock(&map_tree->map_tree.lock);
        em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
-       spin_unlock(&map_tree->map_tree.lock);
+       read_unlock(&map_tree->map_tree.lock);
 
        /* already mapped? */
        if (em && em->start <= logical && em->start + em->len > logical) {
@@ -3073,9 +3155,9 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
                map->stripes[i].dev->in_fs_metadata = 1;
        }
 
-       spin_lock(&map_tree->map_tree.lock);
+       write_lock(&map_tree->map_tree.lock);
        ret = add_extent_mapping(&map_tree->map_tree, em);
-       spin_unlock(&map_tree->map_tree.lock);
+       write_unlock(&map_tree->map_tree.lock);
        BUG_ON(ret);
        free_extent_map(em);