Btrfs: Avoid unplug storms during commit
fs/btrfs/volumes.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "async-thread.h"

struct map_lookup {
        u64 type;
        int io_align;
        int io_width;
        int stripe_len;
        int sector_size;
        int num_stripes;
        int sub_stripes;
        struct btrfs_bio_stripe stripes[];
};

#define map_lookup_size(n) (sizeof(struct map_lookup) + \
                            (sizeof(struct btrfs_bio_stripe) * (n)))

static DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);

void btrfs_lock_volumes(void)
{
        mutex_lock(&uuid_mutex);
}

void btrfs_unlock_volumes(void)
{
        mutex_unlock(&uuid_mutex);
}

static void lock_chunks(struct btrfs_root *root)
{
        mutex_lock(&root->fs_info->chunk_mutex);
}

static void unlock_chunks(struct btrfs_root *root)
{
        mutex_unlock(&root->fs_info->chunk_mutex);
}

int btrfs_cleanup_fs_uuids(void)
{
        struct btrfs_fs_devices *fs_devices;
        struct list_head *uuid_cur;
        struct list_head *devices_cur;
        struct btrfs_device *dev;

        list_for_each(uuid_cur, &fs_uuids) {
                fs_devices = list_entry(uuid_cur, struct btrfs_fs_devices,
                                        list);
                while (!list_empty(&fs_devices->devices)) {
                        devices_cur = fs_devices->devices.next;
                        dev = list_entry(devices_cur, struct btrfs_device,
                                         dev_list);
                        if (dev->bdev) {
                                close_bdev_excl(dev->bdev);
                                fs_devices->open_devices--;
                        }
                        list_del(&dev->dev_list);
                        kfree(dev->name);
                        kfree(dev);
                }
        }
        return 0;
}

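/*
 * find the device with the given devid (and uuid, when one is supplied)
 * in the list of devices for a single filesystem
 */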
static noinline struct btrfs_device *__find_device(struct list_head *head,
                                                   u64 devid, u8 *uuid)
{
        struct btrfs_device *dev;
        struct list_head *cur;

        list_for_each(cur, head) {
                dev = list_entry(cur, struct btrfs_device, dev_list);
                if (dev->devid == devid &&
                    (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
                        return dev;
                }
        }
        return NULL;
}

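/*
 * find the fs_devices entry whose fsid matches the super block we were
 * handed, or NULL if this filesystem hasn't been scanned yet
 */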
static noinline struct btrfs_fs_devices *find_fsid(u8 *fsid)
{
        struct list_head *cur;
        struct btrfs_fs_devices *fs_devices;

        list_for_each(cur, &fs_uuids) {
                fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
                if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
                        return fs_devices;
        }
        return NULL;
}

/*
 * we try to collect pending bios for a device so we don't get a large
 * number of procs sending bios down to the same device.  This greatly
 * improves the scheduler's ability to collect and merge the bios.
 *
 * But, it also turns into a long list of bios to process and that is sure
 * to eventually make the worker thread block.  The solution here is to
 * make some progress and then put this work struct back at the end of
 * the list if the block device is congested.  This way, multiple devices
 * can make progress from a single worker thread.
 */
static noinline int run_scheduled_bios(struct btrfs_device *device)
{
        struct bio *pending;
        struct backing_dev_info *bdi;
        struct btrfs_fs_info *fs_info;
        struct bio *tail;
        struct bio *cur;
        int again = 0;
        unsigned long num_run = 0;
        unsigned long limit;

        bdi = device->bdev->bd_inode->i_mapping->backing_dev_info;
        fs_info = device->dev_root->fs_info;
        limit = btrfs_async_submit_limit(fs_info);
        limit = limit * 2 / 3;

loop:
        spin_lock(&device->io_lock);

        /* take all the bios off the list at once and process them
         * later on (without the lock held).  But, remember the
         * tail and other pointers so the bios can be properly reinserted
         * into the list if we hit congestion
         */
        pending = device->pending_bios;
        tail = device->pending_bio_tail;
        WARN_ON(pending && !tail);
        device->pending_bios = NULL;
        device->pending_bio_tail = NULL;

        /*
         * if pending was null this time around, no bios need processing
         * at all and we can stop.  Otherwise it'll loop back up again
         * and do an additional check so no bios are missed.
         *
         * device->running_pending is used to synchronize with the
         * schedule_bio code.
         */
        if (pending) {
                again = 1;
                device->running_pending = 1;
        } else {
                again = 0;
                device->running_pending = 0;
        }
        spin_unlock(&device->io_lock);

        while (pending) {
                cur = pending;
                pending = pending->bi_next;
                cur->bi_next = NULL;
                atomic_dec(&fs_info->nr_async_bios);

                if (atomic_read(&fs_info->nr_async_bios) < limit &&
                    waitqueue_active(&fs_info->async_submit_wait))
                        wake_up(&fs_info->async_submit_wait);

                BUG_ON(atomic_read(&cur->bi_cnt) == 0);
                bio_get(cur);
                submit_bio(cur->bi_rw, cur);
                bio_put(cur);
                num_run++;

                /*
                 * we made progress, there is more work to do and the bdi
                 * is now congested.  Back off and let other work structs
                 * run instead
                 */
                if (pending && bdi_write_congested(bdi) &&
                    fs_info->fs_devices->open_devices > 1) {
                        struct bio *old_head;

                        spin_lock(&device->io_lock);

                        old_head = device->pending_bios;
                        device->pending_bios = pending;
                        if (device->pending_bio_tail)
                                tail->bi_next = old_head;
                        else
                                device->pending_bio_tail = tail;

                        spin_unlock(&device->io_lock);
                        btrfs_requeue_work(&device->work);
                        goto done;
                }
        }
        if (again)
                goto loop;
done:
        return 0;
}

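/*
 * async worker entry point, submits all the bios queued up for a
 * single device
 */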
void pending_bios_fn(struct btrfs_work *work)
{
        struct btrfs_device *device;

        device = container_of(work, struct btrfs_device, work);
        run_scheduled_bios(device);
}

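/*
 * called during scanning.  Add this device to the in-memory state for
 * its fsid, allocating a new fs_devices the first time a filesystem is
 * seen, and keep latest_devid/latest_trans pointing at the device with
 * the newest generation
 */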
static noinline int device_list_add(const char *path,
                           struct btrfs_super_block *disk_super,
                           u64 devid, struct btrfs_fs_devices **fs_devices_ret)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices;
        u64 found_transid = btrfs_super_generation(disk_super);

        fs_devices = find_fsid(disk_super->fsid);
        if (!fs_devices) {
                fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
                if (!fs_devices)
                        return -ENOMEM;
                INIT_LIST_HEAD(&fs_devices->devices);
                INIT_LIST_HEAD(&fs_devices->alloc_list);
                list_add(&fs_devices->list, &fs_uuids);
                memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
                device = NULL;
        } else {
                device = __find_device(&fs_devices->devices, devid,
                                       disk_super->dev_item.uuid);
        }
        if (!device) {
                device = kzalloc(sizeof(*device), GFP_NOFS);
                if (!device) {
                        /* we can safely leave the fs_devices entry around */
                        return -ENOMEM;
                }
                device->devid = devid;
                device->work.func = pending_bios_fn;
                memcpy(device->uuid, disk_super->dev_item.uuid,
                       BTRFS_UUID_SIZE);
                device->barriers = 1;
                spin_lock_init(&device->io_lock);
                device->name = kstrdup(path, GFP_NOFS);
                if (!device->name) {
                        kfree(device);
                        return -ENOMEM;
                }
                list_add(&device->dev_list, &fs_devices->devices);
                list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
                fs_devices->num_devices++;
        }

        if (found_transid > fs_devices->latest_trans) {
                fs_devices->latest_devid = devid;
                fs_devices->latest_trans = found_transid;
        }
        *fs_devices_ret = fs_devices;
        return 0;
}

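/*
 * close and forget any scanned devices that were never referenced by
 * the filesystem metadata
 */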
int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
{
        struct list_head *head = &fs_devices->devices;
        struct list_head *cur;
        struct btrfs_device *device;

        mutex_lock(&uuid_mutex);
again:
        list_for_each(cur, head) {
                device = list_entry(cur, struct btrfs_device, dev_list);
                if (!device->in_fs_metadata) {
                        struct block_device *bdev;
                        list_del(&device->dev_list);
                        list_del(&device->dev_alloc_list);
                        fs_devices->num_devices--;
                        if (device->bdev) {
                                bdev = device->bdev;
                                fs_devices->open_devices--;
                                mutex_unlock(&uuid_mutex);
                                close_bdev_excl(bdev);
                                mutex_lock(&uuid_mutex);
                        }
                        kfree(device->name);
                        kfree(device);
                        goto again;
                }
        }
        mutex_unlock(&uuid_mutex);
        return 0;
}

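/*
 * close every open block device for this filesystem and mark it
 * unmounted
 */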
int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
        struct list_head *head = &fs_devices->devices;
        struct list_head *cur;
        struct btrfs_device *device;

        mutex_lock(&uuid_mutex);
        list_for_each(cur, head) {
                device = list_entry(cur, struct btrfs_device, dev_list);
                if (device->bdev) {
                        close_bdev_excl(device->bdev);
                        fs_devices->open_devices--;
                }
                device->bdev = NULL;
                device->in_fs_metadata = 0;
        }
        fs_devices->mounted = 0;
        mutex_unlock(&uuid_mutex);
        return 0;
}

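/*
 * open the block device behind every scanned device, verify the super
 * block still matches, and remember the device with the highest
 * generation as latest_bdev
 */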
int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                       int flags, void *holder)
{
        struct block_device *bdev;
        struct list_head *head = &fs_devices->devices;
        struct list_head *cur;
        struct btrfs_device *device;
        struct block_device *latest_bdev = NULL;
        struct buffer_head *bh;
        struct btrfs_super_block *disk_super;
        u64 latest_devid = 0;
        u64 latest_transid = 0;
        u64 transid;
        u64 devid;
        int ret = 0;

        mutex_lock(&uuid_mutex);
        if (fs_devices->mounted)
                goto out;

        list_for_each(cur, head) {
                device = list_entry(cur, struct btrfs_device, dev_list);
                if (device->bdev)
                        continue;

                if (!device->name)
                        continue;

                bdev = open_bdev_excl(device->name, flags, holder);

                if (IS_ERR(bdev)) {
                        printk("open %s failed\n", device->name);
                        goto error;
                }
                set_blocksize(bdev, 4096);

                bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
                if (!bh)
                        goto error_close;

                disk_super = (struct btrfs_super_block *)bh->b_data;
                if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
                    sizeof(disk_super->magic)))
                        goto error_brelse;

                devid = le64_to_cpu(disk_super->dev_item.devid);
                if (devid != device->devid)
                        goto error_brelse;

                transid = btrfs_super_generation(disk_super);
                if (!latest_transid || transid > latest_transid) {
                        latest_devid = devid;
                        latest_transid = transid;
                        latest_bdev = bdev;
                }

                device->bdev = bdev;
                device->in_fs_metadata = 0;
                fs_devices->open_devices++;
                continue;

error_brelse:
                brelse(bh);
error_close:
                close_bdev_excl(bdev);
error:
                continue;
        }
        if (fs_devices->open_devices == 0) {
                ret = -EIO;
                goto out;
        }
        fs_devices->mounted = 1;
        fs_devices->latest_bdev = latest_bdev;
        fs_devices->latest_devid = latest_devid;
        fs_devices->latest_trans = latest_transid;
out:
        mutex_unlock(&uuid_mutex);
        return ret;
}

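/*
 * read the super block off a single device, and if it looks like a
 * btrfs device, register it with device_list_add
 */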
int btrfs_scan_one_device(const char *path, int flags, void *holder,
                          struct btrfs_fs_devices **fs_devices_ret)
{
        struct btrfs_super_block *disk_super;
        struct block_device *bdev;
        struct buffer_head *bh;
        int ret;
        u64 devid;
        u64 transid;

        mutex_lock(&uuid_mutex);

        bdev = open_bdev_excl(path, flags, holder);

        if (IS_ERR(bdev)) {
                ret = PTR_ERR(bdev);
                goto error;
        }

        ret = set_blocksize(bdev, 4096);
        if (ret)
                goto error_close;
        bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
        if (!bh) {
                ret = -EIO;
                goto error_close;
        }
        disk_super = (struct btrfs_super_block *)bh->b_data;
        if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
            sizeof(disk_super->magic))) {
                ret = -EINVAL;
                goto error_brelse;
        }
        devid = le64_to_cpu(disk_super->dev_item.devid);
        transid = btrfs_super_generation(disk_super);
        if (disk_super->label[0])
                printk("device label %s ", disk_super->label);
        else {
                /* FIXME, make a real uuid parser */
                printk("device fsid %llx-%llx ",
                       *(unsigned long long *)disk_super->fsid,
                       *(unsigned long long *)(disk_super->fsid + 8));
        }
        printk("devid %Lu transid %Lu %s\n", devid, transid, path);
        ret = device_list_add(path, disk_super, devid, fs_devices_ret);

error_brelse:
        brelse(bh);
error_close:
        close_bdev_excl(bdev);
error:
        mutex_unlock(&uuid_mutex);
        return ret;
}

/*
 * this uses a pretty simple search, the expectation is that it is
 * called very infrequently and that a given device has a small number
 * of extents
 */
static noinline int find_free_dev_extent(struct btrfs_trans_handle *trans,
                                         struct btrfs_device *device,
                                         struct btrfs_path *path,
                                         u64 num_bytes, u64 *start)
{
        struct btrfs_key key;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *dev_extent = NULL;
        u64 hole_size = 0;
        u64 last_byte = 0;
        u64 search_start = 0;
        u64 search_end = device->total_bytes;
        int ret;
        int slot = 0;
        int start_found;
        struct extent_buffer *l;

        start_found = 0;
        path->reada = 2;

        /* FIXME use last free of some kind */

        /* we don't want to overwrite the superblock on the drive,
         * so we make sure to start at an offset of at least 1MB
         */
        search_start = max((u64)1024 * 1024, search_start);

        if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
                search_start = max(root->fs_info->alloc_start, search_start);

        key.objectid = device->devid;
        key.offset = search_start;
        key.type = BTRFS_DEV_EXTENT_KEY;
        ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;
        ret = btrfs_previous_item(root, path, 0, key.type);
        if (ret < 0)
                goto error;
        l = path->nodes[0];
        btrfs_item_key_to_cpu(l, &key, path->slots[0]);
        while (1) {
                l = path->nodes[0];
                slot = path->slots[0];
                if (slot >= btrfs_header_nritems(l)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto error;
no_more_items:
                        if (!start_found) {
                                if (search_start >= search_end) {
                                        ret = -ENOSPC;
                                        goto error;
                                }
                                *start = search_start;
                                start_found = 1;
                                goto check_pending;
                        }
                        *start = last_byte > search_start ?
                                last_byte : search_start;
                        if (search_end <= *start) {
                                ret = -ENOSPC;
                                goto error;
                        }
                        goto check_pending;
                }
                btrfs_item_key_to_cpu(l, &key, slot);

                if (key.objectid < device->devid)
                        goto next;

                if (key.objectid > device->devid)
                        goto no_more_items;

                if (key.offset >= search_start && key.offset > last_byte &&
                    start_found) {
                        if (last_byte < search_start)
                                last_byte = search_start;
                        hole_size = key.offset - last_byte;
                        if (key.offset > last_byte &&
                            hole_size >= num_bytes) {
                                *start = last_byte;
                                goto check_pending;
                        }
                }
                if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY)
                        goto next;

                start_found = 1;
                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
next:
                path->slots[0]++;
                cond_resched();
        }
check_pending:
        /* we have to make sure we didn't find an extent that has already
         * been allocated by the map tree or the original allocation
         */
        btrfs_release_path(root, path);
        BUG_ON(*start < search_start);

        if (*start + num_bytes > search_end) {
                ret = -ENOSPC;
                goto error;
        }
        /* check for pending inserts here */
        return 0;

error:
        btrfs_release_path(root, path);
        return ret;
}

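/*
 * remove the device extent item that covers byte offset 'start' on
 * this device and give the bytes back to the device
 */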
int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
                          struct btrfs_device *device,
                          u64 start)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct extent_buffer *leaf = NULL;
        struct btrfs_dev_extent *extent = NULL;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = device->devid;
        key.offset = start;
        key.type = BTRFS_DEV_EXTENT_KEY;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0) {
                ret = btrfs_previous_item(root, path, key.objectid,
                                          BTRFS_DEV_EXTENT_KEY);
                BUG_ON(ret);
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                extent = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_dev_extent);
                BUG_ON(found_key.offset > start || found_key.offset +
                       btrfs_dev_extent_length(leaf, extent) < start);
                ret = 0;
        } else if (ret == 0) {
                leaf = path->nodes[0];
                extent = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_dev_extent);
        }
        BUG_ON(ret);

        if (device->bytes_used > 0)
                device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
        ret = btrfs_del_item(trans, root, path);
        BUG_ON(ret);

        btrfs_free_path(path);
        return ret;
}

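/*
 * find a free area of num_bytes on the device and insert a device
 * extent item that points back at the owning chunk
 */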
noinline int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
                           struct btrfs_device *device,
                           u64 chunk_tree, u64 chunk_objectid,
                           u64 chunk_offset,
                           u64 num_bytes, u64 *start)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root = device->dev_root;
        struct btrfs_dev_extent *extent;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        WARN_ON(!device->in_fs_metadata);
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = find_free_dev_extent(trans, device, path, num_bytes, start);
        if (ret)
                goto err;

        key.objectid = device->devid;
        key.offset = *start;
        key.type = BTRFS_DEV_EXTENT_KEY;
        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(*extent));
        BUG_ON(ret);

        leaf = path->nodes[0];
        extent = btrfs_item_ptr(leaf, path->slots[0],
                                struct btrfs_dev_extent);
        btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
        btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
        btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);

        write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
                    (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
                    BTRFS_UUID_SIZE);

        btrfs_set_dev_extent_length(leaf, extent, num_bytes);
        btrfs_mark_buffer_dirty(leaf);
err:
        btrfs_free_path(path);
        return ret;
}

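/*
 * find the logical offset just past the highest chunk allocated for
 * this objectid, so a new chunk can be placed after it
 */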
static noinline int find_next_chunk(struct btrfs_root *root,
                                    u64 objectid, u64 *offset)
{
        struct btrfs_path *path;
        int ret;
        struct btrfs_key key;
        struct btrfs_chunk *chunk;
        struct btrfs_key found_key;

        path = btrfs_alloc_path();
        BUG_ON(!path);

        key.objectid = objectid;
        key.offset = (u64)-1;
        key.type = BTRFS_CHUNK_ITEM_KEY;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;

        BUG_ON(ret == 0);

        ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
        if (ret) {
                *offset = 0;
        } else {
                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
                if (found_key.objectid != objectid)
                        *offset = 0;
                else {
                        chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                               struct btrfs_chunk);
                        *offset = found_key.offset +
                                btrfs_chunk_length(path->nodes[0], chunk);
                }
        }
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}

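/*
 * devids are handed out sequentially, return one past the highest
 * devid found in the chunk tree
 */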
static noinline int find_next_devid(struct btrfs_root *root,
                                    struct btrfs_path *path, u64 *objectid)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_key found_key;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = (u64)-1;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;

        BUG_ON(ret == 0);

        ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
                                  BTRFS_DEV_ITEM_KEY);
        if (ret) {
                *objectid = 1;
        } else {
                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
                *objectid = found_key.offset + 1;
        }
        ret = 0;
error:
        btrfs_release_path(root, path);
        return ret;
}

/*
 * the device information is stored in the chunk root
 * the btrfs_device struct should be fully filled in
 */
int btrfs_add_device(struct btrfs_trans_handle *trans,
                     struct btrfs_root *root,
                     struct btrfs_device *device)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_dev_item *dev_item;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        unsigned long ptr;
        u64 free_devid = 0;

        root = root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        ret = find_next_devid(root, path, &free_devid);
        if (ret)
                goto out;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = free_devid;

        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      sizeof(*dev_item));
        if (ret)
                goto out;

        leaf = path->nodes[0];
        dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

        device->devid = free_devid;
        btrfs_set_device_id(leaf, dev_item, device->devid);
        btrfs_set_device_type(leaf, dev_item, device->type);
        btrfs_set_device_io_align(leaf, dev_item, device->io_align);
        btrfs_set_device_io_width(leaf, dev_item, device->io_width);
        btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
        btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
        btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
        btrfs_set_device_group(leaf, dev_item, 0);
        btrfs_set_device_seek_speed(leaf, dev_item, 0);
        btrfs_set_device_bandwidth(leaf, dev_item, 0);

        ptr = (unsigned long)btrfs_device_uuid(dev_item);
        write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;

out:
        btrfs_free_path(path);
        return ret;
}

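/*
 * remove the dev item for this device from the chunk tree and detach
 * the device from the in-memory lists and super block counts
 */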
static int btrfs_rm_dev_item(struct btrfs_root *root,
                             struct btrfs_device *device)
{
        int ret;
        struct btrfs_path *path;
        struct block_device *bdev = device->bdev;
        struct btrfs_device *next_dev;
        struct btrfs_key key;
        u64 total_bytes;
        struct btrfs_fs_devices *fs_devices;
        struct btrfs_trans_handle *trans;

        root = root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        trans = btrfs_start_transaction(root, 1);
        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;
        lock_chunks(root);

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = btrfs_del_item(trans, root, path);
        if (ret)
                goto out;

        /*
         * at this point, the device is zero sized.  We want to
         * remove it from the devices list and zero out the old super
         */
        list_del_init(&device->dev_list);
        list_del_init(&device->dev_alloc_list);
        fs_devices = root->fs_info->fs_devices;

        next_dev = list_entry(fs_devices->devices.next, struct btrfs_device,
                              dev_list);
        if (bdev == root->fs_info->sb->s_bdev)
                root->fs_info->sb->s_bdev = next_dev->bdev;
        if (bdev == fs_devices->latest_bdev)
                fs_devices->latest_bdev = next_dev->bdev;

        total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
        btrfs_set_super_num_devices(&root->fs_info->super_copy,
                                    total_bytes - 1);
out:
        btrfs_free_path(path);
        unlock_chunks(root);
        btrfs_commit_transaction(trans, root);
        return ret;
}

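/*
 * remove a device from the filesystem.  The path may also be the
 * string "missing", which means the first device present in the
 * metadata but without an open bdev is removed instead
 */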
int btrfs_rm_device(struct btrfs_root *root, char *device_path)
{
        struct btrfs_device *device;
        struct block_device *bdev;
        struct buffer_head *bh = NULL;
        struct btrfs_super_block *disk_super;
        u64 all_avail;
        u64 devid;
        int ret = 0;

        mutex_lock(&uuid_mutex);
        mutex_lock(&root->fs_info->volume_mutex);

        all_avail = root->fs_info->avail_data_alloc_bits |
                root->fs_info->avail_system_alloc_bits |
                root->fs_info->avail_metadata_alloc_bits;

        if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
            btrfs_super_num_devices(&root->fs_info->super_copy) <= 4) {
                printk("btrfs: unable to go below four devices on raid10\n");
                ret = -EINVAL;
                goto out;
        }

        if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
            btrfs_super_num_devices(&root->fs_info->super_copy) <= 2) {
                printk("btrfs: unable to go below two devices on raid1\n");
                ret = -EINVAL;
                goto out;
        }

        if (strcmp(device_path, "missing") == 0) {
                struct list_head *cur;
                struct list_head *devices;
                struct btrfs_device *tmp;

                device = NULL;
                devices = &root->fs_info->fs_devices->devices;
                list_for_each(cur, devices) {
                        tmp = list_entry(cur, struct btrfs_device, dev_list);
                        if (tmp->in_fs_metadata && !tmp->bdev) {
                                device = tmp;
                                break;
                        }
                }
                bdev = NULL;
                bh = NULL;
                disk_super = NULL;
                if (!device) {
                        printk("btrfs: no missing devices found to remove\n");
                        goto out;
                }
        } else {
                bdev = open_bdev_excl(device_path, 0,
                                      root->fs_info->bdev_holder);
                if (IS_ERR(bdev)) {
                        ret = PTR_ERR(bdev);
                        goto out;
                }

                bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
                if (!bh) {
                        ret = -EIO;
                        goto error_close;
                }
                disk_super = (struct btrfs_super_block *)bh->b_data;
                if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
                    sizeof(disk_super->magic))) {
                        ret = -ENOENT;
                        goto error_brelse;
                }
                if (memcmp(disk_super->fsid, root->fs_info->fsid,
                           BTRFS_FSID_SIZE)) {
                        ret = -ENOENT;
                        goto error_brelse;
                }
                devid = le64_to_cpu(disk_super->dev_item.devid);
                device = btrfs_find_device(root, devid, NULL);
                if (!device) {
                        ret = -ENOENT;
                        goto error_brelse;
                }
        }
        root->fs_info->fs_devices->num_devices--;
        root->fs_info->fs_devices->open_devices--;

        ret = btrfs_shrink_device(device, 0);
        if (ret)
                goto error_brelse;

        ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
        if (ret)
                goto error_brelse;

        if (bh) {
                /* make sure this device isn't detected as part of
                 * the FS anymore
                 */
                memset(&disk_super->magic, 0, sizeof(disk_super->magic));
                set_buffer_dirty(bh);
                sync_dirty_buffer(bh);

                brelse(bh);
        }

        if (device->bdev) {
                /* one close for the device struct or super_block */
                close_bdev_excl(device->bdev);
        }
        if (bdev) {
                /* one close for us */
                close_bdev_excl(bdev);
        }
        kfree(device->name);
        kfree(device);
        ret = 0;
        goto out;

error_brelse:
        brelse(bh);
error_close:
        if (bdev)
                close_bdev_excl(bdev);
out:
        mutex_unlock(&root->fs_info->volume_mutex);
        mutex_unlock(&uuid_mutex);
        return ret;
}

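/*
 * add a brand new device to a mounted filesystem: allocate the
 * in-memory struct, insert its dev item and bump the super block
 * totals
 */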
int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_device *device;
        struct block_device *bdev;
        struct list_head *cur;
        struct list_head *devices;
        u64 total_bytes;
        int ret = 0;

        bdev = open_bdev_excl(device_path, 0, root->fs_info->bdev_holder);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);

        filemap_write_and_wait(bdev->bd_inode->i_mapping);
        mutex_lock(&root->fs_info->volume_mutex);

        trans = btrfs_start_transaction(root, 1);
        lock_chunks(root);
        devices = &root->fs_info->fs_devices->devices;
        list_for_each(cur, devices) {
                device = list_entry(cur, struct btrfs_device, dev_list);
                if (device->bdev == bdev) {
                        ret = -EEXIST;
                        goto out;
                }
        }

        device = kzalloc(sizeof(*device), GFP_NOFS);
        if (!device) {
                /* we can safely leave the fs_devices entry around */
                ret = -ENOMEM;
                goto out_close_bdev;
        }

        device->barriers = 1;
        device->work.func = pending_bios_fn;
        generate_random_uuid(device->uuid);
        spin_lock_init(&device->io_lock);
        device->name = kstrdup(device_path, GFP_NOFS);
        if (!device->name) {
                kfree(device);
                ret = -ENOMEM;
                goto out_close_bdev;
        }
        device->io_width = root->sectorsize;
        device->io_align = root->sectorsize;
        device->sector_size = root->sectorsize;
        device->total_bytes = i_size_read(bdev->bd_inode);
        device->dev_root = root->fs_info->dev_root;
        device->bdev = bdev;
        device->in_fs_metadata = 1;

        ret = btrfs_add_device(trans, root, device);
        if (ret)
                goto out_close_bdev;

        set_blocksize(device->bdev, 4096);

        total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
        btrfs_set_super_total_bytes(&root->fs_info->super_copy,
                                    total_bytes + device->total_bytes);

        total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
        btrfs_set_super_num_devices(&root->fs_info->super_copy,
                                    total_bytes + 1);

        list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
        list_add(&device->dev_alloc_list,
                 &root->fs_info->fs_devices->alloc_list);
        root->fs_info->fs_devices->num_devices++;
        root->fs_info->fs_devices->open_devices++;
out:
        unlock_chunks(root);
        btrfs_end_transaction(trans, root);
        mutex_unlock(&root->fs_info->volume_mutex);

        return ret;

out_close_bdev:
        close_bdev_excl(bdev);
        goto out;
}

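/*
 * copy the current in-memory fields of a device into its dev item in
 * the chunk tree
 */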
noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
                                 struct btrfs_device *device)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_root *root;
        struct btrfs_dev_item *dev_item;
        struct extent_buffer *leaf;
        struct btrfs_key key;

        root = device->dev_root->fs_info->chunk_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
        key.type = BTRFS_DEV_ITEM_KEY;
        key.offset = device->devid;

        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        if (ret < 0)
                goto out;

        if (ret > 0) {
                ret = -ENOENT;
                goto out;
        }

        leaf = path->nodes[0];
        dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);

        btrfs_set_device_id(leaf, dev_item, device->devid);
        btrfs_set_device_type(leaf, dev_item, device->type);
        btrfs_set_device_io_align(leaf, dev_item, device->io_align);
        btrfs_set_device_io_width(leaf, dev_item, device->io_width);
        btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
        btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
        btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
        btrfs_mark_buffer_dirty(leaf);

out:
        btrfs_free_path(path);
        return ret;
}

static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
                      struct btrfs_device *device, u64 new_size)
{
        struct btrfs_super_block *super_copy =
                &device->dev_root->fs_info->super_copy;
        u64 old_total = btrfs_super_total_bytes(super_copy);
        u64 diff = new_size - device->total_bytes;

        btrfs_set_super_total_bytes(super_copy, old_total + diff);
        return btrfs_update_device(trans, device);
}

int btrfs_grow_device(struct btrfs_trans_handle *trans,
                      struct btrfs_device *device, u64 new_size)
{
        int ret;
        lock_chunks(device->dev_root);
        ret = __btrfs_grow_device(trans, device, new_size);
        unlock_chunks(device->dev_root);
        return ret;
}

static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root,
                            u64 chunk_tree, u64 chunk_objectid,
                            u64 chunk_offset)
{
        int ret;
        struct btrfs_path *path;
        struct btrfs_key key;

        root = root->fs_info->chunk_root;
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = chunk_objectid;
        key.offset = chunk_offset;
        key.type = BTRFS_CHUNK_ITEM_KEY;

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        BUG_ON(ret);

        ret = btrfs_del_item(trans, root, path);
        BUG_ON(ret);

        btrfs_free_path(path);
        return 0;
}

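/*
 * remove the matching chunk from the sys_chunk_array embedded in our
 * copy of the super block
 */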
int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
                        u64 chunk_offset)
{
        struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
        struct btrfs_disk_key *disk_key;
        struct btrfs_chunk *chunk;
        u8 *ptr;
        int ret = 0;
        u32 num_stripes;
        u32 array_size;
        u32 len = 0;
        u32 cur;
        struct btrfs_key key;

        array_size = btrfs_super_sys_array_size(super_copy);

        ptr = super_copy->sys_chunk_array;
        cur = 0;

        while (cur < array_size) {
                disk_key = (struct btrfs_disk_key *)ptr;
                btrfs_disk_key_to_cpu(&key, disk_key);

                len = sizeof(*disk_key);

                if (key.type == BTRFS_CHUNK_ITEM_KEY) {
                        chunk = (struct btrfs_chunk *)(ptr + len);
                        num_stripes = btrfs_stack_chunk_num_stripes(chunk);
                        len += btrfs_chunk_item_size(num_stripes);
                } else {
                        ret = -EIO;
                        break;
                }
                if (key.objectid == chunk_objectid &&
                    key.offset == chunk_offset) {
                        memmove(ptr, ptr + len, array_size - (cur + len));
                        array_size -= len;
                        btrfs_set_super_sys_array_size(super_copy, array_size);
                } else {
                        ptr += len;
                        cur += len;
                }
        }
        return ret;
}

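/*
 * relocate everything stored in a chunk and then delete the device
 * extents, the chunk tree entries and the block group that backed it
 */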
int btrfs_relocate_chunk(struct btrfs_root *root,
                         u64 chunk_tree, u64 chunk_objectid,
                         u64 chunk_offset)
{
        struct extent_map_tree *em_tree;
        struct btrfs_root *extent_root;
        struct btrfs_trans_handle *trans;
        struct extent_map *em;
        struct map_lookup *map;
        int ret;
        int i;

        printk("btrfs relocating chunk %llu\n",
               (unsigned long long)chunk_offset);
        root = root->fs_info->chunk_root;
        extent_root = root->fs_info->extent_root;
        em_tree = &root->fs_info->mapping_tree.map_tree;

        /* step one, relocate all the extents inside this chunk */
        ret = btrfs_relocate_block_group(extent_root, chunk_offset);
        BUG_ON(ret);

        trans = btrfs_start_transaction(root, 1);
        BUG_ON(!trans);

        lock_chunks(root);

        /*
         * step two, delete the device extents and the
         * chunk tree entries
         */
        spin_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, chunk_offset, 1);
        spin_unlock(&em_tree->lock);

        BUG_ON(em->start > chunk_offset ||
               em->start + em->len < chunk_offset);
        map = (struct map_lookup *)em->bdev;

        for (i = 0; i < map->num_stripes; i++) {
                ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
                                            map->stripes[i].physical);
                BUG_ON(ret);

                if (map->stripes[i].dev) {
                        ret = btrfs_update_device(trans, map->stripes[i].dev);
                        BUG_ON(ret);
                }
        }
        ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
                               chunk_offset);
        BUG_ON(ret);

        if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
                ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
                BUG_ON(ret);
        }

        ret = btrfs_remove_block_group(trans, extent_root, chunk_offset);
        BUG_ON(ret);

        spin_lock(&em_tree->lock);
        remove_extent_mapping(em_tree, em);
        spin_unlock(&em_tree->lock);

        kfree(map);
        em->bdev = NULL;

        /* once for the tree */
        free_extent_map(em);
        /* once for us */
        free_extent_map(em);

        unlock_chunks(root);
        btrfs_end_transaction(trans, root);
        return 0;
}

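/* scale num by factor tenths; a factor of 10 returns num unchanged */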
static u64 div_factor(u64 num, int factor)
{
        if (factor == 10)
                return num;
        num *= factor;
        do_div(num, 10);
        return num;
}

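/*
 * balance: free up a little room on every device, then relocate each
 * chunk (except chunk zero) so it is allocated again across the
 * current set of devices
 */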
int btrfs_balance(struct btrfs_root *dev_root)
{
        int ret;
        struct list_head *cur;
        struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
        struct btrfs_device *device;
        u64 old_size;
        u64 size_to_free;
        struct btrfs_path *path;
        struct btrfs_key key;
        struct btrfs_chunk *chunk;
        struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
        struct btrfs_trans_handle *trans;
        struct btrfs_key found_key;

        mutex_lock(&dev_root->fs_info->volume_mutex);
        dev_root = dev_root->fs_info->dev_root;

        /* step one, make some room on all the devices */
        list_for_each(cur, devices) {
                device = list_entry(cur, struct btrfs_device, dev_list);
                old_size = device->total_bytes;
                size_to_free = div_factor(old_size, 1);
                size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
                if (device->total_bytes - device->bytes_used > size_to_free)
                        continue;

                ret = btrfs_shrink_device(device, old_size - size_to_free);
                BUG_ON(ret);

                trans = btrfs_start_transaction(dev_root, 1);
                BUG_ON(!trans);

                ret = btrfs_grow_device(trans, device, old_size);
                BUG_ON(ret);

                btrfs_end_transaction(trans, dev_root);
        }

        /* step two, relocate all the chunks */
        path = btrfs_alloc_path();
        BUG_ON(!path);

        key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
        key.offset = (u64)-1;
        key.type = BTRFS_CHUNK_ITEM_KEY;

        while (1) {
                ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
                if (ret < 0)
                        goto error;

                /*
                 * this shouldn't happen, it means the last relocate
                 * failed
                 */
                if (ret == 0)
                        break;

                ret = btrfs_previous_item(chunk_root, path, 0,
                                          BTRFS_CHUNK_ITEM_KEY);
                if (ret)
                        break;

                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                      path->slots[0]);
                if (found_key.objectid != key.objectid)
                        break;

                chunk = btrfs_item_ptr(path->nodes[0],
                                       path->slots[0],
                                       struct btrfs_chunk);
                key.offset = found_key.offset;
                /* chunk zero is special */
                if (key.offset == 0)
                        break;

                btrfs_release_path(chunk_root, path);
                ret = btrfs_relocate_chunk(chunk_root,
                                           chunk_root->root_key.objectid,
                                           found_key.objectid,
                                           found_key.offset);
                BUG_ON(ret);
        }
        ret = 0;
error:
        btrfs_free_path(path);
        mutex_unlock(&dev_root->fs_info->volume_mutex);
        return ret;
}

1433 /*
1434  * shrinking a device means finding all of the device extents past
1435  * the new size, and then following the back refs to the chunks.
1436  * The chunk relocation code actually frees the device extent
1437  */
1438 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
1439 {
1440         struct btrfs_trans_handle *trans;
1441         struct btrfs_root *root = device->dev_root;
1442         struct btrfs_dev_extent *dev_extent = NULL;
1443         struct btrfs_path *path;
1444         u64 length;
1445         u64 chunk_tree;
1446         u64 chunk_objectid;
1447         u64 chunk_offset;
1448         int ret;
1449         int slot;
1450         struct extent_buffer *l;
1451         struct btrfs_key key;
1452         struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1453         u64 old_total = btrfs_super_total_bytes(super_copy);
1454         u64 diff = device->total_bytes - new_size;
1455
1456
1457         path = btrfs_alloc_path();
1458         if (!path)
1459                 return -ENOMEM;
1460
1461         trans = btrfs_start_transaction(root, 1);
1462         if (!trans) {
1463                 ret = -ENOMEM;
1464                 goto done;
1465         }
1466
1467         path->reada = 2;
1468
1469         lock_chunks(root);
1470
1471         device->total_bytes = new_size;
1472         ret = btrfs_update_device(trans, device);
1473         if (ret) {
1474                 unlock_chunks(root);
1475                 btrfs_end_transaction(trans, root);
1476                 goto done;
1477         }
1478         WARN_ON(diff > old_total);
1479         btrfs_set_super_total_bytes(super_copy, old_total - diff);
1480         unlock_chunks(root);
1481         btrfs_end_transaction(trans, root);
1482
1483         key.objectid = device->devid;
1484         key.offset = (u64)-1;
1485         key.type = BTRFS_DEV_EXTENT_KEY;
1486
1487         while (1) {
1488                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1489                 if (ret < 0)
1490                         goto done;
1491
1492                 ret = btrfs_previous_item(root, path, 0, key.type);
1493                 if (ret < 0)
1494                         goto done;
1495                 if (ret) {
1496                         ret = 0;
1497                         goto done;
1498                 }
1499
1500                 l = path->nodes[0];
1501                 slot = path->slots[0];
1502                 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
1503
1504                 if (key.objectid != device->devid)
1505                         goto done;
1506
1507                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1508                 length = btrfs_dev_extent_length(l, dev_extent);
1509
1510                 if (key.offset + length <= new_size)
1511                         goto done;
1512
1513                 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
1514                 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
1515                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
1516                 btrfs_release_path(root, path);
1517
1518                 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
1519                                            chunk_offset);
1520                 if (ret)
1521                         goto done;
1522         }
1523
1524 done:
1525         btrfs_free_path(path);
1526         return ret;
1527 }
1528
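/*
 * Append one (key, chunk item) pair to the superblock's
 * sys_chunk_array.  The array is a packed byte stream, so after two
 * insertions it looks roughly like this (sizes vary with the stripe
 * count of each chunk):
 *
 *   [disk_key][chunk + stripes][disk_key][chunk + stripes]
 *
 * sys_array_size tracks the number of bytes in use.
 */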
1529 int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
1530                            struct btrfs_root *root,
1531                            struct btrfs_key *key,
1532                            struct btrfs_chunk *chunk, int item_size)
1533 {
1534         struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1535         struct btrfs_disk_key disk_key;
1536         u32 array_size;
1537         u8 *ptr;
1538
1539         array_size = btrfs_super_sys_array_size(super_copy);
1540         if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
1541                 return -EFBIG;
1542
1543         ptr = super_copy->sys_chunk_array + array_size;
1544         btrfs_cpu_key_to_disk(&disk_key, key);
1545         memcpy(ptr, &disk_key, sizeof(disk_key));
1546         ptr += sizeof(disk_key);
1547         memcpy(ptr, chunk, item_size);
1548         item_size += sizeof(disk_key);
1549         btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
1550         return 0;
1551 }
1552
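/*
 * Bytes of logical address space a chunk covers, given the bytes
 * allocated per stripe.  RAID1 and DUP mirror every byte, so the chunk
 * is only calc_size long; RAID10 mirrors within each group of
 * sub_stripes, so e.g. num_stripes = 4 with sub_stripes = 2 covers
 * calc_size * 2; plain striping covers calc_size * num_stripes.
 */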
1553 static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size,
1554                                         int num_stripes, int sub_stripes)
1555 {
1556         if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
1557                 return calc_size;
1558         else if (type & BTRFS_BLOCK_GROUP_RAID10)
1559                 return calc_size * (num_stripes / sub_stripes);
1560         else
1561                 return calc_size * num_stripes;
1562 }
1563
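/*
 * Allocate a new chunk of the given type.  Roughly: pick stripe and
 * chunk size limits from the block group type, build a private list of
 * devices with enough free space (retrying with smaller sizes via the
 * 'again' label), allocate one device extent per stripe, then insert
 * the chunk item and a matching extent_map/map_lookup so the new
 * logical range can be mapped immediately.
 */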
1565 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
1566                       struct btrfs_root *extent_root, u64 *start,
1567                       u64 *num_bytes, u64 type)
1568 {
1569         u64 dev_offset;
1570         struct btrfs_fs_info *info = extent_root->fs_info;
1571         struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
1572         struct btrfs_path *path;
1573         struct btrfs_stripe *stripes;
1574         struct btrfs_device *device = NULL;
1575         struct btrfs_chunk *chunk;
1576         struct list_head private_devs;
1577         struct list_head *dev_list;
1578         struct list_head *cur;
1579         struct extent_map_tree *em_tree;
1580         struct map_lookup *map;
1581         struct extent_map *em;
1582         int min_stripe_size = 1 * 1024 * 1024;
1583         u64 physical;
1584         u64 calc_size = 1024 * 1024 * 1024;
1585         u64 max_chunk_size = calc_size;
1586         u64 min_free;
1587         u64 avail;
1588         u64 max_avail = 0;
1589         u64 percent_max;
1590         int num_stripes = 1;
1591         int min_stripes = 1;
1592         int sub_stripes = 0;
1593         int looped = 0;
1594         int ret;
1595         int index;
1596         int stripe_len = 64 * 1024;
1597         struct btrfs_key key;
1598
1599         if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
1600             (type & BTRFS_BLOCK_GROUP_DUP)) {
1601                 WARN_ON(1);
1602                 type &= ~BTRFS_BLOCK_GROUP_DUP;
1603         }
1604         dev_list = &extent_root->fs_info->fs_devices->alloc_list;
1605         if (list_empty(dev_list))
1606                 return -ENOSPC;
1607
1608         if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
1609                 num_stripes = extent_root->fs_info->fs_devices->open_devices;
1610                 min_stripes = 2;
1611         }
1612         if (type & (BTRFS_BLOCK_GROUP_DUP)) {
1613                 num_stripes = 2;
1614                 min_stripes = 2;
1615         }
1616         if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
1617                 num_stripes = min_t(u64, 2,
1618                             extent_root->fs_info->fs_devices->open_devices);
1619                 if (num_stripes < 2)
1620                         return -ENOSPC;
1621                 min_stripes = 2;
1622         }
1623         if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
1624                 num_stripes = extent_root->fs_info->fs_devices->open_devices;
1625                 if (num_stripes < 4)
1626                         return -ENOSPC;
1627                 num_stripes &= ~(u32)1;
1628                 sub_stripes = 2;
1629                 min_stripes = 4;
1630         }
1631
1632         if (type & BTRFS_BLOCK_GROUP_DATA) {
1633                 max_chunk_size = 10 * calc_size;
1634                 min_stripe_size = 64 * 1024 * 1024;
1635         } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
1636                 max_chunk_size = 4 * calc_size;
1637                 min_stripe_size = 32 * 1024 * 1024;
1638         } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
1639                 calc_size = 8 * 1024 * 1024;
1640                 max_chunk_size = calc_size * 2;
1641                 min_stripe_size = 1 * 1024 * 1024;
1642         }
1643
1644         path = btrfs_alloc_path();
1645         if (!path)
1646                 return -ENOMEM;
1647
1648         /* we don't want a chunk larger than 10% of the FS */
1649         percent_max = div_factor(btrfs_super_total_bytes(&info->super_copy), 1);
1650         max_chunk_size = min(percent_max, max_chunk_size);
1651
1652 again:
1653         if (calc_size * num_stripes > max_chunk_size) {
1654                 calc_size = max_chunk_size;
1655                 do_div(calc_size, num_stripes);
1656                 do_div(calc_size, stripe_len);
1657                 calc_size *= stripe_len;
1658         }
1659         /* we don't want tiny stripes */
1660         calc_size = max_t(u64, min_stripe_size, calc_size);
1661
1662         do_div(calc_size, stripe_len);
1663         calc_size *= stripe_len;
1664
1665         INIT_LIST_HEAD(&private_devs);
1666         cur = dev_list->next;
1667         index = 0;
1668
1669         if (type & BTRFS_BLOCK_GROUP_DUP)
1670                 min_free = calc_size * 2;
1671         else
1672                 min_free = calc_size;
1673
1674         /*
1675          * we add 1MB because we never use the first 1MB of the device, unless
1676          * we've looped, then we are likely allocating the maximum amount of
1677          * space left already
1678          */
1679         if (!looped)
1680                 min_free += 1024 * 1024;
1681
1682         /* build a private list of devices we will allocate from */
1683         while (index < num_stripes) {
1684                 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
1685
1686                 if (device->total_bytes > device->bytes_used)
1687                         avail = device->total_bytes - device->bytes_used;
1688                 else
1689                         avail = 0;
1690                 cur = cur->next;
1691
1692                 if (device->in_fs_metadata && avail >= min_free) {
1693                         u64 ignored_start = 0;
1694                         ret = find_free_dev_extent(trans, device, path,
1695                                                    min_free,
1696                                                    &ignored_start);
1697                         if (ret == 0) {
1698                                 list_move_tail(&device->dev_alloc_list,
1699                                                &private_devs);
1700                                 index++;
1701                                 if (type & BTRFS_BLOCK_GROUP_DUP)
1702                                         index++;
1703                         }
1704                 } else if (device->in_fs_metadata && avail > max_avail)
1705                         max_avail = avail;
1706                 if (cur == dev_list)
1707                         break;
1708         }
1709         if (index < num_stripes) {
1710                 list_splice(&private_devs, dev_list);
1711                 if (index >= min_stripes) {
1712                         num_stripes = index;
1713                         if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
1714                                 num_stripes /= sub_stripes;
1715                                 num_stripes *= sub_stripes;
1716                         }
1717                         looped = 1;
1718                         goto again;
1719                 }
1720                 if (!looped && max_avail > 0) {
1721                         looped = 1;
1722                         calc_size = max_avail;
1723                         goto again;
1724                 }
1725                 btrfs_free_path(path);
1726                 return -ENOSPC;
1727         }
1728         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
1729         key.type = BTRFS_CHUNK_ITEM_KEY;
1730         ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
1731                               &key.offset);
1732         if (ret) {
1733                 btrfs_free_path(path);
1734                 return ret;
1735         }
1736
1737         chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
1738         if (!chunk) {
1739                 btrfs_free_path(path);
1740                 return -ENOMEM;
1741         }
1742
1743         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
1744         if (!map) {
1745                 kfree(chunk);
1746                 btrfs_free_path(path);
1747                 return -ENOMEM;
1748         }
1749         btrfs_free_path(path);
1750         path = NULL;
1751
1752         stripes = &chunk->stripe;
1753         *num_bytes = chunk_bytes_by_type(type, calc_size,
1754                                          num_stripes, sub_stripes);
1755
1756         index = 0;
1757         while (index < num_stripes) {
1758                 struct btrfs_stripe *stripe;
1759                 BUG_ON(list_empty(&private_devs));
1760                 cur = private_devs.next;
1761                 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
1762
1763                 /* loop over this device again if we're doing a dup group */
1764                 if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
1765                     (index == num_stripes - 1))
1766                         list_move_tail(&device->dev_alloc_list, dev_list);
1767
1768                 ret = btrfs_alloc_dev_extent(trans, device,
1769                              info->chunk_root->root_key.objectid,
1770                              BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
1771                              calc_size, &dev_offset);
1772                 BUG_ON(ret);
1773                 device->bytes_used += calc_size;
1774                 ret = btrfs_update_device(trans, device);
1775                 BUG_ON(ret);
1776
1777                 map->stripes[index].dev = device;
1778                 map->stripes[index].physical = dev_offset;
1779                 stripe = stripes + index;
1780                 btrfs_set_stack_stripe_devid(stripe, device->devid);
1781                 btrfs_set_stack_stripe_offset(stripe, dev_offset);
1782                 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
1783                 physical = dev_offset;
1784                 index++;
1785         }
1786         BUG_ON(!list_empty(&private_devs));
1787
1788         /* key was set above */
1789         btrfs_set_stack_chunk_length(chunk, *num_bytes);
1790         btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
1791         btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
1792         btrfs_set_stack_chunk_type(chunk, type);
1793         btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
1794         btrfs_set_stack_chunk_io_align(chunk, stripe_len);
1795         btrfs_set_stack_chunk_io_width(chunk, stripe_len);
1796         btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
1797         btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
1798         map->sector_size = extent_root->sectorsize;
1799         map->stripe_len = stripe_len;
1800         map->io_align = stripe_len;
1801         map->io_width = stripe_len;
1802         map->type = type;
1803         map->num_stripes = num_stripes;
1804         map->sub_stripes = sub_stripes;
1805
1806         ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
1807                                 btrfs_chunk_item_size(num_stripes));
1808         BUG_ON(ret);
1809         *start = key.offset;
1810
        em = alloc_extent_map(GFP_NOFS);
        if (!em) {
                kfree(map);
                kfree(chunk);
                return -ENOMEM;
        }
1814         em->bdev = (struct block_device *)map;
1815         em->start = key.offset;
1816         em->len = *num_bytes;
1817         em->block_start = 0;
1818         em->block_len = em->len;
1819
1820         if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
1821                 ret = btrfs_add_system_chunk(trans, chunk_root, &key,
1822                                     chunk, btrfs_chunk_item_size(num_stripes));
1823                 BUG_ON(ret);
1824         }
1825         kfree(chunk);
1826
1827         em_tree = &extent_root->fs_info->mapping_tree.map_tree;
1828         spin_lock(&em_tree->lock);
1829         ret = add_extent_mapping(em_tree, em);
1830         spin_unlock(&em_tree->lock);
1831         BUG_ON(ret);
1832         free_extent_map(em);
1833         return ret;
1834 }
1835
1836 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
1837 {
1838         extent_map_tree_init(&tree->map_tree, GFP_NOFS);
1839 }
1840
1841 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
1842 {
1843         struct extent_map *em;
1844
1845         while (1) {
1846                 spin_lock(&tree->map_tree.lock);
1847                 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
1848                 if (em)
1849                         remove_extent_mapping(&tree->map_tree, em);
1850                 spin_unlock(&tree->map_tree.lock);
1851                 if (!em)
1852                         break;
1853                 kfree(em->bdev);
1854                 /* once for us */
1855                 free_extent_map(em);
1856                 /* once for the tree */
1857                 free_extent_map(em);
1858         }
1859 }
1860
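/*
 * How many copies of the block at 'logical' exist: num_stripes for
 * RAID1/DUP, sub_stripes for RAID10, otherwise 1.
 */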
1861 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
1862 {
1863         struct extent_map *em;
1864         struct map_lookup *map;
1865         struct extent_map_tree *em_tree = &map_tree->map_tree;
1866         int ret;
1867
1868         spin_lock(&em_tree->lock);
1869         em = lookup_extent_mapping(em_tree, logical, len);
1870         spin_unlock(&em_tree->lock);
1871         BUG_ON(!em);
1872
1873         BUG_ON(em->start > logical || em->start + em->len < logical);
1874         map = (struct map_lookup *)em->bdev;
1875         if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
1876                 ret = map->num_stripes;
1877         else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
1878                 ret = map->sub_stripes;
1879         else
1880                 ret = 1;
1881         free_extent_map(em);
1882         return ret;
1883 }
1884
1885 static int find_live_mirror(struct map_lookup *map, int first, int num,
1886                             int optimal)
1887 {
1888         int i;
1889         if (map->stripes[optimal].dev->bdev)
1890                 return optimal;
1891         for (i = first; i < first + num; i++) {
1892                 if (map->stripes[i].dev->bdev)
1893                         return i;
1894         }
1895         /* we couldn't find one that doesn't fail.  Just return something
1896          * and the io error handling code will clean up eventually
1897          */
1898         return optimal;
1899 }
1900
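/*
 * Map a logical byte range onto the physical stripes that back it.
 *
 * With multi_ret set, this fills a btrfs_multi_bio with one
 * (device, physical offset) pair per stripe that must see the I/O.
 * With unplug_page set, it is instead used by btrfs_unplug_page() to
 * poke the backing device queues for that page, so higher layers can
 * unplug only the devices that actually hold the data.
 */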
1901 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
1902                              u64 logical, u64 *length,
1903                              struct btrfs_multi_bio **multi_ret,
1904                              int mirror_num, struct page *unplug_page)
1905 {
1906         struct extent_map *em;
1907         struct map_lookup *map;
1908         struct extent_map_tree *em_tree = &map_tree->map_tree;
1909         u64 offset;
1910         u64 stripe_offset;
1911         u64 stripe_nr;
1912         int stripes_allocated = 8;
1913         int stripes_required = 1;
1914         int stripe_index;
1915         int i;
1916         int num_stripes;
1917         int max_errors = 0;
1918         struct btrfs_multi_bio *multi = NULL;
1919
        if (multi_ret && !(rw & (1 << BIO_RW)))
                stripes_allocated = 1;
1923 again:
1924         if (multi_ret) {
1925                 multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
1926                                 GFP_NOFS);
1927                 if (!multi)
1928                         return -ENOMEM;
1929
1930                 atomic_set(&multi->error, 0);
1931         }
1932
1933         spin_lock(&em_tree->lock);
1934         em = lookup_extent_mapping(em_tree, logical, *length);
1935         spin_unlock(&em_tree->lock);
1936
        if (!em && unplug_page) {
                kfree(multi);
                return 0;
        }
1939
1940         if (!em) {
1941                 printk(KERN_ERR "unable to find logical %Lu len %Lu\n", logical, *length);
1942                 BUG();
1943         }
1944
1945         BUG_ON(em->start > logical || em->start + em->len < logical);
1946         map = (struct map_lookup *)em->bdev;
1947         offset = logical - em->start;
1948
1949         if (mirror_num > map->num_stripes)
1950                 mirror_num = 0;
1951
1952         /* if our multi bio struct is too small, back off and try again */
1953         if (rw & (1 << BIO_RW)) {
1954                 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
1955                                  BTRFS_BLOCK_GROUP_DUP)) {
1956                         stripes_required = map->num_stripes;
1957                         max_errors = 1;
1958                 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
1959                         stripes_required = map->sub_stripes;
1960                         max_errors = 1;
1961                 }
1962         }
1963         if (multi_ret && (rw & (1 << BIO_RW)) &&
1964             stripes_allocated < stripes_required) {
1965                 stripes_allocated = map->num_stripes;
1966                 free_extent_map(em);
1967                 kfree(multi);
1968                 goto again;
1969         }
1970         stripe_nr = offset;
1971         /*
1972          * stripe_nr counts the total number of stripes we have to stride
1973          * to get to this block
1974          */
1975         do_div(stripe_nr, map->stripe_len);
1976
1977         stripe_offset = stripe_nr * map->stripe_len;
1978         BUG_ON(offset < stripe_offset);
1979
1980         /* stripe_offset is the offset of this block in its stripe */
1981         stripe_offset = offset - stripe_offset;
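        /*
         * Worked example: with the default 64K stripe_len, an offset of
         * 200K into the chunk gives stripe_nr = 3 and stripe_offset = 8K,
         * i.e. this block starts 8K into the fourth stripe.
         */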
1982
1983         if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
1984                          BTRFS_BLOCK_GROUP_RAID10 |
1985                          BTRFS_BLOCK_GROUP_DUP)) {
1986                 /* we limit the length of each bio to what fits in a stripe */
1987                 *length = min_t(u64, em->len - offset,
1988                               map->stripe_len - stripe_offset);
1989         } else {
1990                 *length = em->len - offset;
1991         }
1992
1993         if (!multi_ret && !unplug_page)
1994                 goto out;
1995
1996         num_stripes = 1;
1997         stripe_index = 0;
1998         if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
1999                 if (unplug_page || (rw & (1 << BIO_RW)))
2000                         num_stripes = map->num_stripes;
2001                 else if (mirror_num)
2002                         stripe_index = mirror_num - 1;
2003                 else {
2004                         stripe_index = find_live_mirror(map, 0,
2005                                             map->num_stripes,
2006                                             current->pid % map->num_stripes);
2007                 }
2008
2009         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
2010                 if (rw & (1 << BIO_RW))
2011                         num_stripes = map->num_stripes;
2012                 else if (mirror_num)
2013                         stripe_index = mirror_num - 1;
2014
2015         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2016                 int factor = map->num_stripes / map->sub_stripes;
2017
2018                 stripe_index = do_div(stripe_nr, factor);
2019                 stripe_index *= map->sub_stripes;
2020
2021                 if (unplug_page || (rw & (1 << BIO_RW)))
2022                         num_stripes = map->sub_stripes;
2023                 else if (mirror_num)
2024                         stripe_index += mirror_num - 1;
2025                 else {
2026                         stripe_index = find_live_mirror(map, stripe_index,
2027                                               map->sub_stripes, stripe_index +
2028                                               current->pid % map->sub_stripes);
2029                 }
2030         } else {
2031                 /*
2032                  * after this do_div call, stripe_nr is the number of stripes
2033                  * on this device we have to walk to find the data, and
2034                  * stripe_index is the number of our device in the stripe array
2035                  */
2036                 stripe_index = do_div(stripe_nr, map->num_stripes);
2037         }
2038         BUG_ON(stripe_index >= map->num_stripes);
2039
2040         for (i = 0; i < num_stripes; i++) {
2041                 if (unplug_page) {
2042                         struct btrfs_device *device;
2043                         struct backing_dev_info *bdi;
2044
2045                         device = map->stripes[stripe_index].dev;
2046                         if (device->bdev) {
2047                                 bdi = blk_get_backing_dev_info(device->bdev);
2048                                 if (bdi->unplug_io_fn) {
2049                                         bdi->unplug_io_fn(bdi, unplug_page);
2050                                 }
2051                         }
2052                 } else {
2053                         multi->stripes[i].physical =
2054                                 map->stripes[stripe_index].physical +
2055                                 stripe_offset + stripe_nr * map->stripe_len;
2056                         multi->stripes[i].dev = map->stripes[stripe_index].dev;
2057                 }
2058                 stripe_index++;
2059         }
2060         if (multi_ret) {
2061                 *multi_ret = multi;
2062                 multi->num_stripes = num_stripes;
2063                 multi->max_errors = max_errors;
2064         }
2065 out:
2066         free_extent_map(em);
2067         return 0;
2068 }
2069
2070 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
2071                       u64 logical, u64 *length,
2072                       struct btrfs_multi_bio **multi_ret, int mirror_num)
2073 {
2074         return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
2075                                  mirror_num, NULL);
2076 }
2077
2078 int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
2079                       u64 logical, struct page *page)
2080 {
2081         u64 length = PAGE_CACHE_SIZE;
2082         return __btrfs_map_block(map_tree, READ, logical, &length,
2083                                  NULL, 0, page);
2084 }
2085
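/*
 * Completion handler shared by every clone of a striped bio.  Each
 * stripe completion drops stripes_pending; the last one restores the
 * original bio's end_io/private and completes it.  An error is only
 * passed up if more stripes failed than multi->max_errors tolerates.
 */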
2087 static void end_bio_multi_stripe(struct bio *bio, int err)
2088 {
2089         struct btrfs_multi_bio *multi = bio->bi_private;
2090         int is_orig_bio = 0;
2091
2092         if (err)
2093                 atomic_inc(&multi->error);
2094
2095         if (bio == multi->orig_bio)
2096                 is_orig_bio = 1;
2097
2098         if (atomic_dec_and_test(&multi->stripes_pending)) {
2099                 if (!is_orig_bio) {
2100                         bio_put(bio);
2101                         bio = multi->orig_bio;
2102                 }
2103                 bio->bi_private = multi->private;
2104                 bio->bi_end_io = multi->end_io;
2105                 /* only send an error to the higher layers if it is
2106                  * beyond the tolerance of the multi-bio
2107                  */
2108                 if (atomic_read(&multi->error) > multi->max_errors) {
2109                         err = -EIO;
2110                 } else if (err) {
2111                         /*
2112                          * this bio is actually up to date, we didn't
2113                          * go over the max number of errors
2114                          */
2115                         set_bit(BIO_UPTODATE, &bio->bi_flags);
2116                         err = 0;
2117                 }
2118                 kfree(multi);
2119
2120                 bio_endio(bio, err);
2121         } else if (!is_orig_bio) {
2122                 bio_put(bio);
2123         }
2124 }
2125
2126 struct async_sched {
2127         struct bio *bio;
2128         int rw;
2129         struct btrfs_fs_info *info;
2130         struct btrfs_work work;
2131 };
2132
2133 /*
2134  * see run_scheduled_bios for a description of why bios are collected for
2135  * async submit.
2136  *
2137  * This will add one bio to the pending list for a device and make sure
2138  * the work struct is scheduled.
2139  */
2140 static noinline int schedule_bio(struct btrfs_root *root,
2141                                  struct btrfs_device *device,
2142                                  int rw, struct bio *bio)
2143 {
2144         int should_queue = 1;
2145
2146         /* don't bother with additional async steps for reads, right now */
2147         if (!(rw & (1 << BIO_RW))) {
2148                 bio_get(bio);
2149                 submit_bio(rw, bio);
2150                 bio_put(bio);
2151                 return 0;
2152         }
2153
2154         /*
2155          * nr_async_bios allows us to reliably return congestion to the
2156          * higher layers.  Otherwise, the async bio makes it appear we have
2157          * made progress against dirty pages when we've really just put it
2158          * on a queue for later
2159          */
2160         atomic_inc(&root->fs_info->nr_async_bios);
2161         WARN_ON(bio->bi_next);
2162         bio->bi_next = NULL;
2163         bio->bi_rw |= rw;
2164
2165         spin_lock(&device->io_lock);
2166
2167         if (device->pending_bio_tail)
2168                 device->pending_bio_tail->bi_next = bio;
2169
2170         device->pending_bio_tail = bio;
2171         if (!device->pending_bios)
2172                 device->pending_bios = bio;
2173         if (device->running_pending)
2174                 should_queue = 0;
2175
2176         spin_unlock(&device->io_lock);
2177
2178         if (should_queue)
2179                 btrfs_queue_worker(&root->fs_info->submit_workers,
2180                                    &device->work);
2181         return 0;
2182 }
2183
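/*
 * Main bio submission path.  The bio's logical range is mapped to one
 * or more stripes; for mirrored profiles the original bio is cloned
 * once per extra stripe and end_bio_multi_stripe() gathers the
 * completions.  With async_submit, writes are queued per device via
 * schedule_bio() instead of being submitted directly.
 */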
2184 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
2185                   int mirror_num, int async_submit)
2186 {
2187         struct btrfs_mapping_tree *map_tree;
2188         struct btrfs_device *dev;
2189         struct bio *first_bio = bio;
2190         u64 logical = (u64)bio->bi_sector << 9;
2191         u64 length = 0;
2192         u64 map_length;
2193         struct btrfs_multi_bio *multi = NULL;
2194         int ret;
2195         int dev_nr = 0;
2196         int total_devs = 1;
2197
2198         length = bio->bi_size;
2199         map_tree = &root->fs_info->mapping_tree;
2200         map_length = length;
2201
2202         ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
2203                               mirror_num);
2204         BUG_ON(ret);
2205
2206         total_devs = multi->num_stripes;
2207         if (map_length < length) {
2208                 printk(KERN_ERR "mapping failed logical %Lu bio len %Lu "
2209                        "map len %Lu\n", logical, length, map_length);
2210                 BUG();
2211         }
2212         multi->end_io = first_bio->bi_end_io;
2213         multi->private = first_bio->bi_private;
2214         multi->orig_bio = first_bio;
2215         atomic_set(&multi->stripes_pending, multi->num_stripes);
2216
2217         while (dev_nr < total_devs) {
2218                 if (total_devs > 1) {
2219                         if (dev_nr < total_devs - 1) {
2220                                 bio = bio_clone(first_bio, GFP_NOFS);
2221                                 BUG_ON(!bio);
2222                         } else {
2223                                 bio = first_bio;
2224                         }
2225                         bio->bi_private = multi;
2226                         bio->bi_end_io = end_bio_multi_stripe;
2227                 }
2228                 bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
2229                 dev = multi->stripes[dev_nr].dev;
2230                 if (dev && dev->bdev) {
2231                         bio->bi_bdev = dev->bdev;
2232                         if (async_submit)
2233                                 schedule_bio(root, dev, rw, bio);
2234                         else
2235                                 submit_bio(rw, bio);
2236                 } else {
2237                         bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
2238                         bio->bi_sector = logical >> 9;
2239                         bio_endio(bio, -EIO);
2240                 }
2241                 dev_nr++;
2242         }
2243         if (total_devs == 1)
2244                 kfree(multi);
2245         return 0;
2246 }
2247
2248 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
2249                                        u8 *uuid)
2250 {
2251         struct list_head *head = &root->fs_info->fs_devices->devices;
2252
2253         return __find_device(head, devid, uuid);
2254 }
2255
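/*
 * Build an in-memory placeholder for a device that the metadata
 * references but that was not found at scan time, so a DEGRADED mount
 * can still assemble its chunk maps.  The device has no bdev, so I/O
 * aimed at it fails and is absorbed by the mirror handling.
 */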
2256 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
2257                                             u64 devid, u8 *dev_uuid)
2258 {
2259         struct btrfs_device *device;
2260         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2261
        device = kzalloc(sizeof(*device), GFP_NOFS);
        if (!device)
                return NULL;
        list_add(&device->dev_list,
                 &fs_devices->devices);
        list_add(&device->dev_alloc_list,
                 &fs_devices->alloc_list);
2267         device->barriers = 1;
2268         device->dev_root = root->fs_info->dev_root;
2269         device->devid = devid;
2270         device->work.func = pending_bios_fn;
2271         fs_devices->num_devices++;
2272         spin_lock_init(&device->io_lock);
2273         memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
2274         return device;
2275 }
2276
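/*
 * Turn one chunk item (from the chunk tree or the sys_chunk_array)
 * into an extent_map whose bdev field carries the struct map_lookup
 * holding the per-stripe device/physical pairs that
 * __btrfs_map_block() consumes.
 */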
2278 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
2279                           struct extent_buffer *leaf,
2280                           struct btrfs_chunk *chunk)
2281 {
2282         struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
2283         struct map_lookup *map;
2284         struct extent_map *em;
2285         u64 logical;
2286         u64 length;
2287         u64 devid;
2288         u8 uuid[BTRFS_UUID_SIZE];
2289         int num_stripes;
2290         int ret;
2291         int i;
2292
2293         logical = key->offset;
2294         length = btrfs_chunk_length(leaf, chunk);
2295
2296         spin_lock(&map_tree->map_tree.lock);
2297         em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
2298         spin_unlock(&map_tree->map_tree.lock);
2299
2300         /* already mapped? */
2301         if (em && em->start <= logical && em->start + em->len > logical) {
2302                 free_extent_map(em);
2303                 return 0;
2304         } else if (em) {
2305                 free_extent_map(em);
2306         }
2307
        em = alloc_extent_map(GFP_NOFS);
        if (!em)
                return -ENOMEM;
        num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
        map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
        if (!map) {
                free_extent_map(em);
                return -ENOMEM;
        }
2321
2322         em->bdev = (struct block_device *)map;
2323         em->start = logical;
2324         em->len = length;
2325         em->block_start = 0;
2326         em->block_len = em->len;
2327
2328         map->num_stripes = num_stripes;
2329         map->io_width = btrfs_chunk_io_width(leaf, chunk);
2330         map->io_align = btrfs_chunk_io_align(leaf, chunk);
2331         map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
2332         map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
2333         map->type = btrfs_chunk_type(leaf, chunk);
2334         map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
2335         for (i = 0; i < num_stripes; i++) {
2336                 map->stripes[i].physical =
2337                         btrfs_stripe_offset_nr(leaf, chunk, i);
2338                 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
2339                 read_extent_buffer(leaf, uuid, (unsigned long)
2340                                    btrfs_stripe_dev_uuid_nr(chunk, i),
2341                                    BTRFS_UUID_SIZE);
2342                 map->stripes[i].dev = btrfs_find_device(root, devid, uuid);
2343
2344                 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
2345                         kfree(map);
2346                         free_extent_map(em);
2347                         return -EIO;
2348                 }
2349                 if (!map->stripes[i].dev) {
2350                         map->stripes[i].dev =
2351                                 add_missing_dev(root, devid, uuid);
2352                         if (!map->stripes[i].dev) {
2353                                 kfree(map);
2354                                 free_extent_map(em);
2355                                 return -EIO;
2356                         }
2357                 }
2358                 map->stripes[i].dev->in_fs_metadata = 1;
2359         }
2360
2361         spin_lock(&map_tree->map_tree.lock);
2362         ret = add_extent_mapping(&map_tree->map_tree, em);
2363         spin_unlock(&map_tree->map_tree.lock);
2364         BUG_ON(ret);
2365         free_extent_map(em);
2366
2367         return 0;
2368 }
2369
2370 static int fill_device_from_item(struct extent_buffer *leaf,
2371                                  struct btrfs_dev_item *dev_item,
2372                                  struct btrfs_device *device)
2373 {
2374         unsigned long ptr;
2375
2376         device->devid = btrfs_device_id(leaf, dev_item);
2377         device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
2378         device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
2379         device->type = btrfs_device_type(leaf, dev_item);
2380         device->io_align = btrfs_device_io_align(leaf, dev_item);
2381         device->io_width = btrfs_device_io_width(leaf, dev_item);
2382         device->sector_size = btrfs_device_sector_size(leaf, dev_item);
2383
2384         ptr = (unsigned long)btrfs_device_uuid(dev_item);
2385         read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
2386
2387         return 0;
2388 }
2389
2390 static int read_one_dev(struct btrfs_root *root,
2391                         struct extent_buffer *leaf,
2392                         struct btrfs_dev_item *dev_item)
2393 {
2394         struct btrfs_device *device;
2395         u64 devid;
2396         int ret;
2397         u8 dev_uuid[BTRFS_UUID_SIZE];
2398
2399         devid = btrfs_device_id(leaf, dev_item);
2400         read_extent_buffer(leaf, dev_uuid,
2401                            (unsigned long)btrfs_device_uuid(dev_item),
2402                            BTRFS_UUID_SIZE);
2403         device = btrfs_find_device(root, devid, dev_uuid);
2404         if (!device) {
2405                 printk(KERN_WARNING "devid %Lu missing\n", devid);
2406                 device = add_missing_dev(root, devid, dev_uuid);
2407                 if (!device)
2408                         return -ENOMEM;
2409         }
2410
2411         fill_device_from_item(leaf, dev_item, device);
2412         device->dev_root = root->fs_info->dev_root;
2413         device->in_fs_metadata = 1;
2414         ret = 0;
2415 #if 0
2416         ret = btrfs_open_device(device);
2417         if (ret) {
2418                 kfree(device);
2419         }
2420 #endif
2421         return ret;
2422 }
2423
2424 int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
2425 {
2426         struct btrfs_dev_item *dev_item;
2427
2428         dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
2429                                                      dev_item);
2430         return read_one_dev(root, buf, dev_item);
2431 }
2432
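/*
 * The chunk tree itself lives inside a SYSTEM chunk, so its mapping
 * has to be bootstrapped from the copy kept in the superblock's
 * sys_chunk_array (see btrfs_add_system_chunk).  Walk the packed
 * (disk_key, chunk) pairs and hand each chunk to read_one_chunk().
 */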
2433 int btrfs_read_sys_array(struct btrfs_root *root)
2434 {
2435         struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
2436         struct extent_buffer *sb;
2437         struct btrfs_disk_key *disk_key;
2438         struct btrfs_chunk *chunk;
2439         u8 *ptr;
2440         unsigned long sb_ptr;
2441         int ret = 0;
2442         u32 num_stripes;
2443         u32 array_size;
2444         u32 len = 0;
2445         u32 cur;
2446         struct btrfs_key key;
2447
2448         sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
2449                                           BTRFS_SUPER_INFO_SIZE);
2450         if (!sb)
2451                 return -ENOMEM;
2452         btrfs_set_buffer_uptodate(sb);
2453         write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
2454         array_size = btrfs_super_sys_array_size(super_copy);
2455
2456         ptr = super_copy->sys_chunk_array;
2457         sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
2458         cur = 0;
2459
2460         while (cur < array_size) {
2461                 disk_key = (struct btrfs_disk_key *)ptr;
2462                 btrfs_disk_key_to_cpu(&key, disk_key);
2463
                len = sizeof(*disk_key);
                ptr += len;
2465                 sb_ptr += len;
2466                 cur += len;
2467
2468                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2469                         chunk = (struct btrfs_chunk *)sb_ptr;
2470                         ret = read_one_chunk(root, &key, sb, chunk);
2471                         if (ret)
2472                                 break;
2473                         num_stripes = btrfs_chunk_num_stripes(sb, chunk);
2474                         len = btrfs_chunk_item_size(num_stripes);
2475                 } else {
2476                         ret = -EIO;
2477                         break;
2478                 }
2479                 ptr += len;
2480                 sb_ptr += len;
2481                 cur += len;
2482         }
2483         free_extent_buffer(sb);
2484         return ret;
2485 }
2486
2487 int btrfs_read_chunk_tree(struct btrfs_root *root)
2488 {
2489         struct btrfs_path *path;
2490         struct extent_buffer *leaf;
2491         struct btrfs_key key;
2492         struct btrfs_key found_key;
2493         int ret;
2494         int slot;
2495
2496         root = root->fs_info->chunk_root;
2497
2498         path = btrfs_alloc_path();
2499         if (!path)
2500                 return -ENOMEM;
2501
2502         /* first we search for all of the device items, and then we
2503          * read in all of the chunk items.  This way we can create chunk
2504          * mappings that reference all of the devices that are found
2505          */
2506         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2507         key.offset = 0;
2508         key.type = 0;
2509 again:
        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto error;
2511         while (1) {
2512                 leaf = path->nodes[0];
2513                 slot = path->slots[0];
2514                 if (slot >= btrfs_header_nritems(leaf)) {
2515                         ret = btrfs_next_leaf(root, path);
2516                         if (ret == 0)
2517                                 continue;
2518                         if (ret < 0)
2519                                 goto error;
2520                         break;
2521                 }
2522                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2523                 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
2524                         if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
2525                                 break;
2526                         if (found_key.type == BTRFS_DEV_ITEM_KEY) {
2527                                 struct btrfs_dev_item *dev_item;
2528                                 dev_item = btrfs_item_ptr(leaf, slot,
2529                                                   struct btrfs_dev_item);
2530                                 ret = read_one_dev(root, leaf, dev_item);
2531                                 BUG_ON(ret);
2532                         }
2533                 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
2534                         struct btrfs_chunk *chunk;
2535                         chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2536                         ret = read_one_chunk(root, &found_key, leaf, chunk);
2537                 }
2538                 path->slots[0]++;
2539         }
2540         if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
2541                 key.objectid = 0;
2542                 btrfs_release_path(root, path);
2543                 goto again;
2544         }
2545
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
2550 }