Btrfs: Throttle for async bio submits higher up the chain
fs/btrfs/volumes.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 02111-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/bio.h>
20 #include <linux/buffer_head.h>
21 #include <linux/blkdev.h>
22 #include <linux/random.h>
23 #include <asm/div64.h>
24 #include "ctree.h"
25 #include "extent_map.h"
26 #include "disk-io.h"
27 #include "transaction.h"
28 #include "print-tree.h"
29 #include "volumes.h"
30 #include "async-thread.h"
31
32 struct map_lookup {
33         u64 type;
34         int io_align;
35         int io_width;
36         int stripe_len;
37         int sector_size;
38         int num_stripes;
39         int sub_stripes;
40         struct btrfs_bio_stripe stripes[];
41 };
42
43 #define map_lookup_size(n) (sizeof(struct map_lookup) + \
44                             (sizeof(struct btrfs_bio_stripe) * (n)))
45
46 static DEFINE_MUTEX(uuid_mutex);
47 static LIST_HEAD(fs_uuids);
48
49 void btrfs_lock_volumes(void)
50 {
51         mutex_lock(&uuid_mutex);
52 }
53
54 void btrfs_unlock_volumes(void)
55 {
56         mutex_unlock(&uuid_mutex);
57 }
58
59 static void lock_chunks(struct btrfs_root *root)
60 {
61         mutex_lock(&root->fs_info->alloc_mutex);
62         mutex_lock(&root->fs_info->chunk_mutex);
63 }
64
65 static void unlock_chunks(struct btrfs_root *root)
66 {
67         mutex_unlock(&root->fs_info->alloc_mutex);
68         mutex_unlock(&root->fs_info->chunk_mutex);
69 }
70
71 int btrfs_cleanup_fs_uuids(void)
72 {
73         struct btrfs_fs_devices *fs_devices;
74         struct list_head *uuid_cur;
75         struct list_head *devices_cur;
76         struct btrfs_device *dev;
77
78         list_for_each(uuid_cur, &fs_uuids) {
79                 fs_devices = list_entry(uuid_cur, struct btrfs_fs_devices,
80                                         list);
81                 while(!list_empty(&fs_devices->devices)) {
82                         devices_cur = fs_devices->devices.next;
83                         dev = list_entry(devices_cur, struct btrfs_device,
84                                          dev_list);
85                         if (dev->bdev) {
86                                 close_bdev_excl(dev->bdev);
87                                 fs_devices->open_devices--;
88                         }
89                         list_del(&dev->dev_list);
90                         kfree(dev->name);
91                         kfree(dev);
92                 }
93         }
94         return 0;
95 }
96
97 static struct btrfs_device *__find_device(struct list_head *head, u64 devid,
98                                           u8 *uuid)
99 {
100         struct btrfs_device *dev;
101         struct list_head *cur;
102
103         list_for_each(cur, head) {
104                 dev = list_entry(cur, struct btrfs_device, dev_list);
105                 if (dev->devid == devid &&
106                     (!uuid || !memcmp(dev->uuid, uuid, BTRFS_UUID_SIZE))) {
107                         return dev;
108                 }
109         }
110         return NULL;
111 }
112
113 static struct btrfs_fs_devices *find_fsid(u8 *fsid)
114 {
115         struct list_head *cur;
116         struct btrfs_fs_devices *fs_devices;
117
118         list_for_each(cur, &fs_uuids) {
119                 fs_devices = list_entry(cur, struct btrfs_fs_devices, list);
120                 if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
121                         return fs_devices;
122         }
123         return NULL;
124 }
125
126 /*
127  * we try to collect pending bios for a device so we don't get a large
128  * number of procs sending bios down to the same device.  This greatly
129  * improves the scheduler's ability to collect and merge the bios.
130  *
131  * But, it also turns into a long list of bios to process and that is sure
132  * to eventually make the worker thread block.  The solution here is to
133  * make some progress and then put this work struct back at the end of
134  * the list if the block device is congested.  This way, multiple devices
135  * can make progress from a single worker thread.
136  */
137 int run_scheduled_bios(struct btrfs_device *device)
138 {
139         struct bio *pending;
140         struct backing_dev_info *bdi;
141         struct btrfs_fs_info *fs_info;
142         struct bio *tail;
143         struct bio *cur;
144         int again = 0;
145         unsigned long num_run = 0;
146         unsigned long limit;
147
148         bdi = device->bdev->bd_inode->i_mapping->backing_dev_info;
149         fs_info = device->dev_root->fs_info;
150         limit = btrfs_async_submit_limit(fs_info);
151         limit = limit * 2 / 3;
152
153 loop:
154         spin_lock(&device->io_lock);
155
156         /* take all the bios off the list at once and process them
157          * later on (without the lock held).  But, remember the
158          * tail and other pointers so the bios can be properly reinserted
159          * into the list if we hit congestion
160          */
161         pending = device->pending_bios;
162         tail = device->pending_bio_tail;
163         WARN_ON(pending && !tail);
164         device->pending_bios = NULL;
165         device->pending_bio_tail = NULL;
166
167         /*
168          * if pending was null this time around, no bios need processing
169          * at all and we can stop.  Otherwise it'll loop back up again
170          * and do an additional check so no bios are missed.
171          *
172          * device->running_pending is used to synchronize with the
173          * schedule_bio code.
174          */
175         if (pending) {
176                 again = 1;
177                 device->running_pending = 1;
178         } else {
179                 again = 0;
180                 device->running_pending = 0;
181         }
182         spin_unlock(&device->io_lock);
183
184         while(pending) {
185                 cur = pending;
186                 pending = pending->bi_next;
187                 cur->bi_next = NULL;
188                 atomic_dec(&fs_info->nr_async_bios);
189
190                 if (atomic_read(&fs_info->nr_async_bios) < limit &&
191                     waitqueue_active(&fs_info->async_submit_wait))
192                         wake_up(&fs_info->async_submit_wait);
193
194                 BUG_ON(atomic_read(&cur->bi_cnt) == 0);
195                 bio_get(cur);
196                 submit_bio(cur->bi_rw, cur);
197                 bio_put(cur);
198                 num_run++;
199
200                 /*
201                  * we made progress, there is more work to do and the bdi
202                  * is now congested.  Back off and let other work structs
203                  * run instead
204                  */
205                 if (pending && bdi_write_congested(bdi)) {
206                         struct bio *old_head;
207
208                         spin_lock(&device->io_lock);
209
210                         old_head = device->pending_bios;
211                         device->pending_bios = pending;
212                         if (device->pending_bio_tail)
213                                 tail->bi_next = old_head;
214                         else
215                                 device->pending_bio_tail = tail;
216
217                         spin_unlock(&device->io_lock);
218                         btrfs_requeue_work(&device->work);
219                         goto done;
220                 }
221         }
222         if (again)
223                 goto loop;
224 done:
225         return 0;
226 }
227
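/*
 * pending_bios_fn is the btrfs_work callback that drains a device's
 * pending bio list via run_scheduled_bios().  As bios are sent down,
 * fs_info->nr_async_bios is decremented and anyone sleeping on
 * async_submit_wait is woken once the count falls below roughly two
 * thirds of btrfs_async_submit_limit(), which is how the throttle
 * higher up the chain gets released.
 */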
228 void pending_bios_fn(struct btrfs_work *work)
229 {
230         struct btrfs_device *device;
231
232         device = container_of(work, struct btrfs_device, work);
233         run_scheduled_bios(device);
234 }
235
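/*
 * device_list_add records a scanned device in the in-memory list of
 * known filesystems.  A new btrfs_fs_devices entry is created for an
 * unknown fsid, a new btrfs_device is created for an unknown devid,
 * and latest_devid/latest_trans track whichever device carries the
 * highest generation number seen so far.
 */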
236 static int device_list_add(const char *path,
237                            struct btrfs_super_block *disk_super,
238                            u64 devid, struct btrfs_fs_devices **fs_devices_ret)
239 {
240         struct btrfs_device *device;
241         struct btrfs_fs_devices *fs_devices;
242         u64 found_transid = btrfs_super_generation(disk_super);
243
244         fs_devices = find_fsid(disk_super->fsid);
245         if (!fs_devices) {
246                 fs_devices = kzalloc(sizeof(*fs_devices), GFP_NOFS);
247                 if (!fs_devices)
248                         return -ENOMEM;
249                 INIT_LIST_HEAD(&fs_devices->devices);
250                 INIT_LIST_HEAD(&fs_devices->alloc_list);
251                 list_add(&fs_devices->list, &fs_uuids);
252                 memcpy(fs_devices->fsid, disk_super->fsid, BTRFS_FSID_SIZE);
253                 fs_devices->latest_devid = devid;
254                 fs_devices->latest_trans = found_transid;
255                 device = NULL;
256         } else {
257                 device = __find_device(&fs_devices->devices, devid,
258                                        disk_super->dev_item.uuid);
259         }
260         if (!device) {
261                 device = kzalloc(sizeof(*device), GFP_NOFS);
262                 if (!device) {
263                         /* we can safely leave the fs_devices entry around */
264                         return -ENOMEM;
265                 }
266                 device->devid = devid;
267                 device->work.func = pending_bios_fn;
268                 memcpy(device->uuid, disk_super->dev_item.uuid,
269                        BTRFS_UUID_SIZE);
270                 device->barriers = 1;
271                 spin_lock_init(&device->io_lock);
272                 device->name = kstrdup(path, GFP_NOFS);
273                 if (!device->name) {
274                         kfree(device);
275                         return -ENOMEM;
276                 }
277                 list_add(&device->dev_list, &fs_devices->devices);
278                 list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
279                 fs_devices->num_devices++;
280         }
281
282         if (found_transid > fs_devices->latest_trans) {
283                 fs_devices->latest_devid = devid;
284                 fs_devices->latest_trans = found_transid;
285         }
286         *fs_devices_ret = fs_devices;
287         return 0;
288 }
289
290 int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices)
291 {
292         struct list_head *head = &fs_devices->devices;
293         struct list_head *cur;
294         struct btrfs_device *device;
295
296         mutex_lock(&uuid_mutex);
297 again:
298         list_for_each(cur, head) {
299                 device = list_entry(cur, struct btrfs_device, dev_list);
300                 if (!device->in_fs_metadata) {
301                         struct block_device *bdev;
302                         list_del(&device->dev_list);
303                         list_del(&device->dev_alloc_list);
304                         fs_devices->num_devices--;
305                         if (device->bdev) {
306                                 bdev = device->bdev;
307                                 fs_devices->open_devices--;
308                                 mutex_unlock(&uuid_mutex);
309                                 close_bdev_excl(bdev);
310                                 mutex_lock(&uuid_mutex);
311                         }
312                         kfree(device->name);
313                         kfree(device);
314                         goto again;
315                 }
316         }
317         mutex_unlock(&uuid_mutex);
318         return 0;
319 }
320
321 int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
322 {
323         struct list_head *head = &fs_devices->devices;
324         struct list_head *cur;
325         struct btrfs_device *device;
326
327         mutex_lock(&uuid_mutex);
328         list_for_each(cur, head) {
329                 device = list_entry(cur, struct btrfs_device, dev_list);
330                 if (device->bdev) {
331                         close_bdev_excl(device->bdev);
332                         fs_devices->open_devices--;
333                 }
334                 device->bdev = NULL;
335                 device->in_fs_metadata = 0;
336         }
337         fs_devices->mounted = 0;
338         mutex_unlock(&uuid_mutex);
339         return 0;
340 }
341
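/*
 * btrfs_open_devices opens the block device behind every scanned
 * device in the list, reads its super block, and skips anything whose
 * magic or devid does not match.  The device with the highest
 * generation becomes fs_devices->latest_bdev.  -EIO is returned only
 * if no devices could be opened at all.
 */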
342 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
343                        int flags, void *holder)
344 {
345         struct block_device *bdev;
346         struct list_head *head = &fs_devices->devices;
347         struct list_head *cur;
348         struct btrfs_device *device;
349         struct block_device *latest_bdev = NULL;
350         struct buffer_head *bh;
351         struct btrfs_super_block *disk_super;
352         u64 latest_devid = 0;
353         u64 latest_transid = 0;
354         u64 transid;
355         u64 devid;
356         int ret = 0;
357
358         mutex_lock(&uuid_mutex);
359         if (fs_devices->mounted)
360                 goto out;
361
362         list_for_each(cur, head) {
363                 device = list_entry(cur, struct btrfs_device, dev_list);
364                 if (device->bdev)
365                         continue;
366
367                 if (!device->name)
368                         continue;
369
370                 bdev = open_bdev_excl(device->name, flags, holder);
371
372                 if (IS_ERR(bdev)) {
373                         printk("open %s failed\n", device->name);
374                         goto error;
375                 }
376                 set_blocksize(bdev, 4096);
377
378                 bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
379                 if (!bh)
380                         goto error_close;
381
382                 disk_super = (struct btrfs_super_block *)bh->b_data;
383                 if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
384                     sizeof(disk_super->magic)))
385                         goto error_brelse;
386
387                 devid = le64_to_cpu(disk_super->dev_item.devid);
388                 if (devid != device->devid)
389                         goto error_brelse;
390
391                 transid = btrfs_super_generation(disk_super);
392                 if (!latest_transid || transid > latest_transid) {
393                         latest_devid = devid;
394                         latest_transid = transid;
395                         latest_bdev = bdev;
396                 }
397
398                 device->bdev = bdev;
399                 device->in_fs_metadata = 0;
400                 fs_devices->open_devices++;
401                 continue;
402
403 error_brelse:
404                 brelse(bh);
405 error_close:
406                 close_bdev_excl(bdev);
407 error:
408                 continue;
409         }
410         if (fs_devices->open_devices == 0) {
411                 ret = -EIO;
412                 goto out;
413         }
414         fs_devices->mounted = 1;
415         fs_devices->latest_bdev = latest_bdev;
416         fs_devices->latest_devid = latest_devid;
417         fs_devices->latest_trans = latest_transid;
418 out:
419         mutex_unlock(&uuid_mutex);
420         return ret;
421 }
422
423 int btrfs_scan_one_device(const char *path, int flags, void *holder,
424                           struct btrfs_fs_devices **fs_devices_ret)
425 {
426         struct btrfs_super_block *disk_super;
427         struct block_device *bdev;
428         struct buffer_head *bh;
429         int ret;
430         u64 devid;
431         u64 transid;
432
433         mutex_lock(&uuid_mutex);
434
435         bdev = open_bdev_excl(path, flags, holder);
436
437         if (IS_ERR(bdev)) {
438                 ret = PTR_ERR(bdev);
439                 goto error;
440         }
441
442         ret = set_blocksize(bdev, 4096);
443         if (ret)
444                 goto error_close;
445         bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
446         if (!bh) {
447                 ret = -EIO;
448                 goto error_close;
449         }
450         disk_super = (struct btrfs_super_block *)bh->b_data;
451         if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
452             sizeof(disk_super->magic))) {
453                 ret = -EINVAL;
454                 goto error_brelse;
455         }
456         devid = le64_to_cpu(disk_super->dev_item.devid);
457         transid = btrfs_super_generation(disk_super);
458         if (disk_super->label[0])
459                 printk("device label %s ", disk_super->label);
460         else {
461                 /* FIXME, make a real uuid parser */
462                 printk("device fsid %llx-%llx ",
463                        *(unsigned long long *)disk_super->fsid,
464                        *(unsigned long long *)(disk_super->fsid + 8));
465         }
466         printk("devid %Lu transid %Lu %s\n", devid, transid, path);
467         ret = device_list_add(path, disk_super, devid, fs_devices_ret);
468
469 error_brelse:
470         brelse(bh);
471 error_close:
472         close_bdev_excl(bdev);
473 error:
474         mutex_unlock(&uuid_mutex);
475         return ret;
476 }
477
478 /*
479  * this uses a pretty simple search; the expectation is that it is
480  * called very infrequently and that a given device has a small number
481  * of extents
482  */
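/*
 * Roughly: walk the BTRFS_DEV_EXTENT_KEY items for this devid in key
 * order, remembering last_byte (the end of the previous extent).  The
 * first hole of at least num_bytes becomes *start.  The search begins
 * at 1MB (or fs_info->alloc_start if that is larger) so the superblock
 * is never handed out, and fails with -ENOSPC once the end of the
 * device is reached.
 */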
483 static int find_free_dev_extent(struct btrfs_trans_handle *trans,
484                                 struct btrfs_device *device,
485                                 struct btrfs_path *path,
486                                 u64 num_bytes, u64 *start)
487 {
488         struct btrfs_key key;
489         struct btrfs_root *root = device->dev_root;
490         struct btrfs_dev_extent *dev_extent = NULL;
491         u64 hole_size = 0;
492         u64 last_byte = 0;
493         u64 search_start = 0;
494         u64 search_end = device->total_bytes;
495         int ret;
496         int slot = 0;
497         int start_found;
498         struct extent_buffer *l;
499
500         start_found = 0;
501         path->reada = 2;
502
503         /* FIXME use last free of some kind */
504
505         /* we don't want to overwrite the superblock on the drive,
506          * so we make sure to start at an offset of at least 1MB
507          */
508         search_start = max((u64)1024 * 1024, search_start);
509
510         if (root->fs_info->alloc_start + num_bytes <= device->total_bytes)
511                 search_start = max(root->fs_info->alloc_start, search_start);
512
513         key.objectid = device->devid;
514         key.offset = search_start;
515         key.type = BTRFS_DEV_EXTENT_KEY;
516         ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
517         if (ret < 0)
518                 goto error;
519         ret = btrfs_previous_item(root, path, 0, key.type);
520         if (ret < 0)
521                 goto error;
522         l = path->nodes[0];
523         btrfs_item_key_to_cpu(l, &key, path->slots[0]);
524         while (1) {
525                 l = path->nodes[0];
526                 slot = path->slots[0];
527                 if (slot >= btrfs_header_nritems(l)) {
528                         ret = btrfs_next_leaf(root, path);
529                         if (ret == 0)
530                                 continue;
531                         if (ret < 0)
532                                 goto error;
533 no_more_items:
534                         if (!start_found) {
535                                 if (search_start >= search_end) {
536                                         ret = -ENOSPC;
537                                         goto error;
538                                 }
539                                 *start = search_start;
540                                 start_found = 1;
541                                 goto check_pending;
542                         }
543                         *start = last_byte > search_start ?
544                                 last_byte : search_start;
545                         if (search_end <= *start) {
546                                 ret = -ENOSPC;
547                                 goto error;
548                         }
549                         goto check_pending;
550                 }
551                 btrfs_item_key_to_cpu(l, &key, slot);
552
553                 if (key.objectid < device->devid)
554                         goto next;
555
556                 if (key.objectid > device->devid)
557                         goto no_more_items;
558
559                 if (key.offset >= search_start && key.offset > last_byte &&
560                     start_found) {
561                         if (last_byte < search_start)
562                                 last_byte = search_start;
563                         hole_size = key.offset - last_byte;
564                         if (key.offset > last_byte &&
565                             hole_size >= num_bytes) {
566                                 *start = last_byte;
567                                 goto check_pending;
568                         }
569                 }
570                 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) {
571                         goto next;
572                 }
573
574                 start_found = 1;
575                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
576                 last_byte = key.offset + btrfs_dev_extent_length(l, dev_extent);
577 next:
578                 path->slots[0]++;
579                 cond_resched();
580         }
581 check_pending:
582         /* we have to make sure we didn't find an extent that has already
583          * been allocated by the map tree or the original allocation
584          */
585         btrfs_release_path(root, path);
586         BUG_ON(*start < search_start);
587
588         if (*start + num_bytes > search_end) {
589                 ret = -ENOSPC;
590                 goto error;
591         }
592         /* check for pending inserts here */
593         return 0;
594
595 error:
596         btrfs_release_path(root, path);
597         return ret;
598 }
599
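/*
 * btrfs_free_dev_extent removes the dev extent item that covers
 * 'start' on this device and subtracts its length from
 * device->bytes_used.  Callers such as btrfs_relocate_chunk() use it
 * after the chunk's data has already been relocated.
 */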
600 int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
601                           struct btrfs_device *device,
602                           u64 start)
603 {
604         int ret;
605         struct btrfs_path *path;
606         struct btrfs_root *root = device->dev_root;
607         struct btrfs_key key;
608         struct btrfs_key found_key;
609         struct extent_buffer *leaf = NULL;
610         struct btrfs_dev_extent *extent = NULL;
611
612         path = btrfs_alloc_path();
613         if (!path)
614                 return -ENOMEM;
615
616         key.objectid = device->devid;
617         key.offset = start;
618         key.type = BTRFS_DEV_EXTENT_KEY;
619
620         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
621         if (ret > 0) {
622                 ret = btrfs_previous_item(root, path, key.objectid,
623                                           BTRFS_DEV_EXTENT_KEY);
624                 BUG_ON(ret);
625                 leaf = path->nodes[0];
626                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
627                 extent = btrfs_item_ptr(leaf, path->slots[0],
628                                         struct btrfs_dev_extent);
629                 BUG_ON(found_key.offset > start || found_key.offset +
630                        btrfs_dev_extent_length(leaf, extent) < start);
631                 ret = 0;
632         } else if (ret == 0) {
633                 leaf = path->nodes[0];
634                 extent = btrfs_item_ptr(leaf, path->slots[0],
635                                         struct btrfs_dev_extent);
636         }
637         BUG_ON(ret);
638
639         if (device->bytes_used > 0)
640                 device->bytes_used -= btrfs_dev_extent_length(leaf, extent);
641         ret = btrfs_del_item(trans, root, path);
642         BUG_ON(ret);
643
644         btrfs_free_path(path);
645         return ret;
646 }
647
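/*
 * btrfs_alloc_dev_extent finds a free region of num_bytes on the
 * device with find_free_dev_extent() and inserts the matching dev
 * extent item, recording which chunk tree/objectid/offset the new
 * extent backs.  *start is filled in with the physical offset chosen.
 */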
648 int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
649                            struct btrfs_device *device,
650                            u64 chunk_tree, u64 chunk_objectid,
651                            u64 chunk_offset,
652                            u64 num_bytes, u64 *start)
653 {
654         int ret;
655         struct btrfs_path *path;
656         struct btrfs_root *root = device->dev_root;
657         struct btrfs_dev_extent *extent;
658         struct extent_buffer *leaf;
659         struct btrfs_key key;
660
661         WARN_ON(!device->in_fs_metadata);
662         path = btrfs_alloc_path();
663         if (!path)
664                 return -ENOMEM;
665
666         ret = find_free_dev_extent(trans, device, path, num_bytes, start);
667         if (ret) {
668                 goto err;
669         }
670
671         key.objectid = device->devid;
672         key.offset = *start;
673         key.type = BTRFS_DEV_EXTENT_KEY;
674         ret = btrfs_insert_empty_item(trans, root, path, &key,
675                                       sizeof(*extent));
676         BUG_ON(ret);
677
678         leaf = path->nodes[0];
679         extent = btrfs_item_ptr(leaf, path->slots[0],
680                                 struct btrfs_dev_extent);
681         btrfs_set_dev_extent_chunk_tree(leaf, extent, chunk_tree);
682         btrfs_set_dev_extent_chunk_objectid(leaf, extent, chunk_objectid);
683         btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
684
685         write_extent_buffer(leaf, root->fs_info->chunk_tree_uuid,
686                     (unsigned long)btrfs_dev_extent_chunk_tree_uuid(extent),
687                     BTRFS_UUID_SIZE);
688
689         btrfs_set_dev_extent_length(leaf, extent, num_bytes);
690         btrfs_mark_buffer_dirty(leaf);
691 err:
692         btrfs_free_path(path);
693         return ret;
694 }
695
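/*
 * find_next_chunk returns, via *offset, the first byte past the
 * highest existing chunk owned by 'objectid' in the chunk tree, or 0
 * if no such chunk exists yet.
 */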
696 static int find_next_chunk(struct btrfs_root *root, u64 objectid, u64 *offset)
697 {
698         struct btrfs_path *path;
699         int ret;
700         struct btrfs_key key;
701         struct btrfs_chunk *chunk;
702         struct btrfs_key found_key;
703
704         path = btrfs_alloc_path();
705         BUG_ON(!path);
706
707         key.objectid = objectid;
708         key.offset = (u64)-1;
709         key.type = BTRFS_CHUNK_ITEM_KEY;
710
711         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
712         if (ret < 0)
713                 goto error;
714
715         BUG_ON(ret == 0);
716
717         ret = btrfs_previous_item(root, path, 0, BTRFS_CHUNK_ITEM_KEY);
718         if (ret) {
719                 *offset = 0;
720         } else {
721                 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
722                                       path->slots[0]);
723                 if (found_key.objectid != objectid)
724                         *offset = 0;
725                 else {
726                         chunk = btrfs_item_ptr(path->nodes[0], path->slots[0],
727                                                struct btrfs_chunk);
728                         *offset = found_key.offset +
729                                 btrfs_chunk_length(path->nodes[0], chunk);
730                 }
731         }
732         ret = 0;
733 error:
734         btrfs_free_path(path);
735         return ret;
736 }
737
738 static int find_next_devid(struct btrfs_root *root, struct btrfs_path *path,
739                            u64 *objectid)
740 {
741         int ret;
742         struct btrfs_key key;
743         struct btrfs_key found_key;
744
745         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
746         key.type = BTRFS_DEV_ITEM_KEY;
747         key.offset = (u64)-1;
748
749         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
750         if (ret < 0)
751                 goto error;
752
753         BUG_ON(ret == 0);
754
755         ret = btrfs_previous_item(root, path, BTRFS_DEV_ITEMS_OBJECTID,
756                                   BTRFS_DEV_ITEM_KEY);
757         if (ret) {
758                 *objectid = 1;
759         } else {
760                 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
761                                       path->slots[0]);
762                 *objectid = found_key.offset + 1;
763         }
764         ret = 0;
765 error:
766         btrfs_release_path(root, path);
767         return ret;
768 }
769
770 /*
771  * the device information is stored in the chunk root; the
772  * btrfs_device struct should be fully filled in before this is called
773  */
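/*
 * A minimal sketch of what follows: the next unused devid comes from
 * find_next_devid(), and a dev item keyed as
 * (BTRFS_DEV_ITEMS_OBJECTID, BTRFS_DEV_ITEM_KEY, devid) is inserted
 * into the chunk root carrying the device's size, alignment and uuid.
 */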
774 int btrfs_add_device(struct btrfs_trans_handle *trans,
775                      struct btrfs_root *root,
776                      struct btrfs_device *device)
777 {
778         int ret;
779         struct btrfs_path *path;
780         struct btrfs_dev_item *dev_item;
781         struct extent_buffer *leaf;
782         struct btrfs_key key;
783         unsigned long ptr;
784         u64 free_devid = 0;
785
786         root = root->fs_info->chunk_root;
787
788         path = btrfs_alloc_path();
789         if (!path)
790                 return -ENOMEM;
791
792         ret = find_next_devid(root, path, &free_devid);
793         if (ret)
794                 goto out;
795
796         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
797         key.type = BTRFS_DEV_ITEM_KEY;
798         key.offset = free_devid;
799
800         ret = btrfs_insert_empty_item(trans, root, path, &key,
801                                       sizeof(*dev_item));
802         if (ret)
803                 goto out;
804
805         leaf = path->nodes[0];
806         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
807
808         device->devid = free_devid;
809         btrfs_set_device_id(leaf, dev_item, device->devid);
810         btrfs_set_device_type(leaf, dev_item, device->type);
811         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
812         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
813         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
814         btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
815         btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
816         btrfs_set_device_group(leaf, dev_item, 0);
817         btrfs_set_device_seek_speed(leaf, dev_item, 0);
818         btrfs_set_device_bandwidth(leaf, dev_item, 0);
819
820         ptr = (unsigned long)btrfs_device_uuid(dev_item);
821         write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
822         btrfs_mark_buffer_dirty(leaf);
823         ret = 0;
824
825 out:
826         btrfs_free_path(path);
827         return ret;
828 }
829
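/*
 * btrfs_rm_dev_item deletes the device's dev item from the chunk tree
 * inside its own transaction, unhooks the btrfs_device from the
 * in-memory lists, repoints latest_bdev/s_bdev at another device if
 * needed, and decrements the device count in the super copy.
 */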
830 static int btrfs_rm_dev_item(struct btrfs_root *root,
831                              struct btrfs_device *device)
832 {
833         int ret;
834         struct btrfs_path *path;
835         struct block_device *bdev = device->bdev;
836         struct btrfs_device *next_dev;
837         struct btrfs_key key;
838         u64 total_bytes;
839         struct btrfs_fs_devices *fs_devices;
840         struct btrfs_trans_handle *trans;
841
842         root = root->fs_info->chunk_root;
843
844         path = btrfs_alloc_path();
845         if (!path)
846                 return -ENOMEM;
847
848         trans = btrfs_start_transaction(root, 1);
849         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
850         key.type = BTRFS_DEV_ITEM_KEY;
851         key.offset = device->devid;
852         lock_chunks(root);
853
854         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
855         if (ret < 0)
856                 goto out;
857
858         if (ret > 0) {
859                 ret = -ENOENT;
860                 goto out;
861         }
862
863         ret = btrfs_del_item(trans, root, path);
864         if (ret)
865                 goto out;
866
867         /*
868          * at this point, the device is zero sized.  We want to
869          * remove it from the devices list and zero out the old super
870          */
871         list_del_init(&device->dev_list);
872         list_del_init(&device->dev_alloc_list);
873         fs_devices = root->fs_info->fs_devices;
874
875         next_dev = list_entry(fs_devices->devices.next, struct btrfs_device,
876                               dev_list);
877         if (bdev == root->fs_info->sb->s_bdev)
878                 root->fs_info->sb->s_bdev = next_dev->bdev;
879         if (bdev == fs_devices->latest_bdev)
880                 fs_devices->latest_bdev = next_dev->bdev;
881
882         total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
883         btrfs_set_super_num_devices(&root->fs_info->super_copy,
884                                     total_bytes - 1);
885 out:
886         btrfs_free_path(path);
887         unlock_chunks(root);
888         btrfs_commit_transaction(trans, root);
889         return ret;
890 }
891
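/*
 * btrfs_rm_device is the entry point for removing a device from a
 * mounted filesystem.  It refuses to drop below the minimum device
 * count for raid1/raid10, accepts the magic path "missing" to pick a
 * device that is in the metadata but has no bdev, shrinks the victim
 * to zero so every chunk migrates off of it, removes its dev item,
 * and finally wipes the btrfs magic from the on-disk super block
 * (when there is one) so it is no longer detected as part of the FS.
 */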
892 int btrfs_rm_device(struct btrfs_root *root, char *device_path)
893 {
894         struct btrfs_device *device;
895         struct block_device *bdev;
896         struct buffer_head *bh = NULL;
897         struct btrfs_super_block *disk_super;
898         u64 all_avail;
899         u64 devid;
900         int ret = 0;
901
902         mutex_lock(&uuid_mutex);
903         mutex_lock(&root->fs_info->volume_mutex);
904
905         all_avail = root->fs_info->avail_data_alloc_bits |
906                 root->fs_info->avail_system_alloc_bits |
907                 root->fs_info->avail_metadata_alloc_bits;
908
909         if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
910             btrfs_super_num_devices(&root->fs_info->super_copy) <= 4) {
911                 printk("btrfs: unable to go below four devices on raid10\n");
912                 ret = -EINVAL;
913                 goto out;
914         }
915
916         if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
917             btrfs_super_num_devices(&root->fs_info->super_copy) <= 2) {
918                 printk("btrfs: unable to go below two devices on raid1\n");
919                 ret = -EINVAL;
920                 goto out;
921         }
922
923         if (strcmp(device_path, "missing") == 0) {
924                 struct list_head *cur;
925                 struct list_head *devices;
926                 struct btrfs_device *tmp;
927
928                 device = NULL;
929                 devices = &root->fs_info->fs_devices->devices;
930                 list_for_each(cur, devices) {
931                         tmp = list_entry(cur, struct btrfs_device, dev_list);
932                         if (tmp->in_fs_metadata && !tmp->bdev) {
933                                 device = tmp;
934                                 break;
935                         }
936                 }
937                 bdev = NULL;
938                 bh = NULL;
939                 disk_super = NULL;
940                 if (!device) {
941                         printk("btrfs: no missing devices found to remove\n");
942                         goto out;
943                 }
944
945         } else {
946                 bdev = open_bdev_excl(device_path, 0,
947                                       root->fs_info->bdev_holder);
948                 if (IS_ERR(bdev)) {
949                         ret = PTR_ERR(bdev);
950                         goto out;
951                 }
952
953                 bh = __bread(bdev, BTRFS_SUPER_INFO_OFFSET / 4096, 4096);
954                 if (!bh) {
955                         ret = -EIO;
956                         goto error_close;
957                 }
958                 disk_super = (struct btrfs_super_block *)bh->b_data;
959                 if (strncmp((char *)(&disk_super->magic), BTRFS_MAGIC,
960                     sizeof(disk_super->magic))) {
961                         ret = -ENOENT;
962                         goto error_brelse;
963                 }
964                 if (memcmp(disk_super->fsid, root->fs_info->fsid,
965                            BTRFS_FSID_SIZE)) {
966                         ret = -ENOENT;
967                         goto error_brelse;
968                 }
969                 devid = le64_to_cpu(disk_super->dev_item.devid);
970                 device = btrfs_find_device(root, devid, NULL);
971                 if (!device) {
972                         ret = -ENOENT;
973                         goto error_brelse;
974                 }
975
976         }
977         root->fs_info->fs_devices->num_devices--;
978         root->fs_info->fs_devices->open_devices--;
979
980         ret = btrfs_shrink_device(device, 0);
981         if (ret)
982                 goto error_brelse;
983
984
985         ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
986         if (ret)
987                 goto error_brelse;
988
989         if (bh) {
990                 /* make sure this device isn't detected as part of
991                  * the FS anymore
992                  */
993                 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
994                 set_buffer_dirty(bh);
995                 sync_dirty_buffer(bh);
996
997                 brelse(bh);
998         }
999
1000         if (device->bdev) {
1001                 /* one close for the device struct or super_block */
1002                 close_bdev_excl(device->bdev);
1003         }
1004         if (bdev) {
1005                 /* one close for us */
1006                 close_bdev_excl(bdev);
1007         }
1008         kfree(device->name);
1009         kfree(device);
1010         ret = 0;
1011         goto out;
1012
1013 error_brelse:
1014         brelse(bh);
1015 error_close:
1016         if (bdev)
1017                 close_bdev_excl(bdev);
1018 out:
1019         mutex_unlock(&root->fs_info->volume_mutex);
1020         mutex_unlock(&uuid_mutex);
1021         return ret;
1022 }
1023
1024 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1025 {
1026         struct btrfs_trans_handle *trans;
1027         struct btrfs_device *device;
1028         struct block_device *bdev;
1029         struct list_head *cur;
1030         struct list_head *devices;
1031         u64 total_bytes;
1032         int ret = 0;
1033
1034
1035         bdev = open_bdev_excl(device_path, 0, root->fs_info->bdev_holder);
1036         if (IS_ERR(bdev)) {
1037                 return PTR_ERR(bdev);
1038         }
1039
1040         mutex_lock(&root->fs_info->volume_mutex);
1041
1042         trans = btrfs_start_transaction(root, 1);
1043         lock_chunks(root);
1044         devices = &root->fs_info->fs_devices->devices;
1045         list_for_each(cur, devices) {
1046                 device = list_entry(cur, struct btrfs_device, dev_list);
1047                 if (device->bdev == bdev) {
1048                         ret = -EEXIST;
1049                         goto out;
1050                 }
1051         }
1052
1053         device = kzalloc(sizeof(*device), GFP_NOFS);
1054         if (!device) {
1055                 /* we can safely leave the fs_devices entry around */
1056                 ret = -ENOMEM;
1057                 goto out_close_bdev;
1058         }
1059
1060         device->barriers = 1;
1061         device->work.func = pending_bios_fn;
1062         generate_random_uuid(device->uuid);
1063         spin_lock_init(&device->io_lock);
1064         device->name = kstrdup(device_path, GFP_NOFS);
1065         if (!device->name) {
1066                 kfree(device);
1067                 goto out_close_bdev;
1068         }
1069         device->io_width = root->sectorsize;
1070         device->io_align = root->sectorsize;
1071         device->sector_size = root->sectorsize;
1072         device->total_bytes = i_size_read(bdev->bd_inode);
1073         device->dev_root = root->fs_info->dev_root;
1074         device->bdev = bdev;
1075         device->in_fs_metadata = 1;
1076
1077         ret = btrfs_add_device(trans, root, device);
1078         if (ret)
1079                 goto out_close_bdev;
1080
1081         total_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
1082         btrfs_set_super_total_bytes(&root->fs_info->super_copy,
1083                                     total_bytes + device->total_bytes);
1084
1085         total_bytes = btrfs_super_num_devices(&root->fs_info->super_copy);
1086         btrfs_set_super_num_devices(&root->fs_info->super_copy,
1087                                     total_bytes + 1);
1088
1089         list_add(&device->dev_list, &root->fs_info->fs_devices->devices);
1090         list_add(&device->dev_alloc_list,
1091                  &root->fs_info->fs_devices->alloc_list);
1092         root->fs_info->fs_devices->num_devices++;
1093         root->fs_info->fs_devices->open_devices++;
1094 out:
1095         unlock_chunks(root);
1096         btrfs_end_transaction(trans, root);
1097         mutex_unlock(&root->fs_info->volume_mutex);
1098
1099         return ret;
1100
1101 out_close_bdev:
1102         close_bdev_excl(bdev);
1103         goto out;
1104 }
1105
1106 int btrfs_update_device(struct btrfs_trans_handle *trans,
1107                         struct btrfs_device *device)
1108 {
1109         int ret;
1110         struct btrfs_path *path;
1111         struct btrfs_root *root;
1112         struct btrfs_dev_item *dev_item;
1113         struct extent_buffer *leaf;
1114         struct btrfs_key key;
1115
1116         root = device->dev_root->fs_info->chunk_root;
1117
1118         path = btrfs_alloc_path();
1119         if (!path)
1120                 return -ENOMEM;
1121
1122         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1123         key.type = BTRFS_DEV_ITEM_KEY;
1124         key.offset = device->devid;
1125
1126         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
1127         if (ret < 0)
1128                 goto out;
1129
1130         if (ret > 0) {
1131                 ret = -ENOENT;
1132                 goto out;
1133         }
1134
1135         leaf = path->nodes[0];
1136         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1137
1138         btrfs_set_device_id(leaf, dev_item, device->devid);
1139         btrfs_set_device_type(leaf, dev_item, device->type);
1140         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1141         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1142         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1143         btrfs_set_device_total_bytes(leaf, dev_item, device->total_bytes);
1144         btrfs_set_device_bytes_used(leaf, dev_item, device->bytes_used);
1145         btrfs_mark_buffer_dirty(leaf);
1146
1147 out:
1148         btrfs_free_path(path);
1149         return ret;
1150 }
1151
1152 static int __btrfs_grow_device(struct btrfs_trans_handle *trans,
1153                       struct btrfs_device *device, u64 new_size)
1154 {
1155         struct btrfs_super_block *super_copy =
1156                 &device->dev_root->fs_info->super_copy;
1157         u64 old_total = btrfs_super_total_bytes(super_copy);
1158         u64 diff = new_size - device->total_bytes;
1159
1160         btrfs_set_super_total_bytes(super_copy, old_total + diff);
1161         return btrfs_update_device(trans, device);
1162 }
1163
1164 int btrfs_grow_device(struct btrfs_trans_handle *trans,
1165                       struct btrfs_device *device, u64 new_size)
1166 {
1167         int ret;
1168         lock_chunks(device->dev_root);
1169         ret = __btrfs_grow_device(trans, device, new_size);
1170         unlock_chunks(device->dev_root);
1171         return ret;
1172 }
1173
1174 static int btrfs_free_chunk(struct btrfs_trans_handle *trans,
1175                             struct btrfs_root *root,
1176                             u64 chunk_tree, u64 chunk_objectid,
1177                             u64 chunk_offset)
1178 {
1179         int ret;
1180         struct btrfs_path *path;
1181         struct btrfs_key key;
1182
1183         root = root->fs_info->chunk_root;
1184         path = btrfs_alloc_path();
1185         if (!path)
1186                 return -ENOMEM;
1187
1188         key.objectid = chunk_objectid;
1189         key.offset = chunk_offset;
1190         key.type = BTRFS_CHUNK_ITEM_KEY;
1191
1192         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1193         BUG_ON(ret);
1194
1195         ret = btrfs_del_item(trans, root, path);
1196         BUG_ON(ret);
1197
1198         btrfs_free_path(path);
1199         return 0;
1200 }
1201
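/*
 * btrfs_del_sys_chunk removes one chunk from the super block's
 * sys_chunk_array, which is laid out as a packed sequence of
 * (btrfs_disk_key, btrfs_chunk) pairs.  Entries are walked one by one
 * and the matching (chunk_objectid, chunk_offset) pair is shifted out
 * with memmove before sys_array_size is reduced.
 */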
1202 int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid,
1203                         u64 chunk_offset)
1204 {
1205         struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1206         struct btrfs_disk_key *disk_key;
1207         struct btrfs_chunk *chunk;
1208         u8 *ptr;
1209         int ret = 0;
1210         u32 num_stripes;
1211         u32 array_size;
1212         u32 len = 0;
1213         u32 cur;
1214         struct btrfs_key key;
1215
1216         array_size = btrfs_super_sys_array_size(super_copy);
1217
1218         ptr = super_copy->sys_chunk_array;
1219         cur = 0;
1220
1221         while (cur < array_size) {
1222                 disk_key = (struct btrfs_disk_key *)ptr;
1223                 btrfs_disk_key_to_cpu(&key, disk_key);
1224
1225                 len = sizeof(*disk_key);
1226
1227                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
1228                         chunk = (struct btrfs_chunk *)(ptr + len);
1229                         num_stripes = btrfs_stack_chunk_num_stripes(chunk);
1230                         len += btrfs_chunk_item_size(num_stripes);
1231                 } else {
1232                         ret = -EIO;
1233                         break;
1234                 }
1235                 if (key.objectid == chunk_objectid &&
1236                     key.offset == chunk_offset) {
1237                         memmove(ptr, ptr + len, array_size - (cur + len));
1238                         array_size -= len;
1239                         btrfs_set_super_sys_array_size(super_copy, array_size);
1240                 } else {
1241                         ptr += len;
1242                         cur += len;
1243                 }
1244         }
1245         return ret;
1246 }
1247
1248
1249 int btrfs_relocate_chunk(struct btrfs_root *root,
1250                          u64 chunk_tree, u64 chunk_objectid,
1251                          u64 chunk_offset)
1252 {
1253         struct extent_map_tree *em_tree;
1254         struct btrfs_root *extent_root;
1255         struct btrfs_trans_handle *trans;
1256         struct extent_map *em;
1257         struct map_lookup *map;
1258         int ret;
1259         int i;
1260
1261         printk("btrfs relocating chunk %llu\n",
1262                (unsigned long long)chunk_offset);
1263         root = root->fs_info->chunk_root;
1264         extent_root = root->fs_info->extent_root;
1265         em_tree = &root->fs_info->mapping_tree.map_tree;
1266
1267         /* step one, relocate all the extents inside this chunk */
1268         ret = btrfs_shrink_extent_tree(extent_root, chunk_offset);
1269         BUG_ON(ret);
1270
1271         trans = btrfs_start_transaction(root, 1);
1272         BUG_ON(!trans);
1273
1274         lock_chunks(root);
1275
1276         /*
1277          * step two, delete the device extents and the
1278          * chunk tree entries
1279          */
1280         spin_lock(&em_tree->lock);
1281         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
1282         spin_unlock(&em_tree->lock);
1283
1284         BUG_ON(em->start > chunk_offset ||
1285                em->start + em->len < chunk_offset);
1286         map = (struct map_lookup *)em->bdev;
1287
1288         for (i = 0; i < map->num_stripes; i++) {
1289                 ret = btrfs_free_dev_extent(trans, map->stripes[i].dev,
1290                                             map->stripes[i].physical);
1291                 BUG_ON(ret);
1292
1293                 if (map->stripes[i].dev) {
1294                         ret = btrfs_update_device(trans, map->stripes[i].dev);
1295                         BUG_ON(ret);
1296                 }
1297         }
1298         ret = btrfs_free_chunk(trans, root, chunk_tree, chunk_objectid,
1299                                chunk_offset);
1300
1301         BUG_ON(ret);
1302
1303         if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
1304                 ret = btrfs_del_sys_chunk(root, chunk_objectid, chunk_offset);
1305                 BUG_ON(ret);
1306         }
1307
1308         spin_lock(&em_tree->lock);
1309         remove_extent_mapping(em_tree, em);
1310         kfree(map);
1311         em->bdev = NULL;
1312
1313         /* once for the tree */
1314         free_extent_map(em);
1315         spin_unlock(&em_tree->lock);
1316
1317         /* once for us */
1318         free_extent_map(em);
1319
1320         unlock_chunks(root);
1321         btrfs_end_transaction(trans, root);
1322         return 0;
1323 }
1324
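/*
 * div_factor returns num scaled by factor tenths, e.g.
 * div_factor(1000, 3) == 300 and div_factor(x, 10) == x.
 */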
1325 static u64 div_factor(u64 num, int factor)
1326 {
1327         if (factor == 10)
1328                 return num;
1329         num *= factor;
1330         do_div(num, 10);
1331         return num;
1332 }
1333
1334
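/*
 * btrfs_balance rewrites every chunk in the filesystem.  Step one
 * trims each nearly-full device slightly and then grows it back to its
 * original size, which pushes the chunks at the tail of the device
 * somewhere else and makes some room.  Step two walks the chunk tree
 * from the highest offset downward and calls btrfs_relocate_chunk()
 * on each chunk except chunk zero.
 */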
1335 int btrfs_balance(struct btrfs_root *dev_root)
1336 {
1337         int ret;
1338         struct list_head *cur;
1339         struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
1340         struct btrfs_device *device;
1341         u64 old_size;
1342         u64 size_to_free;
1343         struct btrfs_path *path;
1344         struct btrfs_key key;
1345         struct btrfs_chunk *chunk;
1346         struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
1347         struct btrfs_trans_handle *trans;
1348         struct btrfs_key found_key;
1349
1350
1351         mutex_lock(&dev_root->fs_info->volume_mutex);
1352         dev_root = dev_root->fs_info->dev_root;
1353
1354         /* step one, make some room on all the devices */
1355         list_for_each(cur, devices) {
1356                 device = list_entry(cur, struct btrfs_device, dev_list);
1357                 old_size = device->total_bytes;
1358                 size_to_free = div_factor(old_size, 1);
1359                 size_to_free = min(size_to_free, (u64)1 * 1024 * 1024);
1360                 if (device->total_bytes - device->bytes_used > size_to_free)
1361                         continue;
1362
1363                 ret = btrfs_shrink_device(device, old_size - size_to_free);
1364                 BUG_ON(ret);
1365
1366                 trans = btrfs_start_transaction(dev_root, 1);
1367                 BUG_ON(!trans);
1368
1369                 ret = btrfs_grow_device(trans, device, old_size);
1370                 BUG_ON(ret);
1371
1372                 btrfs_end_transaction(trans, dev_root);
1373         }
1374
1375         /* step two, relocate all the chunks */
1376         path = btrfs_alloc_path();
1377         BUG_ON(!path);
1378
1379         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
1380         key.offset = (u64)-1;
1381         key.type = BTRFS_CHUNK_ITEM_KEY;
1382
1383         while(1) {
1384                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
1385                 if (ret < 0)
1386                         goto error;
1387
1388                 /*
1389                  * this shouldn't happen, it means the last relocate
1390                  * failed
1391                  */
1392                 if (ret == 0)
1393                         break;
1394
1395                 ret = btrfs_previous_item(chunk_root, path, 0,
1396                                           BTRFS_CHUNK_ITEM_KEY);
1397                 if (ret)
1398                         break;
1399
1400                 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1401                                       path->slots[0]);
1402                 if (found_key.objectid != key.objectid)
1403                         break;
1404
1405                 chunk = btrfs_item_ptr(path->nodes[0],
1406                                        path->slots[0],
1407                                        struct btrfs_chunk);
1408                 key.offset = found_key.offset;
1409                 /* chunk zero is special */
1410                 if (key.offset == 0)
1411                         break;
1412
1413                 btrfs_release_path(chunk_root, path);
1414                 ret = btrfs_relocate_chunk(chunk_root,
1415                                            chunk_root->root_key.objectid,
1416                                            found_key.objectid,
1417                                            found_key.offset);
1418                 BUG_ON(ret);
1419         }
1420         ret = 0;
1421 error:
1422         btrfs_free_path(path);
1423         mutex_unlock(&dev_root->fs_info->volume_mutex);
1424         return ret;
1425 }
1426
1427 /*
1428  * shrinking a device means finding all of the device extents past
1429  * the new size, and then following the back refs to the chunks.
1430  * The chunk relocation code actually frees the device extent
1431  */
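/*
 * In outline: device->total_bytes and the super copy's total_bytes are
 * reduced first, then the dev extent items are walked from the end of
 * the device backwards.  Any extent that still ends past new_size has
 * its owning chunk relocated with btrfs_relocate_chunk(), which is
 * what ultimately frees the device extent.
 */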
1432 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
1433 {
1434         struct btrfs_trans_handle *trans;
1435         struct btrfs_root *root = device->dev_root;
1436         struct btrfs_dev_extent *dev_extent = NULL;
1437         struct btrfs_path *path;
1438         u64 length;
1439         u64 chunk_tree;
1440         u64 chunk_objectid;
1441         u64 chunk_offset;
1442         int ret;
1443         int slot;
1444         struct extent_buffer *l;
1445         struct btrfs_key key;
1446         struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1447         u64 old_total = btrfs_super_total_bytes(super_copy);
1448         u64 diff = device->total_bytes - new_size;
1449
1450
1451         path = btrfs_alloc_path();
1452         if (!path)
1453                 return -ENOMEM;
1454
1455         trans = btrfs_start_transaction(root, 1);
1456         if (!trans) {
1457                 ret = -ENOMEM;
1458                 goto done;
1459         }
1460
1461         path->reada = 2;
1462
1463         lock_chunks(root);
1464
1465         device->total_bytes = new_size;
1466         ret = btrfs_update_device(trans, device);
1467         if (ret) {
1468                 unlock_chunks(root);
1469                 btrfs_end_transaction(trans, root);
1470                 goto done;
1471         }
1472         WARN_ON(diff > old_total);
1473         btrfs_set_super_total_bytes(super_copy, old_total - diff);
1474         unlock_chunks(root);
1475         btrfs_end_transaction(trans, root);
1476
1477         key.objectid = device->devid;
1478         key.offset = (u64)-1;
1479         key.type = BTRFS_DEV_EXTENT_KEY;
1480
1481         while (1) {
1482                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1483                 if (ret < 0)
1484                         goto done;
1485
1486                 ret = btrfs_previous_item(root, path, 0, key.type);
1487                 if (ret < 0)
1488                         goto done;
1489                 if (ret) {
1490                         ret = 0;
1491                         goto done;
1492                 }
1493
1494                 l = path->nodes[0];
1495                 slot = path->slots[0];
1496                 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
1497
1498                 if (key.objectid != device->devid)
1499                         goto done;
1500
1501                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1502                 length = btrfs_dev_extent_length(l, dev_extent);
1503
1504                 if (key.offset + length <= new_size)
1505                         goto done;
1506
1507                 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
1508                 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
1509                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
1510                 btrfs_release_path(root, path);
1511
1512                 ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid,
1513                                            chunk_offset);
1514                 if (ret)
1515                         goto done;
1516         }
1517
1518 done:
1519         btrfs_free_path(path);
1520         return ret;
1521 }
1522
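     /*
      * system chunk items are also packed into the super block's
      * sys_chunk_array as a disk key followed by the chunk item, so the
      * chunk tree can be bootstrapped at mount time
      */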
1523 int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
1524                            struct btrfs_root *root,
1525                            struct btrfs_key *key,
1526                            struct btrfs_chunk *chunk, int item_size)
1527 {
1528         struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
1529         struct btrfs_disk_key disk_key;
1530         u32 array_size;
1531         u8 *ptr;
1532
1533         array_size = btrfs_super_sys_array_size(super_copy);
1534         if (array_size + item_size > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
1535                 return -EFBIG;
1536
1537         ptr = super_copy->sys_chunk_array + array_size;
1538         btrfs_cpu_key_to_disk(&disk_key, key);
1539         memcpy(ptr, &disk_key, sizeof(disk_key));
1540         ptr += sizeof(disk_key);
1541         memcpy(ptr, chunk, item_size);
1542         item_size += sizeof(disk_key);
1543         btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
1544         return 0;
1545 }
1546
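     /*
      * translate the size of a single stripe into the logical size of the
      * chunk: RAID1 and DUP store extra copies so the chunk only holds one
      * stripe worth of data, RAID10 holds one stripe per group of
      * sub_stripes mirrors, and everything else is striped across all
      * devices
      */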
1547 static u64 chunk_bytes_by_type(u64 type, u64 calc_size, int num_stripes,
1548                                int sub_stripes)
1549 {
1550         if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP))
1551                 return calc_size;
1552         else if (type & BTRFS_BLOCK_GROUP_RAID10)
1553                 return calc_size * (num_stripes / sub_stripes);
1554         else
1555                 return calc_size * num_stripes;
1556 }
1557
1558
1559 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
1560                       struct btrfs_root *extent_root, u64 *start,
1561                       u64 *num_bytes, u64 type)
1562 {
1563         u64 dev_offset;
1564         struct btrfs_fs_info *info = extent_root->fs_info;
1565         struct btrfs_root *chunk_root = extent_root->fs_info->chunk_root;
1566         struct btrfs_path *path;
1567         struct btrfs_stripe *stripes;
1568         struct btrfs_device *device = NULL;
1569         struct btrfs_chunk *chunk;
1570         struct list_head private_devs;
1571         struct list_head *dev_list;
1572         struct list_head *cur;
1573         struct extent_map_tree *em_tree;
1574         struct map_lookup *map;
1575         struct extent_map *em;
1576         int min_stripe_size = 1 * 1024 * 1024;
1577         u64 physical;
1578         u64 calc_size = 1024 * 1024 * 1024;
1579         u64 max_chunk_size = calc_size;
1580         u64 min_free;
1581         u64 avail;
1582         u64 max_avail = 0;
1583         u64 percent_max;
1584         int num_stripes = 1;
1585         int min_stripes = 1;
1586         int sub_stripes = 0;
1587         int looped = 0;
1588         int ret;
1589         int index;
1590         int stripe_len = 64 * 1024;
1591         struct btrfs_key key;
1592
1593         if ((type & BTRFS_BLOCK_GROUP_RAID1) &&
1594             (type & BTRFS_BLOCK_GROUP_DUP)) {
1595                 WARN_ON(1);
1596                 type &= ~BTRFS_BLOCK_GROUP_DUP;
1597         }
1598         dev_list = &extent_root->fs_info->fs_devices->alloc_list;
1599         if (list_empty(dev_list))
1600                 return -ENOSPC;
1601
1602         if (type & (BTRFS_BLOCK_GROUP_RAID0)) {
1603                 num_stripes = extent_root->fs_info->fs_devices->open_devices;
1604                 min_stripes = 2;
1605         }
1606         if (type & (BTRFS_BLOCK_GROUP_DUP)) {
1607                 num_stripes = 2;
1608                 min_stripes = 2;
1609         }
1610         if (type & (BTRFS_BLOCK_GROUP_RAID1)) {
1611                 num_stripes = min_t(u64, 2,
1612                             extent_root->fs_info->fs_devices->open_devices);
1613                 if (num_stripes < 2)
1614                         return -ENOSPC;
1615                 min_stripes = 2;
1616         }
1617         if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
1618                 num_stripes = extent_root->fs_info->fs_devices->open_devices;
1619                 if (num_stripes < 4)
1620                         return -ENOSPC;
1621                 num_stripes &= ~(u32)1;
1622                 sub_stripes = 2;
1623                 min_stripes = 4;
1624         }
1625
1626         if (type & BTRFS_BLOCK_GROUP_DATA) {
1627                 max_chunk_size = 10 * calc_size;
1628                 min_stripe_size = 64 * 1024 * 1024;
1629         } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
1630                 max_chunk_size = 4 * calc_size;
1631                 min_stripe_size = 32 * 1024 * 1024;
1632         } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
1633                 calc_size = 8 * 1024 * 1024;
1634                 max_chunk_size = calc_size * 2;
1635                 min_stripe_size = 1 * 1024 * 1024;
1636         }
1637
1638         path = btrfs_alloc_path();
1639         if (!path)
1640                 return -ENOMEM;
1641
1642         /* we don't want a chunk larger than 10% of the FS */
1643         percent_max = div_factor(btrfs_super_total_bytes(&info->super_copy), 1);
1644         max_chunk_size = min(percent_max, max_chunk_size);
1645
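     /*
      * if we can't find enough devices with min_free bytes available we
      * come back here with a smaller calc_size (or fewer stripes) and
      * try again
      */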
1646 again:
1647         if (calc_size * num_stripes > max_chunk_size) {
1648                 calc_size = max_chunk_size;
1649                 do_div(calc_size, num_stripes);
1650                 do_div(calc_size, stripe_len);
1651                 calc_size *= stripe_len;
1652         }
1653         /* we don't want tiny stripes */
1654         calc_size = max_t(u64, min_stripe_size, calc_size);
1655
1656         do_div(calc_size, stripe_len);
1657         calc_size *= stripe_len;
1658
1659         INIT_LIST_HEAD(&private_devs);
1660         cur = dev_list->next;
1661         index = 0;
1662
1663         if (type & BTRFS_BLOCK_GROUP_DUP)
1664                 min_free = calc_size * 2;
1665         else
1666                 min_free = calc_size;
1667
1668         /* we add 1MB because we never use the first 1MB of the device */
1669         min_free += 1024 * 1024;
1670
1671         /* build a private list of devices we will allocate from */
1672         while(index < num_stripes) {
1673                 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
1674
1675                 if (device->total_bytes > device->bytes_used)
1676                         avail = device->total_bytes - device->bytes_used;
1677                 else
1678                         avail = 0;
1679                 cur = cur->next;
1680
1681                 if (device->in_fs_metadata && avail >= min_free) {
1682                         u64 ignored_start = 0;
1683                         ret = find_free_dev_extent(trans, device, path,
1684                                                    min_free,
1685                                                    &ignored_start);
1686                         if (ret == 0) {
1687                                 list_move_tail(&device->dev_alloc_list,
1688                                                &private_devs);
1689                                 index++;
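                                     /* DUP puts both copies on this one device */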
1690                                 if (type & BTRFS_BLOCK_GROUP_DUP)
1691                                         index++;
1692                         }
1693                 } else if (device->in_fs_metadata && avail > max_avail)
1694                         max_avail = avail;
1695                 if (cur == dev_list)
1696                         break;
1697         }
1698         if (index < num_stripes) {
1699                 list_splice(&private_devs, dev_list);
1700                 if (index >= min_stripes) {
1701                         num_stripes = index;
1702                         if (type & (BTRFS_BLOCK_GROUP_RAID10)) {
1703                                 num_stripes /= sub_stripes;
1704                                 num_stripes *= sub_stripes;
1705                         }
1706                         looped = 1;
1707                         goto again;
1708                 }
1709                 if (!looped && max_avail > 0) {
1710                         looped = 1;
1711                         calc_size = max_avail;
1712                         goto again;
1713                 }
1714                 btrfs_free_path(path);
1715                 return -ENOSPC;
1716         }
1717         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
1718         key.type = BTRFS_CHUNK_ITEM_KEY;
1719         ret = find_next_chunk(chunk_root, BTRFS_FIRST_CHUNK_TREE_OBJECTID,
1720                               &key.offset);
1721         if (ret) {
1722                 btrfs_free_path(path);
1723                 return ret;
1724         }
1725
1726         chunk = kmalloc(btrfs_chunk_item_size(num_stripes), GFP_NOFS);
1727         if (!chunk) {
1728                 btrfs_free_path(path);
1729                 return -ENOMEM;
1730         }
1731
1732         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
1733         if (!map) {
1734                 kfree(chunk);
1735                 btrfs_free_path(path);
1736                 return -ENOMEM;
1737         }
1738         btrfs_free_path(path);
1739         path = NULL;
1740
1741         stripes = &chunk->stripe;
1742         *num_bytes = chunk_bytes_by_type(type, calc_size,
1743                                          num_stripes, sub_stripes);
1744
1745         index = 0;
1746         while(index < num_stripes) {
1747                 struct btrfs_stripe *stripe;
1748                 BUG_ON(list_empty(&private_devs));
1749                 cur = private_devs.next;
1750                 device = list_entry(cur, struct btrfs_device, dev_alloc_list);
1751
1752                 /* loop over this device again if we're doing a dup group */
1753                 if (!(type & BTRFS_BLOCK_GROUP_DUP) ||
1754                     (index == num_stripes - 1))
1755                         list_move_tail(&device->dev_alloc_list, dev_list);
1756
1757                 ret = btrfs_alloc_dev_extent(trans, device,
1758                              info->chunk_root->root_key.objectid,
1759                              BTRFS_FIRST_CHUNK_TREE_OBJECTID, key.offset,
1760                              calc_size, &dev_offset);
1761                 BUG_ON(ret);
1762                 device->bytes_used += calc_size;
1763                 ret = btrfs_update_device(trans, device);
1764                 BUG_ON(ret);
1765
1766                 map->stripes[index].dev = device;
1767                 map->stripes[index].physical = dev_offset;
1768                 stripe = stripes + index;
1769                 btrfs_set_stack_stripe_devid(stripe, device->devid);
1770                 btrfs_set_stack_stripe_offset(stripe, dev_offset);
1771                 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
1772                 physical = dev_offset;
1773                 index++;
1774         }
1775         BUG_ON(!list_empty(&private_devs));
1776
1777         /* key was set above */
1778         btrfs_set_stack_chunk_length(chunk, *num_bytes);
1779         btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
1780         btrfs_set_stack_chunk_stripe_len(chunk, stripe_len);
1781         btrfs_set_stack_chunk_type(chunk, type);
1782         btrfs_set_stack_chunk_num_stripes(chunk, num_stripes);
1783         btrfs_set_stack_chunk_io_align(chunk, stripe_len);
1784         btrfs_set_stack_chunk_io_width(chunk, stripe_len);
1785         btrfs_set_stack_chunk_sector_size(chunk, extent_root->sectorsize);
1786         btrfs_set_stack_chunk_sub_stripes(chunk, sub_stripes);
1787         map->sector_size = extent_root->sectorsize;
1788         map->stripe_len = stripe_len;
1789         map->io_align = stripe_len;
1790         map->io_width = stripe_len;
1791         map->type = type;
1792         map->num_stripes = num_stripes;
1793         map->sub_stripes = sub_stripes;
1794
1795         ret = btrfs_insert_item(trans, chunk_root, &key, chunk,
1796                                 btrfs_chunk_item_size(num_stripes));
1797         BUG_ON(ret);
1798         *start = key.offset;
1799
1800         em = alloc_extent_map(GFP_NOFS);
1801         if (!em) {
1802                 kfree(map);
                     kfree(chunk);
                     return -ENOMEM;
             }
1803         em->bdev = (struct block_device *)map;
1804         em->start = key.offset;
1805         em->len = *num_bytes;
1806         em->block_start = 0;
1807
1808         if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
1809                 ret = btrfs_add_system_chunk(trans, chunk_root, &key,
1810                                     chunk, btrfs_chunk_item_size(num_stripes));
1811                 BUG_ON(ret);
1812         }
1813         kfree(chunk);
1814
1815         em_tree = &extent_root->fs_info->mapping_tree.map_tree;
1816         spin_lock(&em_tree->lock);
1817         ret = add_extent_mapping(em_tree, em);
1818         spin_unlock(&em_tree->lock);
1819         BUG_ON(ret);
1820         free_extent_map(em);
1821         return ret;
1822 }
1823
1824 void btrfs_mapping_init(struct btrfs_mapping_tree *tree)
1825 {
1826         extent_map_tree_init(&tree->map_tree, GFP_NOFS);
1827 }
1828
1829 void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
1830 {
1831         struct extent_map *em;
1832
1833         while(1) {
1834                 spin_lock(&tree->map_tree.lock);
1835                 em = lookup_extent_mapping(&tree->map_tree, 0, (u64)-1);
1836                 if (em)
1837                         remove_extent_mapping(&tree->map_tree, em);
1838                 spin_unlock(&tree->map_tree.lock);
1839                 if (!em)
1840                         break;
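                     /* the bdev pointer actually holds our map_lookup struct */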
1841                 kfree(em->bdev);
1842                 /* once for us */
1843                 free_extent_map(em);
1844                 /* once for the tree */
1845                 free_extent_map(em);
1846         }
1847 }
1848
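     /*
      * return how many copies of the data in this logical range exist,
      * based on the raid type of the chunk that covers it
      */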
1849 int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len)
1850 {
1851         struct extent_map *em;
1852         struct map_lookup *map;
1853         struct extent_map_tree *em_tree = &map_tree->map_tree;
1854         int ret;
1855
1856         spin_lock(&em_tree->lock);
1857         em = lookup_extent_mapping(em_tree, logical, len);
1858         spin_unlock(&em_tree->lock);
1859         BUG_ON(!em);
1860
1861         BUG_ON(em->start > logical || em->start + em->len < logical);
1862         map = (struct map_lookup *)em->bdev;
1863         if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1))
1864                 ret = map->num_stripes;
1865         else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
1866                 ret = map->sub_stripes;
1867         else
1868                 ret = 1;
1869         free_extent_map(em);
1870         return ret;
1871 }
1872
1873 static int find_live_mirror(struct map_lookup *map, int first, int num,
1874                             int optimal)
1875 {
1876         int i;
1877         if (map->stripes[optimal].dev->bdev)
1878                 return optimal;
1879         for (i = first; i < first + num; i++) {
1880                 if (map->stripes[i].dev->bdev)
1881                         return i;
1882         }
1883         /* we couldn't find one that doesn't fail.  Just return something
1884          * and the io error handling code will clean up eventually
1885          */
1886         return optimal;
1887 }
1888
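     /*
      * map a logical extent onto the physical stripes that store it.
      * Writes get a stripe for every mirror, reads pick a single live
      * mirror, and when unplug_page is set we just kick the backing
      * devices for that page instead of filling in a multi-bio
      */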
1889 static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
1890                              u64 logical, u64 *length,
1891                              struct btrfs_multi_bio **multi_ret,
1892                              int mirror_num, struct page *unplug_page)
1893 {
1894         struct extent_map *em;
1895         struct map_lookup *map;
1896         struct extent_map_tree *em_tree = &map_tree->map_tree;
1897         u64 offset;
1898         u64 stripe_offset;
1899         u64 stripe_nr;
1900         int stripes_allocated = 8;
1901         int stripes_required = 1;
1902         int stripe_index;
1903         int i;
1904         int num_stripes;
1905         int max_errors = 0;
1906         struct btrfs_multi_bio *multi = NULL;
1907
1908         if (multi_ret && !(rw & (1 << BIO_RW))) {
1909                 stripes_allocated = 1;
1910         }
1911 again:
1912         if (multi_ret) {
1913                 multi = kzalloc(btrfs_multi_bio_size(stripes_allocated),
1914                                 GFP_NOFS);
1915                 if (!multi)
1916                         return -ENOMEM;
1917
1918                 atomic_set(&multi->error, 0);
1919         }
1920
1921         spin_lock(&em_tree->lock);
1922         em = lookup_extent_mapping(em_tree, logical, *length);
1923         spin_unlock(&em_tree->lock);
1924
1925         if (!em && unplug_page)
1926                 return 0;
1927
1928         if (!em) {
1929                 printk("unable to find logical %Lu len %Lu\n", logical, *length);
1930                 BUG();
1931         }
1932
1933         BUG_ON(em->start > logical || em->start + em->len < logical);
1934         map = (struct map_lookup *)em->bdev;
1935         offset = logical - em->start;
1936
1937         if (mirror_num > map->num_stripes)
1938                 mirror_num = 0;
1939
1940         /* if our multi bio struct is too small, back off and try again */
1941         if (rw & (1 << BIO_RW)) {
1942                 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
1943                                  BTRFS_BLOCK_GROUP_DUP)) {
1944                         stripes_required = map->num_stripes;
1945                         max_errors = 1;
1946                 } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
1947                         stripes_required = map->sub_stripes;
1948                         max_errors = 1;
1949                 }
1950         }
1951         if (multi_ret && (rw & (1 << BIO_RW)) &&
1952             stripes_allocated < stripes_required) {
1953                 stripes_allocated = map->num_stripes;
1954                 free_extent_map(em);
1955                 kfree(multi);
1956                 goto again;
1957         }
1958         stripe_nr = offset;
1959         /*
1960          * stripe_nr counts the total number of stripes we have to stride
1961          * to get to this block
1962          */
1963         do_div(stripe_nr, map->stripe_len);
1964
1965         stripe_offset = stripe_nr * map->stripe_len;
1966         BUG_ON(offset < stripe_offset);
1967
1968                 /* stripe_offset is the offset of this block in its stripe */
1969         stripe_offset = offset - stripe_offset;
1970
1971         if (map->type & (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
1972                          BTRFS_BLOCK_GROUP_RAID10 |
1973                          BTRFS_BLOCK_GROUP_DUP)) {
1974                 /* we limit the length of each bio to what fits in a stripe */
1975                 *length = min_t(u64, em->len - offset,
1976                               map->stripe_len - stripe_offset);
1977         } else {
1978                 *length = em->len - offset;
1979         }
1980
1981         if (!multi_ret && !unplug_page)
1982                 goto out;
1983
1984         num_stripes = 1;
1985         stripe_index = 0;
1986         if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
1987                 if (unplug_page || (rw & (1 << BIO_RW)))
1988                         num_stripes = map->num_stripes;
1989                 else if (mirror_num)
1990                         stripe_index = mirror_num - 1;
1991                 else {
1992                         stripe_index = find_live_mirror(map, 0,
1993                                             map->num_stripes,
1994                                             current->pid % map->num_stripes);
1995                 }
1996
1997         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
1998                 if (rw & (1 << BIO_RW))
1999                         num_stripes = map->num_stripes;
2000                 else if (mirror_num)
2001                         stripe_index = mirror_num - 1;
2002
2003         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
2004                 int factor = map->num_stripes / map->sub_stripes;
2005
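                     /*
                      * RAID10 lays out groups of sub_stripes mirrored copies,
                      * so find which group this stripe falls into and point
                      * stripe_index at the first copy in that group
                      */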
2006                 stripe_index = do_div(stripe_nr, factor);
2007                 stripe_index *= map->sub_stripes;
2008
2009                 if (unplug_page || (rw & (1 << BIO_RW)))
2010                         num_stripes = map->sub_stripes;
2011                 else if (mirror_num)
2012                         stripe_index += mirror_num - 1;
2013                 else {
2014                         stripe_index = find_live_mirror(map, stripe_index,
2015                                               map->sub_stripes, stripe_index +
2016                                               current->pid % map->sub_stripes);
2017                 }
2018         } else {
2019                 /*
2020                  * after this do_div call, stripe_nr is the number of stripes
2021                  * on this device we have to walk to find the data, and
2022                  * stripe_index is the number of our device in the stripe array
2023                  */
2024                 stripe_index = do_div(stripe_nr, map->num_stripes);
2025         }
2026         BUG_ON(stripe_index >= map->num_stripes);
2027
2028         for (i = 0; i < num_stripes; i++) {
2029                 if (unplug_page) {
2030                         struct btrfs_device *device;
2031                         struct backing_dev_info *bdi;
2032
2033                         device = map->stripes[stripe_index].dev;
2034                         if (device->bdev) {
2035                                 bdi = blk_get_backing_dev_info(device->bdev);
2036                                 if (bdi->unplug_io_fn) {
2037                                         bdi->unplug_io_fn(bdi, unplug_page);
2038                                 }
2039                         }
2040                 } else {
2041                         multi->stripes[i].physical =
2042                                 map->stripes[stripe_index].physical +
2043                                 stripe_offset + stripe_nr * map->stripe_len;
2044                         multi->stripes[i].dev = map->stripes[stripe_index].dev;
2045                 }
2046                 stripe_index++;
2047         }
2048         if (multi_ret) {
2049                 *multi_ret = multi;
2050                 multi->num_stripes = num_stripes;
2051                 multi->max_errors = max_errors;
2052         }
2053 out:
2054         free_extent_map(em);
2055         return 0;
2056 }
2057
2058 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
2059                       u64 logical, u64 *length,
2060                       struct btrfs_multi_bio **multi_ret, int mirror_num)
2061 {
2062         return __btrfs_map_block(map_tree, rw, logical, length, multi_ret,
2063                                  mirror_num, NULL);
2064 }
2065
2066 int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree,
2067                       u64 logical, struct page *page)
2068 {
2069         u64 length = PAGE_CACHE_SIZE;
2070         return __btrfs_map_block(map_tree, READ, logical, &length,
2071                                  NULL, 0, page);
2072 }
2073
2074
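     /*
      * completion handler shared by every clone of a multi-stripe bio.
      * errors are counted in the multi-bio, and the original bio is only
      * completed after the last clone finishes
      */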
2075 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
2076 static void end_bio_multi_stripe(struct bio *bio, int err)
2077 #else
2078 static int end_bio_multi_stripe(struct bio *bio,
2079                                    unsigned int bytes_done, int err)
2080 #endif
2081 {
2082         struct btrfs_multi_bio *multi = bio->bi_private;
2083         int is_orig_bio = 0;
2084
2085 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
2086         if (bio->bi_size)
2087                 return 1;
2088 #endif
2089         if (err)
2090                 atomic_inc(&multi->error);
2091
2092         if (bio == multi->orig_bio)
2093                 is_orig_bio = 1;
2094
2095         if (atomic_dec_and_test(&multi->stripes_pending)) {
2096                 if (!is_orig_bio) {
2097                         bio_put(bio);
2098                         bio = multi->orig_bio;
2099                 }
2100                 bio->bi_private = multi->private;
2101                 bio->bi_end_io = multi->end_io;
2102                 /* only send an error to the higher layers if it is
2103                  * beyond the tolerance of the multi-bio
2104                  */
2105                 if (atomic_read(&multi->error) > multi->max_errors) {
2106                         err = -EIO;
2107                 } else if (err) {
2108                         /*
2109                          * this bio is actually up to date, we didn't
2110                          * go over the max number of errors
2111                          */
2112                         set_bit(BIO_UPTODATE, &bio->bi_flags);
2113                         err = 0;
2114                 }
2115                 kfree(multi);
2116
2117 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
2118                 bio_endio(bio, bio->bi_size, err);
2119 #else
2120                 bio_endio(bio, err);
2121 #endif
2122         } else if (!is_orig_bio) {
2123                 bio_put(bio);
2124         }
2125 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
2126         return 0;
2127 #endif
2128 }
2129
2130 struct async_sched {
2131         struct bio *bio;
2132         int rw;
2133         struct btrfs_fs_info *info;
2134         struct btrfs_work work;
2135 };
2136
2137 /*
2138  * see run_scheduled_bios for a description of why bios are collected for
2139  * async submit.
2140  *
2141  * This will add one bio to the pending list for a device and make sure
2142  * the work struct is scheduled.
2143  */
2144 int schedule_bio(struct btrfs_root *root, struct btrfs_device *device,
2145                  int rw, struct bio *bio)
2146 {
2147         int should_queue = 1;
2148
2149         /* don't bother with additional async steps for reads, right now */
2150         if (!(rw & (1 << BIO_RW))) {
2151                 bio_get(bio);
2152                 submit_bio(rw, bio);
2153                 bio_put(bio);
2154                 return 0;
2155         }
2156
2157         /*
2158          * nr_async_bios allows us to reliably return congestion to the
2159          * higher layers.  Otherwise, the async bio makes it appear we have
2160          * made progress against dirty pages when we've really just put it
2161          * on a queue for later
2162          */
2163         atomic_inc(&root->fs_info->nr_async_bios);
2164         WARN_ON(bio->bi_next);
2165         bio->bi_next = NULL;
2166         bio->bi_rw |= rw;
2167
2168         spin_lock(&device->io_lock);
2169
2170         if (device->pending_bio_tail)
2171                 device->pending_bio_tail->bi_next = bio;
2172
2173         device->pending_bio_tail = bio;
2174         if (!device->pending_bios)
2175                 device->pending_bios = bio;
2176         if (device->running_pending)
2177                 should_queue = 0;
2178
2179         spin_unlock(&device->io_lock);
2180
2181         if (should_queue)
2182                 btrfs_queue_worker(&root->fs_info->submit_workers,
2183                                    &device->work);
2184         return 0;
2185 }
2186
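     /*
      * map a logical bio onto the stripes that store it and submit one
      * copy per stripe.  When more than one device is involved the clones
      * complete through end_bio_multi_stripe so the caller only sees a
      * single end_io
      */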
2187 int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
2188                   int mirror_num, int async_submit)
2189 {
2190         struct btrfs_mapping_tree *map_tree;
2191         struct btrfs_device *dev;
2192         struct bio *first_bio = bio;
2193         u64 logical = bio->bi_sector << 9;
2194         u64 length = 0;
2195         u64 map_length;
2196         struct btrfs_multi_bio *multi = NULL;
2197         int ret;
2198         int dev_nr = 0;
2199         int total_devs = 1;
2200
2201         length = bio->bi_size;
2202         map_tree = &root->fs_info->mapping_tree;
2203         map_length = length;
2204
2205         ret = btrfs_map_block(map_tree, rw, logical, &map_length, &multi,
2206                               mirror_num);
2207         BUG_ON(ret);
2208
2209         total_devs = multi->num_stripes;
2210         if (map_length < length) {
2211                 printk("mapping failed logical %Lu bio len %Lu "
2212                        "len %Lu\n", logical, length, map_length);
2213                 BUG();
2214         }
2215         multi->end_io = first_bio->bi_end_io;
2216         multi->private = first_bio->bi_private;
2217         multi->orig_bio = first_bio;
2218         atomic_set(&multi->stripes_pending, multi->num_stripes);
2219
2220         while(dev_nr < total_devs) {
2221                 if (total_devs > 1) {
2222                         if (dev_nr < total_devs - 1) {
2223                                 bio = bio_clone(first_bio, GFP_NOFS);
2224                                 BUG_ON(!bio);
2225                         } else {
2226                                 bio = first_bio;
2227                         }
2228                         bio->bi_private = multi;
2229                         bio->bi_end_io = end_bio_multi_stripe;
2230                 }
2231                 bio->bi_sector = multi->stripes[dev_nr].physical >> 9;
2232                 dev = multi->stripes[dev_nr].dev;
2233                 if (dev && dev->bdev) {
2234                         bio->bi_bdev = dev->bdev;
2235                         if (async_submit)
2236                                 schedule_bio(root, dev, rw, bio);
2237                         else
2238                                 submit_bio(rw, bio);
2239                 } else {
2240                         bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
2241                         bio->bi_sector = logical >> 9;
2242 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
2243                         bio_endio(bio, bio->bi_size, -EIO);
2244 #else
2245                         bio_endio(bio, -EIO);
2246 #endif
2247                 }
2248                 dev_nr++;
2249         }
2250         if (total_devs == 1)
2251                 kfree(multi);
2252         return 0;
2253 }
2254
2255 struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
2256                                        u8 *uuid)
2257 {
2258         struct list_head *head = &root->fs_info->fs_devices->devices;
2259
2260         return __find_device(head, devid, uuid);
2261 }
2262
2263 static struct btrfs_device *add_missing_dev(struct btrfs_root *root,
2264                                             u64 devid, u8 *dev_uuid)
2265 {
2266         struct btrfs_device *device;
2267         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2268
2269         device = kzalloc(sizeof(*device), GFP_NOFS);
             if (!device)
                     return NULL;
2270         list_add(&device->dev_list,
2271                  &fs_devices->devices);
2272         list_add(&device->dev_alloc_list,
2273                  &fs_devices->alloc_list);
2274         device->barriers = 1;
2275         device->dev_root = root->fs_info->dev_root;
2276         device->devid = devid;
2277         device->work.func = pending_bios_fn;
2278         fs_devices->num_devices++;
2279         spin_lock_init(&device->io_lock);
2280         memcpy(device->uuid, dev_uuid, BTRFS_UUID_SIZE);
2281         return device;
2282 }
2283
2284
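     /*
      * read a chunk item from the chunk tree (or the sys_chunk_array) and
      * insert the matching logical->physical mapping into the mapping tree
      */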
2285 static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
2286                           struct extent_buffer *leaf,
2287                           struct btrfs_chunk *chunk)
2288 {
2289         struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
2290         struct map_lookup *map;
2291         struct extent_map *em;
2292         u64 logical;
2293         u64 length;
2294         u64 devid;
2295         u8 uuid[BTRFS_UUID_SIZE];
2296         int num_stripes;
2297         int ret;
2298         int i;
2299
2300         logical = key->offset;
2301         length = btrfs_chunk_length(leaf, chunk);
2302
2303         spin_lock(&map_tree->map_tree.lock);
2304         em = lookup_extent_mapping(&map_tree->map_tree, logical, 1);
2305         spin_unlock(&map_tree->map_tree.lock);
2306
2307         /* already mapped? */
2308         if (em && em->start <= logical && em->start + em->len > logical) {
2309                 free_extent_map(em);
2310                 return 0;
2311         } else if (em) {
2312                 free_extent_map(em);
2313         }
2314
2319         em = alloc_extent_map(GFP_NOFS);
2320         if (!em)
2321                 return -ENOMEM;
2322         num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
2323         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
2324         if (!map) {
2325                 free_extent_map(em);
2326                 return -ENOMEM;
2327         }
2328
2329         em->bdev = (struct block_device *)map;
2330         em->start = logical;
2331         em->len = length;
2332         em->block_start = 0;
2333
2334         map->num_stripes = num_stripes;
2335         map->io_width = btrfs_chunk_io_width(leaf, chunk);
2336         map->io_align = btrfs_chunk_io_align(leaf, chunk);
2337         map->sector_size = btrfs_chunk_sector_size(leaf, chunk);
2338         map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
2339         map->type = btrfs_chunk_type(leaf, chunk);
2340         map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
2341         for (i = 0; i < num_stripes; i++) {
2342                 map->stripes[i].physical =
2343                         btrfs_stripe_offset_nr(leaf, chunk, i);
2344                 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
2345                 read_extent_buffer(leaf, uuid, (unsigned long)
2346                                    btrfs_stripe_dev_uuid_nr(chunk, i),
2347                                    BTRFS_UUID_SIZE);
2348                 map->stripes[i].dev = btrfs_find_device(root, devid, uuid);
2349
2350                 if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
2351                         kfree(map);
2352                         free_extent_map(em);
2353                         return -EIO;
2354                 }
2355                 if (!map->stripes[i].dev) {
2356                         map->stripes[i].dev =
2357                                 add_missing_dev(root, devid, uuid);
2358                         if (!map->stripes[i].dev) {
2359                                 kfree(map);
2360                                 free_extent_map(em);
2361                                 return -EIO;
2362                         }
2363                 }
2364                 map->stripes[i].dev->in_fs_metadata = 1;
2365         }
2366
2367         spin_lock(&map_tree->map_tree.lock);
2368         ret = add_extent_mapping(&map_tree->map_tree, em);
2369         spin_unlock(&map_tree->map_tree.lock);
2370         BUG_ON(ret);
2371         free_extent_map(em);
2372
2373         return 0;
2374 }
2375
2376 static int fill_device_from_item(struct extent_buffer *leaf,
2377                                  struct btrfs_dev_item *dev_item,
2378                                  struct btrfs_device *device)
2379 {
2380         unsigned long ptr;
2381
2382         device->devid = btrfs_device_id(leaf, dev_item);
2383         device->total_bytes = btrfs_device_total_bytes(leaf, dev_item);
2384         device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
2385         device->type = btrfs_device_type(leaf, dev_item);
2386         device->io_align = btrfs_device_io_align(leaf, dev_item);
2387         device->io_width = btrfs_device_io_width(leaf, dev_item);
2388         device->sector_size = btrfs_device_sector_size(leaf, dev_item);
2389
2390         ptr = (unsigned long)btrfs_device_uuid(dev_item);
2391         read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
2392
2393         return 0;
2394 }
2395
2396 static int read_one_dev(struct btrfs_root *root,
2397                         struct extent_buffer *leaf,
2398                         struct btrfs_dev_item *dev_item)
2399 {
2400         struct btrfs_device *device;
2401         u64 devid;
2402         int ret;
2403         u8 dev_uuid[BTRFS_UUID_SIZE];
2404
2405         devid = btrfs_device_id(leaf, dev_item);
2406         read_extent_buffer(leaf, dev_uuid,
2407                            (unsigned long)btrfs_device_uuid(dev_item),
2408                            BTRFS_UUID_SIZE);
2409         device = btrfs_find_device(root, devid, dev_uuid);
2410         if (!device) {
2411                 printk("warning devid %Lu missing\n", devid);
2412                 device = add_missing_dev(root, devid, dev_uuid);
2413                 if (!device)
2414                         return -ENOMEM;
2415         }
2416
2417         fill_device_from_item(leaf, dev_item, device);
2418         device->dev_root = root->fs_info->dev_root;
2419         device->in_fs_metadata = 1;
2420         ret = 0;
2421 #if 0
2422         ret = btrfs_open_device(device);
2423         if (ret) {
2424                 kfree(device);
2425         }
2426 #endif
2427         return ret;
2428 }
2429
2430 int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf)
2431 {
2432         struct btrfs_dev_item *dev_item;
2433
2434         dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block,
2435                                                      dev_item);
2436         return read_one_dev(root, buf, dev_item);
2437 }
2438
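     /*
      * the super block stores the system chunks in a packed array of
      * disk keys and chunk items.  Read them out here so the chunk tree
      * itself can be mapped and read
      */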
2439 int btrfs_read_sys_array(struct btrfs_root *root)
2440 {
2441         struct btrfs_super_block *super_copy = &root->fs_info->super_copy;
2442         struct extent_buffer *sb;
2443         struct btrfs_disk_key *disk_key;
2444         struct btrfs_chunk *chunk;
2445         u8 *ptr;
2446         unsigned long sb_ptr;
2447         int ret = 0;
2448         u32 num_stripes;
2449         u32 array_size;
2450         u32 len = 0;
2451         u32 cur;
2452         struct btrfs_key key;
2453
2454         sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
2455                                           BTRFS_SUPER_INFO_SIZE);
2456         if (!sb)
2457                 return -ENOMEM;
2458         btrfs_set_buffer_uptodate(sb);
2459         write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
2460         array_size = btrfs_super_sys_array_size(super_copy);
2461
2462         ptr = super_copy->sys_chunk_array;
2463         sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
2464         cur = 0;
2465
2466         while (cur < array_size) {
2467                 disk_key = (struct btrfs_disk_key *)ptr;
2468                 btrfs_disk_key_to_cpu(&key, disk_key);
2469
2470                 len = sizeof(*disk_key); ptr += len;
2471                 sb_ptr += len;
2472                 cur += len;
2473
2474                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2475                         chunk = (struct btrfs_chunk *)sb_ptr;
2476                         ret = read_one_chunk(root, &key, sb, chunk);
2477                         if (ret)
2478                                 break;
2479                         num_stripes = btrfs_chunk_num_stripes(sb, chunk);
2480                         len = btrfs_chunk_item_size(num_stripes);
2481                 } else {
2482                         ret = -EIO;
2483                         break;
2484                 }
2485                 ptr += len;
2486                 sb_ptr += len;
2487                 cur += len;
2488         }
2489         free_extent_buffer(sb);
2490         return ret;
2491 }
2492
2493 int btrfs_read_chunk_tree(struct btrfs_root *root)
2494 {
2495         struct btrfs_path *path;
2496         struct extent_buffer *leaf;
2497         struct btrfs_key key;
2498         struct btrfs_key found_key;
2499         int ret;
2500         int slot;
2501
2502         root = root->fs_info->chunk_root;
2503
2504         path = btrfs_alloc_path();
2505         if (!path)
2506                 return -ENOMEM;
2507
2508         /* first we search for all of the device items, and then we
2509          * read in all of the chunk items.  This way we can create chunk
2510          * mappings that reference all of the devices that are found
2511          */
2512         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2513         key.offset = 0;
2514         key.type = 0;
2515 again:
2516         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2517         while(1) {
2518                 leaf = path->nodes[0];
2519                 slot = path->slots[0];
2520                 if (slot >= btrfs_header_nritems(leaf)) {
2521                         ret = btrfs_next_leaf(root, path);
2522                         if (ret == 0)
2523                                 continue;
2524                         if (ret < 0)
2525                                 goto error;
2526                         break;
2527                 }
2528                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
2529                 if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
2530                         if (found_key.objectid != BTRFS_DEV_ITEMS_OBJECTID)
2531                                 break;
2532                         if (found_key.type == BTRFS_DEV_ITEM_KEY) {
2533                                 struct btrfs_dev_item *dev_item;
2534                                 dev_item = btrfs_item_ptr(leaf, slot,
2535                                                   struct btrfs_dev_item);
2536                                 ret = read_one_dev(root, leaf, dev_item);
2537                                 BUG_ON(ret);
2538                         }
2539                 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
2540                         struct btrfs_chunk *chunk;
2541                         chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
2542                         ret = read_one_chunk(root, &found_key, leaf, chunk);
2543                 }
2544                 path->slots[0]++;
2545         }
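             /* after the pass over the device items, go around again for the chunks */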
2546         if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) {
2547                 key.objectid = 0;
2548                 btrfs_release_path(root, path);
2549                 goto again;
2550         }
2551
2552         ret = 0;
2553 error:
2554         btrfs_free_path(path);
2555         return ret;
2556 }