98a1c0faedae1c7c5e28cca62a7fcaab30a73878
[safe/jmp/linux-2.6] / fs / btrfs / extent-tree.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include "hash.h"
22 #include "crc32c.h"
23 #include "ctree.h"
24 #include "disk-io.h"
25 #include "print-tree.h"
26 #include "transaction.h"
27 #include "volumes.h"
28 #include "locking.h"
29 #include "ref-cache.h"
30
31 #define BLOCK_GROUP_DATA     EXTENT_WRITEBACK
32 #define BLOCK_GROUP_METADATA EXTENT_UPTODATE
33 #define BLOCK_GROUP_SYSTEM   EXTENT_NEW
34
35 #define BLOCK_GROUP_DIRTY EXTENT_DIRTY
36
37 static int finish_current_insert(struct btrfs_trans_handle *trans, struct
38                                  btrfs_root *extent_root);
39 static int del_pending_extents(struct btrfs_trans_handle *trans, struct
40                                btrfs_root *extent_root);
41 static struct btrfs_block_group_cache *
42 __btrfs_find_block_group(struct btrfs_root *root,
43                          struct btrfs_block_group_cache *hint,
44                          u64 search_start, int data, int owner);
45
46 void maybe_lock_mutex(struct btrfs_root *root)
47 {
48         if (root != root->fs_info->extent_root &&
49             root != root->fs_info->chunk_root &&
50             root != root->fs_info->dev_root) {
51                 mutex_lock(&root->fs_info->alloc_mutex);
52         }
53 }
54
55 void maybe_unlock_mutex(struct btrfs_root *root)
56 {
57         if (root != root->fs_info->extent_root &&
58             root != root->fs_info->chunk_root &&
59             root != root->fs_info->dev_root) {
60                 mutex_unlock(&root->fs_info->alloc_mutex);
61         }
62 }
63
64 static int cache_block_group(struct btrfs_root *root,
65                              struct btrfs_block_group_cache *block_group)
66 {
67         struct btrfs_path *path;
68         int ret;
69         struct btrfs_key key;
70         struct extent_buffer *leaf;
71         struct extent_io_tree *free_space_cache;
72         int slot;
73         u64 last = 0;
74         u64 hole_size;
75         u64 first_free;
76         int found = 0;
77
78         if (!block_group)
79                 return 0;
80
81         root = root->fs_info->extent_root;
82         free_space_cache = &root->fs_info->free_space_cache;
83
84         if (block_group->cached)
85                 return 0;
86
87         path = btrfs_alloc_path();
88         if (!path)
89                 return -ENOMEM;
90
91         path->reada = 2;
92         /*
93          * we get into deadlocks with paths held by callers of this function.
94          * since the alloc_mutex is protecting things right now, just
95          * skip the locking here
96          */
97         path->skip_locking = 1;
98         first_free = block_group->key.objectid;
99         key.objectid = block_group->key.objectid;
100         key.offset = 0;
101         btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
102         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
103         if (ret < 0)
104                 return ret;
105         ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
106         if (ret < 0)
107                 return ret;
108         if (ret == 0) {
109                 leaf = path->nodes[0];
110                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
111                 if (key.objectid + key.offset > first_free)
112                         first_free = key.objectid + key.offset;
113         }
114         while(1) {
115                 leaf = path->nodes[0];
116                 slot = path->slots[0];
117                 if (slot >= btrfs_header_nritems(leaf)) {
118                         ret = btrfs_next_leaf(root, path);
119                         if (ret < 0)
120                                 goto err;
121                         if (ret == 0) {
122                                 continue;
123                         } else {
124                                 break;
125                         }
126                 }
127                 btrfs_item_key_to_cpu(leaf, &key, slot);
128                 if (key.objectid < block_group->key.objectid) {
129                         goto next;
130                 }
131                 if (key.objectid >= block_group->key.objectid +
132                     block_group->key.offset) {
133                         break;
134                 }
135
136                 if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
137                         if (!found) {
138                                 last = first_free;
139                                 found = 1;
140                         }
141                         if (key.objectid > last) {
142                                 hole_size = key.objectid - last;
143                                 set_extent_dirty(free_space_cache, last,
144                                                  last + hole_size - 1,
145                                                  GFP_NOFS);
146                         }
147                         last = key.objectid + key.offset;
148                 }
149 next:
150                 path->slots[0]++;
151         }
152
153         if (!found)
154                 last = first_free;
155         if (block_group->key.objectid +
156             block_group->key.offset > last) {
157                 hole_size = block_group->key.objectid +
158                         block_group->key.offset - last;
159                 set_extent_dirty(free_space_cache, last,
160                                  last + hole_size - 1, GFP_NOFS);
161         }
162         block_group->cached = 1;
163 err:
164         btrfs_free_path(path);
165         return 0;
166 }
167
168 struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
169                                                        btrfs_fs_info *info,
170                                                          u64 bytenr)
171 {
172         struct extent_io_tree *block_group_cache;
173         struct btrfs_block_group_cache *block_group = NULL;
174         u64 ptr;
175         u64 start;
176         u64 end;
177         int ret;
178
179         bytenr = max_t(u64, bytenr,
180                        BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
181         block_group_cache = &info->block_group_cache;
182         ret = find_first_extent_bit(block_group_cache,
183                                     bytenr, &start, &end,
184                                     BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
185                                     BLOCK_GROUP_SYSTEM);
186         if (ret) {
187                 return NULL;
188         }
189         ret = get_state_private(block_group_cache, start, &ptr);
190         if (ret)
191                 return NULL;
192
193         block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
194         return block_group;
195 }
196
197 struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
198                                                          btrfs_fs_info *info,
199                                                          u64 bytenr)
200 {
201         struct extent_io_tree *block_group_cache;
202         struct btrfs_block_group_cache *block_group = NULL;
203         u64 ptr;
204         u64 start;
205         u64 end;
206         int ret;
207
208         bytenr = max_t(u64, bytenr,
209                        BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
210         block_group_cache = &info->block_group_cache;
211         ret = find_first_extent_bit(block_group_cache,
212                                     bytenr, &start, &end,
213                                     BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
214                                     BLOCK_GROUP_SYSTEM);
215         if (ret) {
216                 return NULL;
217         }
218         ret = get_state_private(block_group_cache, start, &ptr);
219         if (ret)
220                 return NULL;
221
222         block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
223         if (block_group->key.objectid <= bytenr && bytenr <
224             block_group->key.objectid + block_group->key.offset)
225                 return block_group;
226         return NULL;
227 }
228
229 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
230 {
231         return (cache->flags & bits) == bits;
232 }
233
/*
 * Find a free span of at least @num bytes for an allocation of type
 * @data (block group flag bits), starting the search at *@start_ret
 * inside **@cache_ret and moving on to other block groups as needed.
 *
 * On success returns 0 with *start_ret set to the start of the free
 * area and *cache_ret pointing at the block group containing it.
 * Returns -ENOSPC once every candidate group (including one wrap back
 * to search_start) has been tried.
 *
 * Caller must hold fs_info->alloc_mutex (asserted below).
 */
static int noinline find_search_start(struct btrfs_root *root,
			      struct btrfs_block_group_cache **cache_ret,
			      u64 *start_ret, u64 num, int data)
{
	int ret;
	struct btrfs_block_group_cache *cache = *cache_ret;
	struct extent_io_tree *free_space_cache;
	struct extent_state *state;
	u64 last;
	u64 start = 0;
	u64 cache_miss = 0;
	u64 total_fs_bytes;
	u64 search_start = *start_ret;
	int wrapped = 0;

	WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
	total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	free_space_cache = &root->fs_info->free_space_cache;

	if (!cache)
		goto out;

again:
	/* make sure this group's free space is recorded in the cache */
	ret = cache_block_group(root, cache);
	if (ret) {
		goto out;
	}

	last = max(search_start, cache->key.objectid);
	/* wrong allocation type or read-only group: try the next one */
	if (!block_group_bits(cache, data) || cache->ro)
		goto new_group;

	spin_lock_irq(&free_space_cache->lock);
	state = find_first_extent_bit_state(free_space_cache, last, EXTENT_DIRTY);
	while(1) {
		if (!state) {
			/* remember where the free-space records ran out */
			if (!cache_miss)
				cache_miss = last;
			spin_unlock_irq(&free_space_cache->lock);
			goto new_group;
		}

		start = max(last, state->start);
		last = state->end + 1;
		if (last - start < num) {
			/* too small: advance to the next EXTENT_DIRTY state */
			do {
				state = extent_state_next(state);
			} while(state && !(state->state & EXTENT_DIRTY));
			continue;
		}
		spin_unlock_irq(&free_space_cache->lock);
		/* re-check ro after dropping the free-space spinlock */
		if (cache->ro) {
			goto new_group;
		}
		/* candidate must fit entirely inside this block group */
		if (start + num > cache->key.objectid + cache->key.offset)
			goto new_group;
		if (!block_group_bits(cache, data)) {
			printk("block group bits don't match %Lu %d\n", cache->flags, data);
		}
		*start_ret = start;
		return 0;
	}
out:
	cache = btrfs_lookup_block_group(root->fs_info, search_start);
	if (!cache) {
		printk("Unable to find block group for %Lu\n", search_start);
		WARN_ON(1);
	}
	return -ENOSPC;

new_group:
	last = cache->key.objectid + cache->key.offset;
wrapped:
	cache = btrfs_lookup_first_block_group(root->fs_info, last);
	/* past the end of the fs: wrap to search_start once, then give up */
	if (!cache || cache->key.objectid >= total_fs_bytes) {
no_cache:
		if (!wrapped) {
			wrapped = 1;
			last = search_start;
			goto wrapped;
		}
		goto out;
	}
	if (cache_miss && !cache->cached) {
		/* the miss happened in an uncached group: cache it and
		 * retry from the recorded miss position */
		cache_block_group(root, cache);
		last = cache_miss;
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
	}
	cache_miss = 0;
	cache = btrfs_find_block_group(root, cache, last, data, 0);
	if (!cache)
		goto no_cache;
	*cache_ret = cache;
	goto again;
}
329
330 static u64 div_factor(u64 num, int factor)
331 {
332         if (factor == 10)
333                 return num;
334         num *= factor;
335         do_div(num, 10);
336         return num;
337 }
338
339 static int block_group_state_bits(u64 flags)
340 {
341         int bits = 0;
342         if (flags & BTRFS_BLOCK_GROUP_DATA)
343                 bits |= BLOCK_GROUP_DATA;
344         if (flags & BTRFS_BLOCK_GROUP_METADATA)
345                 bits |= BLOCK_GROUP_METADATA;
346         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
347                 bits |= BLOCK_GROUP_SYSTEM;
348         return bits;
349 }
350
/*
 * Pick a block group to allocate from for allocation type @data.
 *
 * Preference order: the group containing @search_start, then @hint,
 * then a linear scan of the block_group_cache tree starting after the
 * hint.  A group qualifies when it is not read-only, matches the
 * requested flag bits, and its used + pinned bytes stay below a
 * fullness threshold (factor/10 of the group size; metadata starts at
 * 9/10 and relaxes to 10/10 on the final full pass).
 *
 * Returns the chosen group, or NULL when even the relaxed full search
 * finds nothing.  @owner is currently unused here.
 */
static struct btrfs_block_group_cache *
__btrfs_find_block_group(struct btrfs_root *root,
			 struct btrfs_block_group_cache *hint,
			 u64 search_start, int data, int owner)
{
	struct btrfs_block_group_cache *cache;
	struct extent_io_tree *block_group_cache;
	struct btrfs_block_group_cache *found_group = NULL;
	struct btrfs_fs_info *info = root->fs_info;
	u64 used;
	u64 last = 0;
	u64 start;
	u64 end;
	u64 free_check;
	u64 ptr;
	int bit;
	int ret;
	int full_search = 0;
	int factor = 10;
	int wrapped = 0;

	block_group_cache = &info->block_group_cache;

	/* leave some slack in metadata groups on the first passes */
	if (data & BTRFS_BLOCK_GROUP_METADATA)
		factor = 9;

	bit = block_group_state_bits(data);

	if (search_start) {
		struct btrfs_block_group_cache *shint;
		shint = btrfs_lookup_first_block_group(info, search_start);
		if (shint && block_group_bits(shint, data) && !shint->ro) {
			spin_lock(&shint->lock);
			used = btrfs_block_group_used(&shint->item);
			if (used + shint->pinned <
			    div_factor(shint->key.offset, factor)) {
				spin_unlock(&shint->lock);
				return shint;
			}
			spin_unlock(&shint->lock);
		}
	}
	if (hint && !hint->ro && block_group_bits(hint, data)) {
		spin_lock(&hint->lock);
		used = btrfs_block_group_used(&hint->item);
		if (used + hint->pinned <
		    div_factor(hint->key.offset, factor)) {
			spin_unlock(&hint->lock);
			return hint;
		}
		spin_unlock(&hint->lock);
		/* hint too full: scan starts just past it */
		last = hint->key.objectid + hint->key.offset;
	} else {
		if (hint)
			last = max(hint->key.objectid, search_start);
		else
			last = search_start;
	}
again:
	while(1) {
		ret = find_first_extent_bit(block_group_cache, last,
					    &start, &end, bit);
		if (ret)
			break;

		ret = get_state_private(block_group_cache, start, &ptr);
		if (ret) {
			/* no private pointer recorded here: skip past it */
			last = end + 1;
			continue;
		}

		cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if (!cache->ro && block_group_bits(cache, data)) {
			free_check = div_factor(cache->key.offset, factor);
			if (used + cache->pinned < free_check) {
				found_group = cache;
				spin_unlock(&cache->lock);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		cond_resched();
	}
	/* first retry: wrap the scan back around to search_start */
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	/* last resort: repeat with the fullness threshold relaxed to 10/10 */
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return found_group;
}
452
453 struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
454                                                  struct btrfs_block_group_cache
455                                                  *hint, u64 search_start,
456                                                  int data, int owner)
457 {
458
459         struct btrfs_block_group_cache *ret;
460         ret = __btrfs_find_block_group(root, hint, search_start, data, owner);
461         return ret;
462 }
/*
 * Fold the four backref fields into the single 64 bit value stored in
 * the backref key offset: crc32c of the root objectid forms the high
 * 32 bits; crc32c of the generation — plus owner and owner_offset for
 * file extents — forms the low 32 bits.  Tree block backrefs (owner
 * below BTRFS_FIRST_FREE_OBJECTID) leave owner/offset out of the hash.
 */
static u64 hash_extent_ref(u64 root_objectid, u64 ref_generation,
			   u64 owner, u64 owner_offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;
	/* hash little-endian representations so the result is
	 * byte-order independent across machines */
	lenum = cpu_to_le64(root_objectid);
	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(ref_generation);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	if (owner >= BTRFS_FIRST_FREE_OBJECTID) {
		lenum = cpu_to_le64(owner);
		low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
		lenum = cpu_to_le64(owner_offset);
		low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	}
	return ((u64)high_crc << 32) | (u64)low_crc;
}
481
482 static int match_extent_ref(struct extent_buffer *leaf,
483                             struct btrfs_extent_ref *disk_ref,
484                             struct btrfs_extent_ref *cpu_ref)
485 {
486         int ret;
487         int len;
488
489         if (cpu_ref->objectid)
490                 len = sizeof(*cpu_ref);
491         else
492                 len = 2 * sizeof(u64);
493         ret = memcmp_extent_buffer(leaf, cpu_ref, (unsigned long)disk_ref,
494                                    len);
495         return ret == 0;
496 }
497
/*
 * Find the backref item matching (bytenr, root_objectid, ref_generation,
 * owner, owner_offset).  The four ref fields are hashed into the key
 * offset, so collisions are possible: start the search at the hashed
 * offset and walk forward through the BTRFS_EXTENT_REF_KEY items for
 * this bytenr until a byte-for-byte match is found.
 *
 * @del: when non-zero the search is performed with the locks/cow mode
 *       needed to delete the item, and the path is re-searched from
 *       scratch for each candidate so the final path is delete-ready.
 *
 * Returns 0 with the path positioned on the matching item, a positive
 * value when no match exists, or a negative errno.
 */
static int noinline lookup_extent_backref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, u64 bytenr,
					  u64 root_objectid,
					  u64 ref_generation, u64 owner,
					  u64 owner_offset, int del)
{
	u64 hash;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_extent_ref ref;
	struct extent_buffer *leaf;
	struct btrfs_extent_ref *disk_ref;
	int ret;
	int ret2;

	/* build the cpu-side ref we will compare candidates against */
	btrfs_set_stack_ref_root(&ref, root_objectid);
	btrfs_set_stack_ref_generation(&ref, ref_generation);
	btrfs_set_stack_ref_objectid(&ref, owner);
	btrfs_set_stack_ref_offset(&ref, owner_offset);

	hash = hash_extent_ref(root_objectid, ref_generation, owner,
			       owner_offset);
	key.offset = hash;
	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_REF_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path,
					del ? -1 : 0, del);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		if (ret != 0) {
			/* no exact key: look at the next item in order */
			u32 nritems = btrfs_header_nritems(leaf);
			if (path->slots[0] >= nritems) {
				ret2 = btrfs_next_leaf(root, path);
				if (ret2)
					goto out;
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
			if (found_key.objectid != bytenr ||
			    found_key.type != BTRFS_EXTENT_REF_KEY)
				goto out;
			key.offset = found_key.offset;
			if (del) {
				/* re-search so the path is delete-ready
				 * at exactly this key */
				btrfs_release_path(root, path);
				continue;
			}
		}
		disk_ref = btrfs_item_ptr(path->nodes[0],
					  path->slots[0],
					  struct btrfs_extent_ref);
		if (match_extent_ref(path->nodes[0], disk_ref, &ref)) {
			ret = 0;
			goto out;
		}
		/* hash collision with a different ref: probe forward */
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		key.offset = found_key.offset + 1;
		btrfs_release_path(root, path);
	}
out:
	return ret;
}
563
564 /*
565  * Back reference rules.  Back refs have three main goals:
566  *
567  * 1) differentiate between all holders of references to an extent so that
568  *    when a reference is dropped we can make sure it was a valid reference
569  *    before freeing the extent.
570  *
571  * 2) Provide enough information to quickly find the holders of an extent
572  *    if we notice a given block is corrupted or bad.
573  *
574  * 3) Make it easy to migrate blocks for FS shrinking or storage pool
575  *    maintenance.  This is actually the same as #2, but with a slightly
576  *    different use case.
577  *
578  * File extents can be referenced by:
579  *
580  * - multiple snapshots, subvolumes, or different generations in one subvol
581  * - different files inside a single subvolume (in theory, not implemented yet)
582  * - different offsets inside a file (bookend extents in file.c)
583  *
584  * The extent ref structure has fields for:
585  *
586  * - Objectid of the subvolume root
587  * - Generation number of the tree holding the reference
588  * - objectid of the file holding the reference
589  * - offset in the file corresponding to the key holding the reference
590  *
591  * When a file extent is allocated the fields are filled in:
592  *     (root_key.objectid, trans->transid, inode objectid, offset in file)
593  *
594  * When a leaf is cow'd new references are added for every file extent found
595  * in the leaf.  It looks the same as the create case, but trans->transid
596  * will be different when the block is cow'd.
597  *
598  *     (root_key.objectid, trans->transid, inode objectid, offset in file)
599  *
600  * When a file extent is removed either during snapshot deletion or file
601  * truncation, the corresponding back reference is found
602  * by searching for:
603  *
604  *     (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
605  *      inode objectid, offset in file)
606  *
607  * Btree extents can be referenced by:
608  *
609  * - Different subvolumes
610  * - Different generations of the same subvolume
611  *
612  * Storing sufficient information for a full reverse mapping of a btree
613  * block would require storing the lowest key of the block in the backref,
614  * and it would require updating that lowest key either before write out or
615  * every time it changed.  Instead, the objectid of the lowest key is stored
616  * along with the level of the tree block.  This provides a hint
617  * about where in the btree the block can be found.  Searches through the
618  * btree only need to look for a pointer to that block, so they stop one
619  * level higher than the level recorded in the backref.
620  *
621  * Some btrees do not do reference counting on their extents.  These
622  * include the extent tree and the tree of tree roots.  Backrefs for these
623  * trees always have a generation of zero.
624  *
625  * When a tree block is created, back references are inserted:
626  *
627  * (root->root_key.objectid, trans->transid or zero, level, lowest_key_objectid)
628  *
629  * When a tree block is cow'd in a reference counted root,
630  * new back references are added for all the blocks it points to.
631  * These are of the form (trans->transid will have increased since creation):
632  *
633  * (root->root_key.objectid, trans->transid, level, lowest_key_objectid)
634  *
635  * Because the lowest_key_objectid and the level are just hints
636  * they are not used when backrefs are deleted.  When a backref is deleted:
637  *
638  * if backref was for a tree root:
639  *     root_objectid = root->root_key.objectid
640  * else
641  *     root_objectid = btrfs_header_owner(parent)
642  *
643  * (root_objectid, btrfs_header_generation(parent) or zero, 0, 0)
644  *
645  * Back Reference Key hashing:
646  *
647  * Back references have four fields, each 64 bits long.  Unfortunately,
648  * this is hashed into a single 64 bit number and placed into the key offset.
649  * The key objectid corresponds to the first byte in the extent, and the
650  * key type is set to BTRFS_EXTENT_REF_KEY
651  */
/*
 * Insert a backref item for the reference described by (root_objectid,
 * ref_generation, owner, owner_offset) on the extent at @bytenr.  The
 * key offset is the 64 bit hash of those fields; hash collisions are
 * resolved by linear probing (key.offset++).  Finding an identical
 * already-present ref counts as success.
 *
 * Returns 0 on success or a negative errno from the insertion.  @path
 * is released before returning.
 */
int btrfs_insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path, u64 bytenr,
				 u64 root_objectid, u64 ref_generation,
				 u64 owner, u64 owner_offset)
{
	u64 hash;
	struct btrfs_key key;
	struct btrfs_extent_ref ref;
	struct btrfs_extent_ref *disk_ref;
	int ret;

	btrfs_set_stack_ref_root(&ref, root_objectid);
	btrfs_set_stack_ref_generation(&ref, ref_generation);
	btrfs_set_stack_ref_objectid(&ref, owner);
	btrfs_set_stack_ref_offset(&ref, owner_offset);

	hash = hash_extent_ref(root_objectid, ref_generation, owner,
			       owner_offset);
	key.offset = hash;
	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_REF_KEY;

	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(ref));
	while (ret == -EEXIST) {
		/* collision: if the occupant is our ref we are done,
		 * otherwise probe the next key offset */
		disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
					  struct btrfs_extent_ref);
		if (match_extent_ref(path->nodes[0], disk_ref, &ref))
			goto out;
		key.offset++;
		btrfs_release_path(root, path);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      sizeof(ref));
	}
	if (ret)
		goto out;
	/* fill in the freshly inserted (empty) item */
	disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				  struct btrfs_extent_ref);
	write_extent_buffer(path->nodes[0], &ref, (unsigned long)disk_ref,
			    sizeof(ref));
	btrfs_mark_buffer_dirty(path->nodes[0]);
out:
	btrfs_release_path(root, path);
	return ret;
}
697
698 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
699                                 struct btrfs_root *root,
700                                 u64 bytenr, u64 num_bytes,
701                                 u64 root_objectid, u64 ref_generation,
702                                 u64 owner, u64 owner_offset)
703 {
704         struct btrfs_path *path;
705         int ret;
706         struct btrfs_key key;
707         struct extent_buffer *l;
708         struct btrfs_extent_item *item;
709         u32 refs;
710
711         WARN_ON(num_bytes < root->sectorsize);
712         path = btrfs_alloc_path();
713         if (!path)
714                 return -ENOMEM;
715
716         path->reada = 1;
717         key.objectid = bytenr;
718         btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
719         key.offset = num_bytes;
720         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
721                                 0, 1);
722         if (ret < 0)
723                 return ret;
724         if (ret != 0) {
725                 BUG();
726         }
727         BUG_ON(ret != 0);
728         l = path->nodes[0];
729         item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
730         refs = btrfs_extent_refs(l, item);
731         btrfs_set_extent_refs(l, item, refs + 1);
732         btrfs_mark_buffer_dirty(path->nodes[0]);
733
734         btrfs_release_path(root->fs_info->extent_root, path);
735
736         path->reada = 1;
737         ret = btrfs_insert_extent_backref(trans, root->fs_info->extent_root,
738                                           path, bytenr, root_objectid,
739                                           ref_generation, owner, owner_offset);
740         BUG_ON(ret);
741         finish_current_insert(trans, root->fs_info->extent_root);
742         del_pending_extents(trans, root->fs_info->extent_root);
743
744         btrfs_free_path(path);
745         return 0;
746 }
747
748 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
749                                 struct btrfs_root *root,
750                                 u64 bytenr, u64 num_bytes,
751                                 u64 root_objectid, u64 ref_generation,
752                                 u64 owner, u64 owner_offset)
753 {
754         int ret;
755
756         mutex_lock(&root->fs_info->alloc_mutex);
757         ret = __btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
758                                      root_objectid, ref_generation,
759                                      owner, owner_offset);
760         mutex_unlock(&root->fs_info->alloc_mutex);
761         return ret;
762 }
763
764 int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
765                          struct btrfs_root *root)
766 {
767         finish_current_insert(trans, root->fs_info->extent_root);
768         del_pending_extents(trans, root->fs_info->extent_root);
769         return 0;
770 }
771
772 static int lookup_extent_ref(struct btrfs_trans_handle *trans,
773                              struct btrfs_root *root, u64 bytenr,
774                              u64 num_bytes, u32 *refs)
775 {
776         struct btrfs_path *path;
777         int ret;
778         struct btrfs_key key;
779         struct extent_buffer *l;
780         struct btrfs_extent_item *item;
781
782         WARN_ON(num_bytes < root->sectorsize);
783         path = btrfs_alloc_path();
784         path->reada = 1;
785         key.objectid = bytenr;
786         key.offset = num_bytes;
787         btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
788         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
789                                 0, 0);
790         if (ret < 0)
791                 goto out;
792         if (ret != 0) {
793                 btrfs_print_leaf(root, path->nodes[0]);
794                 printk("failed to find block number %Lu\n", bytenr);
795                 BUG();
796         }
797         l = path->nodes[0];
798         item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
799         *refs = btrfs_extent_refs(l, item);
800 out:
801         btrfs_free_path(path);
802         return 0;
803 }
804
805
/*
 * Scan the backrefs of the extent at @bytenr and classify who holds
 * references to it.
 *
 * Outputs:
 *   *ref_count      - 0 if no matching backrefs were walked, 1 while all
 *                     backrefs seen belong to this root (and match
 *                     @ref_objectid / @parent_gen constraints), 2 as soon
 *                     as a backref from another root/objectid (or an
 *                     older generation than @parent_gen) is found.
 *   *min_generation - smallest generation among the matching backrefs
 *                     (starts at (u64)-1).
 *
 * Returns 0 on success, 1 if no extent item exists at @bytenr, or a
 * negative error from the tree search.  Takes and releases
 * fs_info->alloc_mutex around the whole walk.
 */
static int get_reference_status(struct btrfs_root *root, u64 bytenr,
				u64 parent_gen, u64 ref_objectid,
				u64 *min_generation, u32 *ref_count)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_ref *ref_item;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 root_objectid = root->root_key.objectid;
	u64 ref_generation;
	u32 nritems;
	int ret;

	/* offset 0 can never match a real extent item, so the search
	 * lands just before bytenr's items and ret == 0 is impossible */
	key.objectid = bytenr;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;

	/* NOTE(review): btrfs_alloc_path() result is not NULL-checked here */
	path = btrfs_alloc_path();
	mutex_lock(&root->fs_info->alloc_mutex);
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret == 0);

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

	/* no extent item at bytenr at all */
	if (found_key.objectid != bytenr ||
	    found_key.type != BTRFS_EXTENT_ITEM_KEY) {
		ret = 1;
		goto out;
	}

	*ref_count = 0;
	*min_generation = (u64)-1;

	/* walk every item keyed at bytenr, following leaf boundaries */
	while (1) {
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret == 0)
				continue;
			break;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != bytenr)
			break;

		/* skip the extent item itself and any non-backref items */
		if (found_key.type != BTRFS_EXTENT_REF_KEY) {
			path->slots[0]++;
			continue;
		}

		ref_item = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_extent_ref);
		ref_generation = btrfs_ref_generation(leaf, ref_item);
		/*
		 * For (parent_gen > 0 && parent_gen > ref_gen):
		 *
		 * we reach here through the oldest root, therefore
		 * all other reference from same snapshot should have
		 * a larger generation.
		 */
		if ((root_objectid != btrfs_ref_root(leaf, ref_item)) ||
		    (parent_gen > 0 && parent_gen > ref_generation) ||
		    (ref_objectid >= BTRFS_FIRST_FREE_OBJECTID &&
		     ref_objectid != btrfs_ref_objectid(leaf, ref_item))) {
			/* cross reference found: report it and stop early */
			if (ref_count)
				*ref_count = 2;
			break;
		}

		*ref_count = 1;
		if (*min_generation > ref_generation)
			*min_generation = ref_generation;

		path->slots[0]++;
	}
	ret = 0;
out:
	mutex_unlock(&root->fs_info->alloc_mutex);
	btrfs_free_path(path);
	return ret;
}
895
/*
 * Decide whether the data extent at @bytenr (described by file-extent
 * key @key) might also be referenced from another snapshot.
 *
 * Returns 0 when only this path references the extent, 1 when a cross
 * reference (or the possibility of one) was found, or a negative error.
 * The walk re-checks every node on the path in the committed old root
 * (root->dirty_root->root), plus the data extent itself (the level == -1
 * iteration), via get_reference_status().
 */
int btrfs_cross_ref_exists(struct btrfs_root *root,
			   struct btrfs_key *key, u64 bytenr)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *old_root;
	struct btrfs_path *path = NULL;
	struct extent_buffer *eb;
	struct btrfs_file_extent_item *item;
	u64 ref_generation;
	u64 min_generation;
	u64 extent_start;
	u32 ref_count;
	int level;
	int ret;

	BUG_ON(key->type != BTRFS_EXTENT_DATA_KEY);
	ret = get_reference_status(root, bytenr, 0, key->objectid,
				   &min_generation, &ref_count);
	if (ret)
		return ret;

	/* more than one backref on the data extent: cross ref exists */
	if (ref_count != 1)
		return 1;

	/* hold a transaction open so the old root cannot be freed */
	trans = btrfs_start_transaction(root, 0);
	old_root = root->dirty_root->root;
	ref_generation = old_root->root_key.offset;

	/* all references are created in running transaction */
	if (min_generation > ref_generation) {
		ret = 0;
		goto out;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	path->skip_locking = 1;
	/* if no item found, the extent is referenced by other snapshot */
	ret = btrfs_search_slot(NULL, old_root, key, path, 0, 0);
	if (ret)
		goto out;

	eb = path->nodes[0];
	item = btrfs_item_ptr(eb, path->slots[0],
			      struct btrfs_file_extent_item);
	/* old root points at a different extent now: must be cross ref'd */
	if (btrfs_file_extent_type(eb, item) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(eb, item) != bytenr) {
		ret = 1;
		goto out;
	}

	/*
	 * check every tree block on the old path, then (level == -1)
	 * the data extent itself, for references from other roots
	 */
	for (level = BTRFS_MAX_LEVEL - 1; level >= -1; level--) {
		if (level >= 0) {
			eb = path->nodes[level];
			if (!eb)
				continue;
			extent_start = eb->start;
		} else
			extent_start = bytenr;

		ret = get_reference_status(root, extent_start, ref_generation,
					   0, &min_generation, &ref_count);
		if (ret)
			goto out;

		if (ref_count != 1) {
			ret = 1;
			goto out;
		}
		/* tighten the generation bound for the next level down */
		if (level >= 0)
			ref_generation = btrfs_header_generation(eb);
	}
	ret = 0;
out:
	if (path)
		btrfs_free_path(path);
	btrfs_end_transaction(trans, root);
	return ret;
}
979
/*
 * Add one extent reference for everything @buf points to: each regular
 * (non-inline, non-hole) file extent in a leaf, or each child block in
 * a node.  Optionally (@cache_ref) record the leaf's file extents in
 * the root's leaf-ref cache for cheaper freeing later.
 *
 * Returns 0 on success (including the error-cache path, which only
 * WARNs); on a failed ref increment it jumps to fail and returns ret.
 * No-op for trees that do not reference-count cow'd blocks.
 */
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		  struct extent_buffer *buf, int cache_ref)
{
	u64 bytenr;
	u32 nritems;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int level;
	int ret;
	int faili;
	int nr_file_extents = 0;

	/* only snapshot-able (ref_cows) trees track per-extent refs */
	if (!root->ref_cows)
		return 0;

	level = btrfs_header_level(buf);
	nritems = btrfs_header_nritems(buf);
	for (i = 0; i < nritems; i++) {
		cond_resched();
		if (level == 0) {
			u64 disk_bytenr;
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			/* inline extents live in the leaf, nothing to ref */
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			/* bytenr 0 marks a hole */
			disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (disk_bytenr == 0)
				continue;

			if (buf != root->commit_root)
				nr_file_extents++;

			mutex_lock(&root->fs_info->alloc_mutex);
			ret = __btrfs_inc_extent_ref(trans, root, disk_bytenr,
				    btrfs_file_extent_disk_num_bytes(buf, fi),
				    root->root_key.objectid, trans->transid,
				    key.objectid, key.offset);
			mutex_unlock(&root->fs_info->alloc_mutex);
			if (ret) {
				faili = i;
				WARN_ON(1);
				goto fail;
			}
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			btrfs_node_key_to_cpu(buf, &key, i);

			mutex_lock(&root->fs_info->alloc_mutex);
			ret = __btrfs_inc_extent_ref(trans, root, bytenr,
					   btrfs_level_size(root, level - 1),
					   root->root_key.objectid,
					   trans->transid,
					   level - 1, key.objectid);
			mutex_unlock(&root->fs_info->alloc_mutex);
			if (ret) {
				faili = i;
				WARN_ON(1);
				goto fail;
			}
		}
	}
	/* cache original leaf block's references */
	if (level == 0 && cache_ref && buf != root->commit_root) {
		struct btrfs_leaf_ref *ref;
		struct btrfs_extent_info *info;

		ref = btrfs_alloc_leaf_ref(root, nr_file_extents);
		if (!ref) {
			/* cache miss is tolerated; refs were still added */
			WARN_ON(1);
			goto out;
		}

		ref->root_gen = root->root_key.offset;
		ref->bytenr = buf->start;
		ref->owner = btrfs_header_owner(buf);
		ref->generation = btrfs_header_generation(buf);
		ref->nritems = nr_file_extents;
		info = ref->extents;

		/* second pass: same filtering as above, fill the cache */
		for (i = 0; nr_file_extents > 0 && i < nritems; i++) {
			u64 disk_bytenr;
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (disk_bytenr == 0)
				continue;

			info->bytenr = disk_bytenr;
			info->num_bytes =
				btrfs_file_extent_disk_num_bytes(buf, fi);
			info->objectid = key.objectid;
			info->offset = key.offset;
			info++;
		}

		BUG_ON(!root->ref_tree);
		ret = btrfs_add_leaf_ref(root, ref);
		WARN_ON(ret);
		btrfs_free_leaf_ref(root, ref);
	}
out:
	return 0;
fail:
	WARN_ON(1);
#if 0
	for (i =0; i < faili; i++) {
		if (level == 0) {
			u64 disk_bytenr;
			btrfs_item_key_to_cpu(buf, &key, i);
			if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
				continue;
			fi = btrfs_item_ptr(buf, i,
					    struct btrfs_file_extent_item);
			if (btrfs_file_extent_type(buf, fi) ==
			    BTRFS_FILE_EXTENT_INLINE)
				continue;
			disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
			if (disk_bytenr == 0)
				continue;
			err = btrfs_free_extent(trans, root, disk_bytenr,
				    btrfs_file_extent_disk_num_bytes(buf,
								      fi), 0);
			BUG_ON(err);
		} else {
			bytenr = btrfs_node_blockptr(buf, i);
			err = btrfs_free_extent(trans, root, bytenr,
					btrfs_level_size(root, level - 1), 0);
			BUG_ON(err);
		}
	}
#endif
	return ret;
}
1124
1125 static int write_one_cache_group(struct btrfs_trans_handle *trans,
1126                                  struct btrfs_root *root,
1127                                  struct btrfs_path *path,
1128                                  struct btrfs_block_group_cache *cache)
1129 {
1130         int ret;
1131         int pending_ret;
1132         struct btrfs_root *extent_root = root->fs_info->extent_root;
1133         unsigned long bi;
1134         struct extent_buffer *leaf;
1135
1136         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
1137         if (ret < 0)
1138                 goto fail;
1139         BUG_ON(ret);
1140
1141         leaf = path->nodes[0];
1142         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
1143         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
1144         btrfs_mark_buffer_dirty(leaf);
1145         btrfs_release_path(extent_root, path);
1146 fail:
1147         finish_current_insert(trans, extent_root);
1148         pending_ret = del_pending_extents(trans, extent_root);
1149         if (ret)
1150                 return ret;
1151         if (pending_ret)
1152                 return pending_ret;
1153         return 0;
1154
1155 }
1156
1157 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
1158                                    struct btrfs_root *root)
1159 {
1160         struct extent_io_tree *block_group_cache;
1161         struct btrfs_block_group_cache *cache;
1162         int ret;
1163         int err = 0;
1164         int werr = 0;
1165         struct btrfs_path *path;
1166         u64 last = 0;
1167         u64 start;
1168         u64 end;
1169         u64 ptr;
1170
1171         block_group_cache = &root->fs_info->block_group_cache;
1172         path = btrfs_alloc_path();
1173         if (!path)
1174                 return -ENOMEM;
1175
1176         mutex_lock(&root->fs_info->alloc_mutex);
1177         while(1) {
1178                 ret = find_first_extent_bit(block_group_cache, last,
1179                                             &start, &end, BLOCK_GROUP_DIRTY);
1180                 if (ret)
1181                         break;
1182
1183                 last = end + 1;
1184                 ret = get_state_private(block_group_cache, start, &ptr);
1185                 if (ret)
1186                         break;
1187                 cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
1188                 err = write_one_cache_group(trans, root,
1189                                             path, cache);
1190                 /*
1191                  * if we fail to write the cache group, we want
1192                  * to keep it marked dirty in hopes that a later
1193                  * write will work
1194                  */
1195                 if (err) {
1196                         werr = err;
1197                         continue;
1198                 }
1199                 clear_extent_bits(block_group_cache, start, end,
1200                                   BLOCK_GROUP_DIRTY, GFP_NOFS);
1201         }
1202         btrfs_free_path(path);
1203         mutex_unlock(&root->fs_info->alloc_mutex);
1204         return werr;
1205 }
1206
1207 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
1208                                                   u64 flags)
1209 {
1210         struct list_head *head = &info->space_info;
1211         struct list_head *cur;
1212         struct btrfs_space_info *found;
1213         list_for_each(cur, head) {
1214                 found = list_entry(cur, struct btrfs_space_info, list);
1215                 if (found->flags == flags)
1216                         return found;
1217         }
1218         return NULL;
1219
1220 }
1221
1222 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
1223                              u64 total_bytes, u64 bytes_used,
1224                              struct btrfs_space_info **space_info)
1225 {
1226         struct btrfs_space_info *found;
1227
1228         found = __find_space_info(info, flags);
1229         if (found) {
1230                 found->total_bytes += total_bytes;
1231                 found->bytes_used += bytes_used;
1232                 found->full = 0;
1233                 WARN_ON(found->total_bytes < found->bytes_used);
1234                 *space_info = found;
1235                 return 0;
1236         }
1237         found = kmalloc(sizeof(*found), GFP_NOFS);
1238         if (!found)
1239                 return -ENOMEM;
1240
1241         list_add(&found->list, &info->space_info);
1242         found->flags = flags;
1243         found->total_bytes = total_bytes;
1244         found->bytes_used = bytes_used;
1245         found->bytes_pinned = 0;
1246         found->full = 0;
1247         found->force_alloc = 0;
1248         *space_info = found;
1249         return 0;
1250 }
1251
1252 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
1253 {
1254         u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
1255                                    BTRFS_BLOCK_GROUP_RAID1 |
1256                                    BTRFS_BLOCK_GROUP_RAID10 |
1257                                    BTRFS_BLOCK_GROUP_DUP);
1258         if (extra_flags) {
1259                 if (flags & BTRFS_BLOCK_GROUP_DATA)
1260                         fs_info->avail_data_alloc_bits |= extra_flags;
1261                 if (flags & BTRFS_BLOCK_GROUP_METADATA)
1262                         fs_info->avail_metadata_alloc_bits |= extra_flags;
1263                 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
1264                         fs_info->avail_system_alloc_bits |= extra_flags;
1265         }
1266 }
1267
1268 static u64 reduce_alloc_profile(struct btrfs_root *root, u64 flags)
1269 {
1270         u64 num_devices = root->fs_info->fs_devices->num_devices;
1271
1272         if (num_devices == 1)
1273                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
1274         if (num_devices < 4)
1275                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
1276
1277         if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
1278             (flags & (BTRFS_BLOCK_GROUP_RAID1 |
1279                       BTRFS_BLOCK_GROUP_RAID10))) {
1280                 flags &= ~BTRFS_BLOCK_GROUP_DUP;
1281         }
1282
1283         if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
1284             (flags & BTRFS_BLOCK_GROUP_RAID10)) {
1285                 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
1286         }
1287
1288         if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
1289             ((flags & BTRFS_BLOCK_GROUP_RAID1) |
1290              (flags & BTRFS_BLOCK_GROUP_RAID10) |
1291              (flags & BTRFS_BLOCK_GROUP_DUP)))
1292                 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
1293         return flags;
1294 }
1295
/*
 * Possibly allocate a new chunk (and its block group) for the profile
 * in @flags.  Unless @force (or the pool's force_alloc flag) is set,
 * allocation is skipped while used + pinned + @alloc_bytes stays under
 * ~60% of the pool's total.  ENOSPC from the chunk allocator marks the
 * pool full instead of failing; always returns 0.
 */
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force)
{
	struct btrfs_space_info *space_info;
	u64 thresh;
	u64 start;
	u64 num_bytes;
	int ret;

	flags = reduce_alloc_profile(extent_root, flags);

	/* create the space_info lazily on first use of this profile */
	space_info = __find_space_info(extent_root->fs_info, flags);
	if (!space_info) {
		ret = update_space_info(extent_root->fs_info, flags,
					0, 0, &space_info);
		BUG_ON(ret);
	}
	BUG_ON(!space_info);

	/* a one-shot force request, consumed here */
	if (space_info->force_alloc) {
		force = 1;
		space_info->force_alloc = 0;
	}
	if (space_info->full)
		goto out;

	/* thresh is ~60% of the pool; below it we don't grow */
	thresh = div_factor(space_info->total_bytes, 6);
	if (!force &&
	   (space_info->bytes_used + space_info->bytes_pinned + alloc_bytes) <
	    thresh)
		goto out;

	mutex_lock(&extent_root->fs_info->chunk_mutex);
	ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes, flags);
	if (ret == -ENOSPC) {
printk("space info full %Lu\n", flags);
		/* remember the pool is exhausted; not an error for callers */
		space_info->full = 1;
		goto out_unlock;
	}
	BUG_ON(ret);

	ret = btrfs_make_block_group(trans, extent_root, 0, flags,
		     BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes);
	BUG_ON(ret);
out_unlock:
	mutex_unlock(&extent_root->fs_info->chunk_mutex);
out:
	return 0;
}
1346
/*
 * Adjust the used-byte accounting for the range [@bytenr, @bytenr +
 * @num_bytes): add when @alloc is set, subtract otherwise.  The range
 * may span several block groups; each touched group is marked dirty so
 * it gets written back.  On free, @mark_free also returns the bytes to
 * the free space cache.  Caller must hold alloc_mutex.  Returns -1 if
 * a byte range has no block group, 0 otherwise.
 */
static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc,
			      int mark_free)
{
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total = num_bytes;
	u64 old_val;
	u64 byte_in_group;
	u64 start;
	u64 end;

	WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
	while(total) {
		cache = btrfs_lookup_block_group(info, bytenr);
		if (!cache) {
			return -1;
		}
		byte_in_group = bytenr - cache->key.objectid;
		WARN_ON(byte_in_group > cache->key.offset);
		start = cache->key.objectid;
		end = start + cache->key.offset - 1;
		/* mark the group dirty so write_dirty_block_groups sees it */
		set_extent_bits(&info->block_group_cache, start, end,
				BLOCK_GROUP_DIRTY, GFP_NOFS);

		/* cache->lock protects the item and space_info counters */
		spin_lock(&cache->lock);
		old_val = btrfs_block_group_used(&cache->item);
		/* clamp to what fits in this block group */
		num_bytes = min(total, cache->key.offset - byte_in_group);
		if (alloc) {
			old_val += num_bytes;
			cache->space_info->bytes_used += num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			spin_unlock(&cache->lock);
		} else {
			old_val -= num_bytes;
			cache->space_info->bytes_used -= num_bytes;
			btrfs_set_block_group_used(&cache->item, old_val);
			spin_unlock(&cache->lock);
			if (mark_free) {
				set_extent_dirty(&info->free_space_cache,
						 bytenr, bytenr + num_bytes - 1,
						 GFP_NOFS);
			}
		}
		total -= num_bytes;
		bytenr += num_bytes;
	}
	return 0;
}
1397
1398 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
1399 {
1400         u64 start;
1401         u64 end;
1402         int ret;
1403         ret = find_first_extent_bit(&root->fs_info->block_group_cache,
1404                                     search_start, &start, &end,
1405                                     BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
1406                                     BLOCK_GROUP_SYSTEM);
1407         if (ret)
1408                 return 0;
1409         return start;
1410 }
1411
1412
/*
 * Pin or unpin the byte range [@bytenr, @bytenr + @num) and keep the
 * per-block-group, per-space-info and global pinned counters in sync.
 * The range may cross block group boundaries and is processed
 * piecewise; bytes with no block group still count toward
 * fs_info->total_pinned.  Caller must hold alloc_mutex.  Always
 * returns 0.
 */
static int update_pinned_extents(struct btrfs_root *root,
				u64 bytenr, u64 num, int pin)
{
	u64 len;
	struct btrfs_block_group_cache *cache;
	struct btrfs_fs_info *fs_info = root->fs_info;

	WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
	/* the pinned_extents tree is the authoritative record */
	if (pin) {
		set_extent_dirty(&fs_info->pinned_extents,
				bytenr, bytenr + num - 1, GFP_NOFS);
	} else {
		clear_extent_dirty(&fs_info->pinned_extents,
				bytenr, bytenr + num - 1, GFP_NOFS);
	}
	while (num > 0) {
		cache = btrfs_lookup_block_group(fs_info, bytenr);
		if (!cache) {
			/* skip forward to the next block group, if any */
			u64 first = first_logical_byte(root, bytenr);
			WARN_ON(first < bytenr);
			len = min(first - bytenr, num);
		} else {
			/* clamp to the end of this block group */
			len = min(num, cache->key.offset -
				  (bytenr - cache->key.objectid));
		}
		if (pin) {
			if (cache) {
				spin_lock(&cache->lock);
				cache->pinned += len;
				cache->space_info->bytes_pinned += len;
				spin_unlock(&cache->lock);
			}
			fs_info->total_pinned += len;
		} else {
			if (cache) {
				spin_lock(&cache->lock);
				cache->pinned -= len;
				cache->space_info->bytes_pinned -= len;
				spin_unlock(&cache->lock);
			}
			fs_info->total_pinned -= len;
		}
		bytenr += len;
		num -= len;
	}
	return 0;
}
1460
1461 int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
1462 {
1463         u64 last = 0;
1464         u64 start;
1465         u64 end;
1466         struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
1467         int ret;
1468
1469         while(1) {
1470                 ret = find_first_extent_bit(pinned_extents, last,
1471                                             &start, &end, EXTENT_DIRTY);
1472                 if (ret)
1473                         break;
1474                 set_extent_dirty(copy, start, end, GFP_NOFS);
1475                 last = end + 1;
1476         }
1477         return 0;
1478 }
1479
/*
 * After a transaction commit, release every range recorded in @unpin:
 * drop the pinned accounting, clear the range from @unpin and hand the
 * bytes back to the free space cache.  The alloc_mutex is dropped and
 * retaken around cond_resched() so long unpin runs don't hog the lock.
 * Always returns 0.
 */
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct extent_io_tree *unpin)
{
	u64 start;
	u64 end;
	int ret;
	struct extent_io_tree *free_space_cache;
	free_space_cache = &root->fs_info->free_space_cache;

	mutex_lock(&root->fs_info->alloc_mutex);
	while(1) {
		/* always restart from 0: each pass clears what it found */
		ret = find_first_extent_bit(unpin, 0, &start, &end,
					    EXTENT_DIRTY);
		if (ret)
			break;
		update_pinned_extents(root, start, end + 1 - start, 0);
		clear_extent_dirty(unpin, start, end, GFP_NOFS);
		set_extent_dirty(free_space_cache, start, end, GFP_NOFS);
		if (need_resched()) {
			mutex_unlock(&root->fs_info->alloc_mutex);
			cond_resched();
			mutex_lock(&root->fs_info->alloc_mutex);
		}
	}
	mutex_unlock(&root->fs_info->alloc_mutex);
	return 0;
}
1508
/*
 * Drain the extent_ins queue: for every range marked EXTENT_LOCKED,
 * insert an extent item with refcount 1 plus a backref keyed by the
 * tree block's first key.  Caller must hold alloc_mutex; it is
 * temporarily dropped around blocking reads and cond_resched().
 * Always returns 0 (insert failures hit BUG_ON).
 */
static int finish_current_insert(struct btrfs_trans_handle *trans,
				 struct btrfs_root *extent_root)
{
	u64 start;
	u64 end;
	struct btrfs_fs_info *info = extent_root->fs_info;
	struct extent_buffer *eb;
	struct btrfs_path *path;
	struct btrfs_key ins;
	struct btrfs_disk_key first;
	struct btrfs_extent_item extent_item;
	int ret;
	int level;
	int err = 0;

	WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
	btrfs_set_stack_extent_refs(&extent_item, 1);
	btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
	/* NOTE(review): btrfs_alloc_path() result is not NULL-checked here */
	path = btrfs_alloc_path();

	while(1) {
		ret = find_first_extent_bit(&info->extent_ins, 0, &start,
					    &end, EXTENT_LOCKED);
		if (ret)
			break;

		/* the locked range encodes (bytenr, num_bytes) of the block */
		ins.objectid = start;
		ins.offset = end + 1 - start;
		err = btrfs_insert_item(trans, extent_root, &ins,
					&extent_item, sizeof(extent_item));
		clear_extent_bits(&info->extent_ins, start, end, EXTENT_LOCKED,
				  GFP_NOFS);

		eb = btrfs_find_tree_block(extent_root, ins.objectid,
					   ins.offset);

		/* reading may block; drop the allocator lock while we wait */
		if (!btrfs_buffer_uptodate(eb, trans->transid)) {
			mutex_unlock(&extent_root->fs_info->alloc_mutex);
			btrfs_read_buffer(eb, trans->transid);
			mutex_lock(&extent_root->fs_info->alloc_mutex);
		}

		btrfs_tree_lock(eb);
		level = btrfs_header_level(eb);
		if (level == 0) {
			btrfs_item_key(eb, &first, 0);
		} else {
			btrfs_node_key(eb, &first, 0);
		}
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
		/*
		 * the first key is just a hint, so the race we've created
		 * against reading it is fine
		 */
		err = btrfs_insert_extent_backref(trans, extent_root, path,
					  start, extent_root->root_key.objectid,
					  0, level,
					  btrfs_disk_key_objectid(&first));
		BUG_ON(err);
		if (need_resched()) {
			mutex_unlock(&extent_root->fs_info->alloc_mutex);
			cond_resched();
			mutex_lock(&extent_root->fs_info->alloc_mutex);
		}
	}
	btrfs_free_path(path);
	return 0;
}
1578
/*
 * Keep the range [@bytenr, @bytenr + @num_bytes) from being reallocated
 * before the running transaction commits.
 *
 * With @pending == 0 the range is pinned immediately, unless the block
 * is a tree block created in this transaction that was never written to
 * disk — such a block can be discarded outright, and 1 is returned so
 * the caller may mark the space free right away.
 *
 * With @pending != 0 (frees issued against the extent root itself) the
 * range is only queued in fs_info->pending_del for later processing by
 * del_pending_extents().
 *
 * Called with fs_info->alloc_mutex held.  Returns 0, or 1 as above.
 */
static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes,
			  int pending)
{
	int err = 0;

	WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
	if (!pending) {
		struct extent_buffer *buf;
		buf = btrfs_find_tree_block(root, bytenr, num_bytes);
		if (buf) {
			/* try-lock only: blocking here could deadlock
			 * against a writer holding the tree lock */
			if (btrfs_buffer_uptodate(buf, 0) &&
			    btrfs_try_tree_lock(buf)) {
				u64 transid =
				    root->fs_info->running_transaction->transid;
				u64 header_transid =
					btrfs_header_generation(buf);
				/* born in this transaction and never
				 * written: safe to drop without pinning */
				if (header_transid == transid &&
				    !btrfs_header_flag(buf,
					       BTRFS_HEADER_FLAG_WRITTEN)) {
					clean_tree_block(NULL, root, buf);
					btrfs_tree_unlock(buf);
					free_extent_buffer(buf);
					return 1;
				}
				btrfs_tree_unlock(buf);
			}
			free_extent_buffer(buf);
		}
		update_pinned_extents(root, bytenr, num_bytes, 1);
	} else {
		/* defer the free; del_pending_extents() picks it up */
		set_extent_bits(&root->fs_info->pending_del,
				bytenr, bytenr + num_bytes - 1,
				EXTENT_LOCKED, GFP_NOFS);
	}
	BUG_ON(err < 0);
	return 0;
}
1616
1617 /*
1618  * remove an extent from the root, returns 0 on success
1619  */
1620 static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
1621                          *root, u64 bytenr, u64 num_bytes,
1622                          u64 root_objectid, u64 ref_generation,
1623                          u64 owner_objectid, u64 owner_offset, int pin,
1624                          int mark_free)
1625 {
1626         struct btrfs_path *path;
1627         struct btrfs_key key;
1628         struct btrfs_fs_info *info = root->fs_info;
1629         struct btrfs_root *extent_root = info->extent_root;
1630         struct extent_buffer *leaf;
1631         int ret;
1632         int extent_slot = 0;
1633         int found_extent = 0;
1634         int num_to_del = 1;
1635         struct btrfs_extent_item *ei;
1636         u32 refs;
1637
1638         WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
1639         key.objectid = bytenr;
1640         btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
1641         key.offset = num_bytes;
1642         path = btrfs_alloc_path();
1643         if (!path)
1644                 return -ENOMEM;
1645
1646         path->reada = 1;
1647         ret = lookup_extent_backref(trans, extent_root, path,
1648                                     bytenr, root_objectid,
1649                                     ref_generation,
1650                                     owner_objectid, owner_offset, 1);
1651         if (ret == 0) {
1652                 struct btrfs_key found_key;
1653                 extent_slot = path->slots[0];
1654                 while(extent_slot > 0) {
1655                         extent_slot--;
1656                         btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1657                                               extent_slot);
1658                         if (found_key.objectid != bytenr)
1659                                 break;
1660                         if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
1661                             found_key.offset == num_bytes) {
1662                                 found_extent = 1;
1663                                 break;
1664                         }
1665                         if (path->slots[0] - extent_slot > 5)
1666                                 break;
1667                 }
1668                 if (!found_extent)
1669                         ret = btrfs_del_item(trans, extent_root, path);
1670         } else {
1671                 btrfs_print_leaf(extent_root, path->nodes[0]);
1672                 WARN_ON(1);
1673                 printk("Unable to find ref byte nr %Lu root %Lu "
1674                        " gen %Lu owner %Lu offset %Lu\n", bytenr,
1675                        root_objectid, ref_generation, owner_objectid,
1676                        owner_offset);
1677         }
1678         if (!found_extent) {
1679                 btrfs_release_path(extent_root, path);
1680                 ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
1681                 if (ret < 0)
1682                         return ret;
1683                 BUG_ON(ret);
1684                 extent_slot = path->slots[0];
1685         }
1686
1687         leaf = path->nodes[0];
1688         ei = btrfs_item_ptr(leaf, extent_slot,
1689                             struct btrfs_extent_item);
1690         refs = btrfs_extent_refs(leaf, ei);
1691         BUG_ON(refs == 0);
1692         refs -= 1;
1693         btrfs_set_extent_refs(leaf, ei, refs);
1694
1695         btrfs_mark_buffer_dirty(leaf);
1696
1697         if (refs == 0 && found_extent && path->slots[0] == extent_slot + 1) {
1698                 /* if the back ref and the extent are next to each other
1699                  * they get deleted below in one shot
1700                  */
1701                 path->slots[0] = extent_slot;
1702                 num_to_del = 2;
1703         } else if (found_extent) {
1704                 /* otherwise delete the extent back ref */
1705                 ret = btrfs_del_item(trans, extent_root, path);
1706                 BUG_ON(ret);
1707                 /* if refs are 0, we need to setup the path for deletion */
1708                 if (refs == 0) {
1709                         btrfs_release_path(extent_root, path);
1710                         ret = btrfs_search_slot(trans, extent_root, &key, path,
1711                                                 -1, 1);
1712                         if (ret < 0)
1713                                 return ret;
1714                         BUG_ON(ret);
1715                 }
1716         }
1717
1718         if (refs == 0) {
1719                 u64 super_used;
1720                 u64 root_used;
1721
1722                 if (pin) {
1723                         ret = pin_down_bytes(root, bytenr, num_bytes, 0);
1724                         if (ret > 0)
1725                                 mark_free = 1;
1726                         BUG_ON(ret < 0);
1727                 }
1728
1729                 /* block accounting for super block */
1730                 spin_lock_irq(&info->delalloc_lock);
1731                 super_used = btrfs_super_bytes_used(&info->super_copy);
1732                 btrfs_set_super_bytes_used(&info->super_copy,
1733                                            super_used - num_bytes);
1734                 spin_unlock_irq(&info->delalloc_lock);
1735
1736                 /* block accounting for root item */
1737                 root_used = btrfs_root_used(&root->root_item);
1738                 btrfs_set_root_used(&root->root_item,
1739                                            root_used - num_bytes);
1740                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
1741                                       num_to_del);
1742                 if (ret) {
1743                         return ret;
1744                 }
1745                 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
1746                                          mark_free);
1747                 BUG_ON(ret);
1748         }
1749         btrfs_free_path(path);
1750         finish_current_insert(trans, extent_root);
1751         return ret;
1752 }
1753
/*
 * find all the blocks marked as pending in the radix tree and remove
 * them from the extent map
 *
 * Processes fs_info->pending_del, the queue filled by pin_down_bytes()
 * for frees against the extent root.  A range that is still marked in
 * fs_info->extent_ins was allocated and freed within this transaction
 * and never reached the extent tree, so both records simply cancel out;
 * any other range is pinned and freed through __free_extent().
 *
 * Called with fs_info->alloc_mutex held; the mutex is dropped briefly
 * at rescheduling points.  Returns the last error seen, or 0.
 */
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
			       btrfs_root *extent_root)
{
	int ret;
	int err = 0;
	u64 start;
	u64 end;
	struct extent_io_tree *pending_del;
	struct extent_io_tree *pinned_extents;

	WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
	pending_del = &extent_root->fs_info->pending_del;
	pinned_extents = &extent_root->fs_info->pinned_extents;

	while(1) {
		ret = find_first_extent_bit(pending_del, 0, &start, &end,
					    EXTENT_LOCKED);
		if (ret)
			break;
		clear_extent_bits(pending_del, start, end, EXTENT_LOCKED,
				  GFP_NOFS);
		if (!test_range_bit(&extent_root->fs_info->extent_ins,
				    start, end, EXTENT_LOCKED, 0)) {
			update_pinned_extents(extent_root, start,
					      end + 1 - start, 1);
			ret = __free_extent(trans, extent_root,
					     start, end + 1 - start,
					     extent_root->root_key.objectid,
					     0, 0, 0, 0, 0);
		} else {
			/* allocated and freed in the same transaction:
			 * cancel the pending insert instead of freeing */
			clear_extent_bits(&extent_root->fs_info->extent_ins,
					  start, end, EXTENT_LOCKED, GFP_NOFS);
		}
		if (ret)
			err = ret;

		/* long loop: give other tasks a chance to run */
		if (need_resched()) {
			mutex_unlock(&extent_root->fs_info->alloc_mutex);
			cond_resched();
			mutex_lock(&extent_root->fs_info->alloc_mutex);
		}
	}
	return err;
}
1802
1803 /*
1804  * remove an extent from the root, returns 0 on success
1805  */
1806 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
1807                                struct btrfs_root *root, u64 bytenr,
1808                                u64 num_bytes, u64 root_objectid,
1809                                u64 ref_generation, u64 owner_objectid,
1810                                u64 owner_offset, int pin)
1811 {
1812         struct btrfs_root *extent_root = root->fs_info->extent_root;
1813         int pending_ret;
1814         int ret;
1815
1816         WARN_ON(num_bytes < root->sectorsize);
1817         if (!root->ref_cows)
1818                 ref_generation = 0;
1819
1820         if (root == extent_root) {
1821                 pin_down_bytes(root, bytenr, num_bytes, 1);
1822                 return 0;
1823         }
1824         ret = __free_extent(trans, root, bytenr, num_bytes, root_objectid,
1825                             ref_generation, owner_objectid, owner_offset,
1826                             pin, pin == 0);
1827
1828         finish_current_insert(trans, root->fs_info->extent_root);
1829         pending_ret = del_pending_extents(trans, root->fs_info->extent_root);
1830         return ret ? ret : pending_ret;
1831 }
1832
1833 int btrfs_free_extent(struct btrfs_trans_handle *trans,
1834                       struct btrfs_root *root, u64 bytenr,
1835                       u64 num_bytes, u64 root_objectid,
1836                       u64 ref_generation, u64 owner_objectid,
1837                       u64 owner_offset, int pin)
1838 {
1839         int ret;
1840
1841         maybe_lock_mutex(root);
1842         ret = __btrfs_free_extent(trans, root, bytenr, num_bytes,
1843                                   root_objectid, ref_generation,
1844                                   owner_objectid, owner_offset, pin);
1845         maybe_unlock_mutex(root);
1846         return ret;
1847 }
1848
1849 static u64 stripe_align(struct btrfs_root *root, u64 val)
1850 {
1851         u64 mask = ((u64)root->stripesize - 1);
1852         u64 ret = (val + mask) & ~mask;
1853         return ret;
1854 }
1855
1856 /*
1857  * walks the btree of allocated extents and find a hole of a given size.
1858  * The key ins is changed to record the hole:
1859  * ins->objectid == block start
1860  * ins->flags = BTRFS_EXTENT_ITEM_KEY
1861  * ins->offset == number of blocks
1862  * Any available blocks before search_start are skipped.
1863  */
static int noinline find_free_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *orig_root,
				     u64 num_bytes, u64 empty_size,
				     u64 search_start, u64 search_end,
				     u64 hint_byte, struct btrfs_key *ins,
				     u64 exclude_start, u64 exclude_nr,
				     int data)
{
	int ret;
	u64 orig_search_start;
	struct btrfs_root * root = orig_root->fs_info->extent_root;
	struct btrfs_fs_info *info = root->fs_info;
	u64 total_needed = num_bytes;
	u64 *last_ptr = NULL;		/* cluster hint; see below */
	struct btrfs_block_group_cache *block_group;
	int full_scan = 0;		/* second, exhaustive pass */
	int wrapped = 0;		/* search wrapped to the start once */
	int chunk_alloc_done = 0;
	int empty_cluster = 2 * 1024 * 1024;
	int allowed_chunk_alloc = 0;

	WARN_ON(num_bytes < root->sectorsize);
	btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);

	if (orig_root->ref_cows || empty_size)
		allowed_chunk_alloc = 1;

	/*
	 * metadata (and SSD data) allocations cluster near the previous
	 * allocation, remembered in *last_ptr
	 */
	if (data & BTRFS_BLOCK_GROUP_METADATA) {
		last_ptr = &root->fs_info->last_alloc;
		empty_cluster = 256 * 1024;
	}

	if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
		last_ptr = &root->fs_info->last_data_alloc;
	}

	if (last_ptr) {
		if (*last_ptr)
			hint_byte = *last_ptr;
		else {
			/* no cluster yet: ask for extra room to start one */
			empty_size += empty_cluster;
		}
	}

	search_start = max(search_start, first_logical_byte(root, 0));
	orig_search_start = search_start;

	if (search_end == (u64)-1)
		search_end = btrfs_super_total_bytes(&info->super_copy);

	/* pick a starting block group from the hint, or the caller's */
	if (hint_byte) {
		block_group = btrfs_lookup_first_block_group(info, hint_byte);
		if (!block_group)
			hint_byte = search_start;
		block_group = btrfs_find_block_group(root, block_group,
						     hint_byte, data, 1);
		if (last_ptr && *last_ptr == 0 && block_group)
			hint_byte = block_group->key.objectid;
	} else {
		block_group = btrfs_find_block_group(root,
						     trans->block_group,
						     search_start, data, 1);
	}
	search_start = max(search_start, hint_byte);

	total_needed += empty_size;

check_failed:
	if (!block_group) {
		block_group = btrfs_lookup_first_block_group(info,
							     search_start);
		if (!block_group)
			block_group = btrfs_lookup_first_block_group(info,
						       orig_search_start);
	}
	/* on the full-scan pass, try to grow the space with a new chunk */
	if (full_scan && !chunk_alloc_done) {
		if (allowed_chunk_alloc) {
			do_chunk_alloc(trans, root,
				     num_bytes + 2 * 1024 * 1024, data, 1);
			allowed_chunk_alloc = 0;
		} else if (block_group && block_group_bits(block_group, data)) {
			block_group->space_info->force_alloc = 1;
		}
		chunk_alloc_done = 1;
	}
	ret = find_search_start(root, &block_group, &search_start,
				total_needed, data);
	if (ret == -ENOSPC && last_ptr && *last_ptr) {
		/* clustering near the last allocation failed; drop the
		 * hint and retry from the original starting point */
		*last_ptr = 0;
		block_group = btrfs_lookup_first_block_group(info,
							     orig_search_start);
		search_start = orig_search_start;
		ret = find_search_start(root, &block_group, &search_start,
					total_needed, data);
	}
	if (ret == -ENOSPC)
		goto enospc;
	if (ret)
		goto error;

	/* found space away from the cluster: restart without the hint */
	if (last_ptr && *last_ptr && search_start != *last_ptr) {
		*last_ptr = 0;
		if (!empty_size) {
			empty_size += empty_cluster;
			total_needed += empty_size;
		}
		block_group = btrfs_lookup_first_block_group(info,
						       orig_search_start);
		search_start = orig_search_start;
		ret = find_search_start(root, &block_group,
					&search_start, total_needed, data);
		if (ret == -ENOSPC)
			goto enospc;
		if (ret)
			goto error;
	}

	search_start = stripe_align(root, search_start);
	ins->objectid = search_start;
	ins->offset = num_bytes;

	if (ins->objectid + num_bytes >= search_end)
		goto enospc;

	/* the candidate must fit entirely inside this block group */
	if (ins->objectid + num_bytes >
	    block_group->key.objectid + block_group->key.offset) {
		search_start = block_group->key.objectid +
			block_group->key.offset;
		goto new_group;
	}

	/* skip ranges with a pending extent-tree insertion */
	if (test_range_bit(&info->extent_ins, ins->objectid,
			   ins->objectid + num_bytes -1, EXTENT_LOCKED, 0)) {
		search_start = ins->objectid + num_bytes;
		goto new_group;
	}

	/* skip ranges pinned until the transaction commits */
	if (test_range_bit(&info->pinned_extents, ins->objectid,
			   ins->objectid + num_bytes -1, EXTENT_DIRTY, 0)) {
		search_start = ins->objectid + num_bytes;
		goto new_group;
	}

	/* honor the caller's excluded range */
	if (exclude_nr > 0 && (ins->objectid + num_bytes > exclude_start &&
	    ins->objectid < exclude_start + exclude_nr)) {
		search_start = exclude_start + exclude_nr;
		goto new_group;
	}

	if (!(data & BTRFS_BLOCK_GROUP_DATA)) {
		block_group = btrfs_lookup_block_group(info, ins->objectid);
		if (block_group)
			trans->block_group = block_group;
	}
	ins->offset = num_bytes;
	if (last_ptr) {
		/* remember where this allocation ended for clustering */
		*last_ptr = ins->objectid + ins->offset;
		if (*last_ptr ==
		    btrfs_super_total_bytes(&root->fs_info->super_copy)) {
			*last_ptr = 0;
		}
	}
	return 0;

new_group:
	if (search_start + num_bytes >= search_end) {
enospc:
		search_start = orig_search_start;
		if (full_scan) {
			ret = -ENOSPC;
			goto error;
		}
		/* wrap once, then fall back to a full scan without the
		 * extra cluster padding */
		if (wrapped) {
			if (!full_scan)
				total_needed -= empty_size;
			full_scan = 1;
		} else
			wrapped = 1;
	}
	block_group = btrfs_lookup_first_block_group(info, search_start);
	cond_resched();
	block_group = btrfs_find_block_group(root, block_group,
					     search_start, data, 0);
	goto check_failed;

error:
	return ret;
}
2052
/*
 * Reserve free space for an extent of @num_bytes (never dropping below
 * @min_alloc_size) and record the result in @ins.  The profile bits in
 * @data select data/system/metadata allocation; chunks are allocated
 * up front when needed, and on ENOSPC the request is halved and
 * retried.  A failure below min_alloc_size is fatal (BUG).
 *
 * Called with fs_info->alloc_mutex held.  Returns 0.
 */
static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 num_bytes, u64 min_alloc_size,
				  u64 empty_size, u64 hint_byte,
				  u64 search_end, struct btrfs_key *ins,
				  u64 data)
{
	int ret;
	u64 search_start = 0;
	u64 alloc_profile;
	struct btrfs_fs_info *info = root->fs_info;

	/* pick the raid/dup profile for this kind of allocation */
	if (data) {
		alloc_profile = info->avail_data_alloc_bits &
				info->data_alloc_profile;
		data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
	} else if (root == root->fs_info->chunk_root) {
		alloc_profile = info->avail_system_alloc_bits &
				info->system_alloc_profile;
		data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
	} else {
		alloc_profile = info->avail_metadata_alloc_bits &
				info->metadata_alloc_profile;
		data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
	}
again:
	data = reduce_alloc_profile(root, data);
	/*
	 * the only place that sets empty_size is btrfs_realloc_node, which
	 * is not called recursively on allocations
	 */
	if (empty_size || root->ref_cows) {
		/* data allocations also pre-grow metadata space */
		if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
				     2 * 1024 * 1024,
				     BTRFS_BLOCK_GROUP_METADATA |
				     (info->metadata_alloc_profile &
				      info->avail_metadata_alloc_bits), 0);
			BUG_ON(ret);
		}
		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
				     num_bytes + 2 * 1024 * 1024, data, 0);
		BUG_ON(ret);
	}

	WARN_ON(num_bytes < root->sectorsize);
	ret = find_free_extent(trans, root, num_bytes, empty_size,
			       search_start, search_end, hint_byte, ins,
			       trans->alloc_exclude_start,
			       trans->alloc_exclude_nr, data);

	/* out of space: halve the request, force a chunk, try again */
	if (ret == -ENOSPC && num_bytes > min_alloc_size) {
		num_bytes = num_bytes >> 1;
		num_bytes = max(num_bytes, min_alloc_size);
		do_chunk_alloc(trans, root->fs_info->extent_root,
			       num_bytes, data, 1);
		goto again;
	}
	if (ret) {
		printk("allocation failed flags %Lu\n", data);
		BUG();
	}
	/* the reserved range is no longer free space */
	clear_extent_dirty(&root->fs_info->free_space_cache,
			   ins->objectid, ins->objectid + ins->offset - 1,
			   GFP_NOFS);
	return 0;
}
2120
2121 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
2122 {
2123         maybe_lock_mutex(root);
2124         set_extent_dirty(&root->fs_info->free_space_cache,
2125                          start, start + len - 1, GFP_NOFS);
2126         maybe_unlock_mutex(root);
2127         return 0;
2128 }
2129
2130 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
2131                                   struct btrfs_root *root,
2132                                   u64 num_bytes, u64 min_alloc_size,
2133                                   u64 empty_size, u64 hint_byte,
2134                                   u64 search_end, struct btrfs_key *ins,
2135                                   u64 data)
2136 {
2137         int ret;
2138         maybe_lock_mutex(root);
2139         ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
2140                                      empty_size, hint_byte, search_end, ins,
2141                                      data);
2142         maybe_unlock_mutex(root);
2143         return ret;
2144 }
2145
/*
 * Record a reserved extent (@ins) in the extent tree: update the super
 * and root byte accounting, then insert the extent item and its backref
 * as two adjacent items in one operation.  Allocations made by the
 * extent root itself are only queued in fs_info->extent_ins (inserting
 * inline would recurse) and jump straight to the block group update.
 *
 * Called with fs_info->alloc_mutex held.
 */
static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 u64 root_objectid, u64 ref_generation,
					 u64 owner, u64 owner_offset,
					 struct btrfs_key *ins)
{
	int ret;
	int pending_ret;
	u64 super_used;
	u64 root_used;
	u64 num_bytes = ins->offset;
	u32 sizes[2];
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct btrfs_extent_item *extent_item;
	struct btrfs_extent_ref *ref;
	struct btrfs_path *path;
	struct btrfs_key keys[2];

	/* block accounting for super block */
	spin_lock_irq(&info->delalloc_lock);
	super_used = btrfs_super_bytes_used(&info->super_copy);
	btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
	spin_unlock_irq(&info->delalloc_lock);

	/* block accounting for root item */
	root_used = btrfs_root_used(&root->root_item);
	btrfs_set_root_used(&root->root_item, root_used + num_bytes);

	/* extent root allocations are deferred to finish_current_insert */
	if (root == extent_root) {
		set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
				ins->objectid + ins->offset - 1,
				EXTENT_LOCKED, GFP_NOFS);
		goto update_block;
	}

	/* keys[0]: the extent item; keys[1]: its backref */
	memcpy(&keys[0], ins, sizeof(*ins));
	keys[1].offset = hash_extent_ref(root_objectid, ref_generation,
					 owner, owner_offset);
	keys[1].objectid = ins->objectid;
	keys[1].type = BTRFS_EXTENT_REF_KEY;
	sizes[0] = sizeof(*extent_item);
	sizes[1] = sizeof(*ref);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	/* insert both items in one shot so they land side by side */
	ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
				       sizes, 2);

	BUG_ON(ret);
	extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(path->nodes[0], extent_item, 1);
	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
			     struct btrfs_extent_ref);

	btrfs_set_ref_root(path->nodes[0], ref, root_objectid);
	btrfs_set_ref_generation(path->nodes[0], ref, ref_generation);
	btrfs_set_ref_objectid(path->nodes[0], ref, owner);
	btrfs_set_ref_offset(path->nodes[0], ref, owner_offset);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	/* the reserved range is recorded; drop the exclusion window */
	trans->alloc_exclude_start = 0;
	trans->alloc_exclude_nr = 0;
	btrfs_free_path(path);
	finish_current_insert(trans, extent_root);
	pending_ret = del_pending_extents(trans, extent_root);

	if (ret)
		goto out;
	if (pending_ret) {
		ret = pending_ret;
		goto out;
	}

update_block:
	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0);
	if (ret) {
		printk("update block group failed for %Lu %Lu\n",
		       ins->objectid, ins->offset);
		BUG();
	}
out:
	return ret;
}
2233
2234 int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
2235                                 struct btrfs_root *root,
2236                                 u64 root_objectid, u64 ref_generation,
2237                                 u64 owner, u64 owner_offset,
2238                                 struct btrfs_key *ins)
2239 {
2240         int ret;
2241         maybe_lock_mutex(root);
2242         ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
2243                                             ref_generation, owner,
2244                                             owner_offset, ins);
2245         maybe_unlock_mutex(root);
2246         return ret;
2247 }
2248 /*
2249  * finds a free extent and does all the dirty work required for allocation
2250  * returns the key for the extent through ins, and a tree buffer for
2251  * the first block of the extent through buf.
2252  *
2253  * returns 0 if everything worked, non-zero otherwise.
2254  */
2255 int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
2256                        struct btrfs_root *root,
2257                        u64 num_bytes, u64 min_alloc_size,
2258                        u64 root_objectid, u64 ref_generation,
2259                        u64 owner, u64 owner_offset,
2260                        u64 empty_size, u64 hint_byte,
2261                        u64 search_end, struct btrfs_key *ins, u64 data)
2262 {
2263         int ret;
2264
2265         maybe_lock_mutex(root);
2266
2267         ret = __btrfs_reserve_extent(trans, root, num_bytes,
2268                                      min_alloc_size, empty_size, hint_byte,
2269                                      search_end, ins, data);
2270         BUG_ON(ret);
2271         ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
2272                                             ref_generation, owner,
2273                                             owner_offset, ins);
2274         BUG_ON(ret);
2275
2276         maybe_unlock_mutex(root);
2277         return ret;
2278 }
2279
2280 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
2281                                             struct btrfs_root *root,
2282                                             u64 bytenr, u32 blocksize)
2283 {
2284         struct extent_buffer *buf;
2285
2286         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
2287         if (!buf)
2288                 return ERR_PTR(-ENOMEM);
2289         btrfs_set_header_generation(buf, trans->transid);
2290         btrfs_tree_lock(buf);
2291         clean_tree_block(trans, root, buf);
2292         btrfs_set_buffer_uptodate(buf);
2293         set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
2294                          buf->start + buf->len - 1, GFP_NOFS);
2295         trans->blocks_used++;
2296         return buf;
2297 }
2298
2299 /*
2300  * helper function to allocate a block for a given tree
2301  * returns the tree buffer or NULL.
2302  */
2303 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
2304                                              struct btrfs_root *root,
2305                                              u32 blocksize,
2306                                              u64 root_objectid,
2307                                              u64 ref_generation,
2308                                              u64 first_objectid,
2309                                              int level,
2310                                              u64 hint,
2311                                              u64 empty_size)
2312 {
2313         struct btrfs_key ins;
2314         int ret;
2315         struct extent_buffer *buf;
2316
2317         ret = btrfs_alloc_extent(trans, root, blocksize, blocksize,
2318                                  root_objectid, ref_generation,
2319                                  level, first_objectid, empty_size, hint,
2320                                  (u64)-1, &ins, 0);
2321         if (ret) {
2322                 BUG_ON(ret > 0);
2323                 return ERR_PTR(ret);
2324         }
2325
2326         buf = btrfs_init_new_buffer(trans, root, ins.objectid, blocksize);
2327         return buf;
2328 }
2329
2330 static int noinline drop_leaf_ref_no_cache(struct btrfs_trans_handle *trans,
2331                                            struct btrfs_root *root,
2332                                            struct extent_buffer *leaf)
2333 {
2334         u64 leaf_owner;
2335         u64 leaf_generation;
2336         struct btrfs_key key;
2337         struct btrfs_file_extent_item *fi;
2338         int i;
2339         int nritems;
2340         int ret;
2341
2342         BUG_ON(!btrfs_is_leaf(leaf));
2343         nritems = btrfs_header_nritems(leaf);
2344         leaf_owner = btrfs_header_owner(leaf);
2345         leaf_generation = btrfs_header_generation(leaf);
2346
2347         for (i = 0; i < nritems; i++) {
2348                 u64 disk_bytenr;
2349                 cond_resched();
2350
2351                 btrfs_item_key_to_cpu(leaf, &key, i);
2352                 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2353                         continue;
2354                 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
2355                 if (btrfs_file_extent_type(leaf, fi) ==
2356                     BTRFS_FILE_EXTENT_INLINE)
2357                         continue;
2358                 /*
2359                  * FIXME make sure to insert a trans record that
2360                  * repeats the snapshot del on crash
2361                  */
2362                 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
2363                 if (disk_bytenr == 0)
2364                         continue;
2365
2366                 mutex_lock(&root->fs_info->alloc_mutex);
2367                 ret = __btrfs_free_extent(trans, root, disk_bytenr,
2368                                 btrfs_file_extent_disk_num_bytes(leaf, fi),
2369                                 leaf_owner, leaf_generation,
2370                                 key.objectid, key.offset, 0);
2371                 mutex_unlock(&root->fs_info->alloc_mutex);
2372                 BUG_ON(ret);
2373         }
2374         return 0;
2375 }
2376
2377 static int noinline drop_leaf_ref(struct btrfs_trans_handle *trans,
2378                                          struct btrfs_root *root,
2379                                          struct btrfs_leaf_ref *ref)
2380 {
2381         int i;
2382         int ret;
2383         struct btrfs_extent_info *info = ref->extents;
2384
2385         for (i = 0; i < ref->nritems; i++) {
2386                 mutex_lock(&root->fs_info->alloc_mutex);
2387                 ret = __btrfs_free_extent(trans, root,
2388                                         info->bytenr, info->num_bytes,
2389                                         ref->owner, ref->generation,
2390                                         info->objectid, info->offset, 0);
2391                 mutex_unlock(&root->fs_info->alloc_mutex);
2392                 BUG_ON(ret);
2393                 info++;
2394         }
2395
2396         return 0;
2397 }
2398
/*
 * Opportunistically read ahead the blocks pointed to by @node, starting
 * at @slot, to warm the cache before walk_down_tree descends.  Best
 * effort only: blocks more than 32k away from the last one read, or
 * blocks that are shared (refcount != 1), are skipped, and after 32
 * skips we give up entirely.
 *
 * NOTE(review): the early return when level != 0 looks suspicious --
 * a level-0 buffer is a leaf and has no block pointers, yet the loop
 * below reads btrfs_node_blockptr() and computes
 * btrfs_level_size(root, level - 1) with level == 0.  Confirm the
 * intended condition against the callers before changing anything.
 */
static void noinline reada_walk_down(struct btrfs_root *root,
                                     struct extent_buffer *node,
                                     int slot)
{
        u64 bytenr;
        u64 last = 0;      /* end offset of the last block actually read */
        u32 nritems;
        u32 refs;
        u32 blocksize;
        int ret;
        int i;
        int level;
        int skipped = 0;   /* candidates passed over; caps the scan at 32 */

        nritems = btrfs_header_nritems(node);
        level = btrfs_header_level(node);
        if (level)
                return;

        for (i = slot; i < nritems && skipped < 32; i++) {
                bytenr = btrfs_node_blockptr(node, i);
                /* skip blocks that would seek far from the last read */
                if (last && ((bytenr > last && bytenr - last > 32 * 1024) ||
                             (last > bytenr && last - bytenr > 32 * 1024))) {
                        skipped++;
                        continue;
                }
                blocksize = btrfs_level_size(root, level - 1);
                if (i != slot) {
                        /* only read ahead blocks owned solely by us */
                        ret = lookup_extent_ref(NULL, root, bytenr,
                                                blocksize, &refs);
                        BUG_ON(ret);
                        if (refs != 1) {
                                skipped++;
                                continue;
                        }
                }
                ret = readahead_tree_block(root, bytenr, blocksize,
                                           btrfs_node_ptr_generation(node, i));
                last = bytenr + blocksize;
                cond_resched();
                if (ret)
                        break;
        }
}
2443
/*
 * Look up the reference count on the extent [start, start + len) during
 * snapshot deletion; the count comes back through @refs.  A failed
 * lookup BUG()s, so the return value is effectively always 0.
 */
int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
                              u32 *refs)
{
        int ret;

        ret = lookup_extent_ref(NULL, root, start, len, refs);
        BUG_ON(ret);

#if 0 // some debugging code in case we see problems here
        /* if the refs count is one, it won't get increased again.  But
         * if the ref count is > 1, someone may be decreasing it at
         * the same time we are.
         */
        if (*refs != 1) {
                struct extent_buffer *eb = NULL;
                eb = btrfs_find_create_tree_block(root, start, len);
                if (eb)
                        btrfs_tree_lock(eb);

                mutex_lock(&root->fs_info->alloc_mutex);
                ret = lookup_extent_ref(NULL, root, start, len, refs);
                BUG_ON(ret);
                mutex_unlock(&root->fs_info->alloc_mutex);

                if (eb) {
                        btrfs_tree_unlock(eb);
                        free_extent_buffer(eb);
                }
                if (*refs == 1) {
                        printk("block %llu went down to one during drop_snap\n",
                               (unsigned long long)start);
                }

        }
#endif

        /* called repeatedly from the tree walk; stay scheduler friendly */
        cond_resched();
        return ret;
}
2483
2484 /*
2485  * helper function for drop_snapshot, this walks down the tree dropping ref
2486  * counts as it goes.
2487  */
2488 static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
2489                                    struct btrfs_root *root,
2490                                    struct btrfs_path *path, int *level)
2491 {
2492         u64 root_owner;
2493         u64 root_gen;
2494         u64 bytenr;
2495         u64 ptr_gen;
2496         struct extent_buffer *next;
2497         struct extent_buffer *cur;
2498         struct extent_buffer *parent;
2499         struct btrfs_leaf_ref *ref;
2500         u32 blocksize;
2501         int ret;
2502         u32 refs;
2503
2504         WARN_ON(*level < 0);
2505         WARN_ON(*level >= BTRFS_MAX_LEVEL);
2506         ret = drop_snap_lookup_refcount(root, path->nodes[*level]->start,
2507                                 path->nodes[*level]->len, &refs);
2508         BUG_ON(ret);
2509         if (refs > 1)
2510                 goto out;
2511
2512         /*
2513          * walk down to the last node level and free all the leaves
2514          */
2515         while(*level >= 0) {
2516                 WARN_ON(*level < 0);
2517                 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2518                 cur = path->nodes[*level];
2519
2520                 if (btrfs_header_level(cur) != *level)
2521                         WARN_ON(1);
2522
2523                 if (path->slots[*level] >=
2524                     btrfs_header_nritems(cur))
2525                         break;
2526                 if (*level == 0) {
2527                         ret = drop_leaf_ref_no_cache(trans, root, cur);
2528                         BUG_ON(ret);
2529                         break;
2530                 }
2531                 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2532                 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2533                 blocksize = btrfs_level_size(root, *level - 1);
2534
2535                 ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
2536                 BUG_ON(ret);
2537                 if (refs != 1) {
2538                         parent = path->nodes[*level];
2539                         root_owner = btrfs_header_owner(parent);
2540                         root_gen = btrfs_header_generation(parent);
2541                         path->slots[*level]++;
2542
2543                         mutex_lock(&root->fs_info->alloc_mutex);
2544                         ret = __btrfs_free_extent(trans, root, bytenr,
2545                                                 blocksize, root_owner,
2546                                                 root_gen, 0, 0, 1);
2547                         BUG_ON(ret);
2548                         mutex_unlock(&root->fs_info->alloc_mutex);
2549
2550                         atomic_inc(&root->fs_info->throttle_gen);
2551                         wake_up(&root->fs_info->transaction_throttle);
2552
2553                         continue;
2554                 }
2555                 /*
2556                  * at this point, we have a single ref, and since the
2557                  * only place referencing this extent is a dead root
2558                  * the reference count should never go higher.
2559                  * So, we don't need to check it again
2560                  */
2561                 if (*level == 1) {
2562                         struct btrfs_key key;
2563                         btrfs_node_key_to_cpu(cur, &key, path->slots[*level]);
2564                         ref = btrfs_lookup_leaf_ref(root, bytenr);
2565                         if (ref) {
2566                                 ret = drop_leaf_ref(trans, root, ref);
2567                                 BUG_ON(ret);
2568                                 btrfs_remove_leaf_ref(root, ref);
2569                                 btrfs_free_leaf_ref(root, ref);
2570                                 *level = 0;
2571                                 break;
2572                         }
2573                         if (printk_ratelimit())
2574                                 printk("leaf ref miss for bytenr %llu\n",
2575                                        (unsigned long long)bytenr);
2576                 }
2577                 next = btrfs_find_tree_block(root, bytenr, blocksize);
2578                 if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
2579                         free_extent_buffer(next);
2580
2581                         if (path->slots[*level] == 0)
2582                                 reada_walk_down(root, cur, path->slots[*level]);
2583                         next = read_tree_block(root, bytenr, blocksize,
2584                                                ptr_gen);
2585                         cond_resched();
2586 #if 0
2587                         /*
2588                          * this is a debugging check and can go away
2589                          * the ref should never go all the way down to 1
2590                          * at this point
2591                          */
2592                         ret = lookup_extent_ref(NULL, root, bytenr, blocksize,
2593                                                 &refs);
2594                         BUG_ON(ret);
2595                         WARN_ON(refs != 1);
2596 #endif
2597                 }
2598                 WARN_ON(*level <= 0);
2599                 if (path->nodes[*level-1])
2600                         free_extent_buffer(path->nodes[*level-1]);
2601                 path->nodes[*level-1] = next;
2602                 *level = btrfs_header_level(next);
2603                 path->slots[*level] = 0;
2604         }
2605 out:
2606         WARN_ON(*level < 0);
2607         WARN_ON(*level >= BTRFS_MAX_LEVEL);
2608
2609         if (path->nodes[*level] == root->node) {
2610                 parent = path->nodes[*level];
2611                 bytenr = path->nodes[*level]->start;
2612         } else {
2613                 parent = path->nodes[*level + 1];
2614                 bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
2615         }
2616
2617         blocksize = btrfs_level_size(root, *level);
2618         root_owner = btrfs_header_owner(parent);
2619         root_gen = btrfs_header_generation(parent);
2620
2621         mutex_lock(&root->fs_info->alloc_mutex);
2622         ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
2623                                   root_owner, root_gen, 0, 0, 1);
2624         free_extent_buffer(path->nodes[*level]);
2625         path->nodes[*level] = NULL;
2626         *level += 1;
2627         BUG_ON(ret);
2628         mutex_unlock(&root->fs_info->alloc_mutex);
2629
2630         cond_resched();
2631         return 0;
2632 }
2633
2634 /*
2635  * helper for dropping snapshots.  This walks back up the tree in the path
2636  * to find the first node higher up where we haven't yet gone through
2637  * all the slots
2638  */
2639 static int noinline walk_up_tree(struct btrfs_trans_handle *trans,
2640                                  struct btrfs_root *root,
2641                                  struct btrfs_path *path, int *level)
2642 {
2643         u64 root_owner;
2644         u64 root_gen;
2645         struct btrfs_root_item *root_item = &root->root_item;
2646         int i;
2647         int slot;
2648         int ret;
2649
2650         for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2651                 slot = path->slots[i];
2652                 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
2653                         struct extent_buffer *node;
2654                         struct btrfs_disk_key disk_key;
2655                         node = path->nodes[i];
2656                         path->slots[i]++;
2657                         *level = i;
2658                         WARN_ON(*level == 0);
2659                         btrfs_node_key(node, &disk_key, path->slots[i]);
2660                         memcpy(&root_item->drop_progress,
2661                                &disk_key, sizeof(disk_key));
2662                         root_item->drop_level = i;
2663                         return 0;
2664                 } else {
2665                         if (path->nodes[*level] == root->node) {
2666                                 root_owner = root->root_key.objectid;
2667                                 root_gen =
2668                                    btrfs_header_generation(path->nodes[*level]);
2669                         } else {
2670                                 struct extent_buffer *node;
2671                                 node = path->nodes[*level + 1];
2672                                 root_owner = btrfs_header_owner(node);
2673                                 root_gen = btrfs_header_generation(node);
2674                         }
2675                         ret = btrfs_free_extent(trans, root,
2676                                                 path->nodes[*level]->start,
2677                                                 path->nodes[*level]->len,
2678                                                 root_owner, root_gen, 0, 0, 1);
2679                         BUG_ON(ret);
2680                         free_extent_buffer(path->nodes[*level]);
2681                         path->nodes[*level] = NULL;
2682                         *level = i + 1;
2683                 }
2684         }
2685         return 1;
2686 }
2687
2688 /*
2689  * drop the reference count on the tree rooted at 'snap'.  This traverses
2690  * the tree freeing any blocks that have a ref count of zero after being
2691  * decremented.
2692  */
2693 int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
2694                         *root)
2695 {
2696         int ret = 0;
2697         int wret;
2698         int level;
2699         struct btrfs_path *path;
2700         int i;
2701         int orig_level;
2702         struct btrfs_root_item *root_item = &root->root_item;
2703
2704         WARN_ON(!mutex_is_locked(&root->fs_info->drop_mutex));
2705         path = btrfs_alloc_path();
2706         BUG_ON(!path);
2707
2708         level = btrfs_header_level(root->node);
2709         orig_level = level;
2710         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
2711                 path->nodes[level] = root->node;
2712                 extent_buffer_get(root->node);
2713                 path->slots[level] = 0;
2714         } else {
2715                 struct btrfs_key key;
2716                 struct btrfs_disk_key found_key;
2717                 struct extent_buffer *node;
2718
2719                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
2720                 level = root_item->drop_level;
2721                 path->lowest_level = level;
2722                 wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2723                 if (wret < 0) {
2724                         ret = wret;
2725                         goto out;
2726                 }
2727                 node = path->nodes[level];
2728                 btrfs_node_key(node, &found_key, path->slots[level]);
2729                 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
2730                                sizeof(found_key)));
2731                 /*
2732                  * unlock our path, this is safe because only this
2733                  * function is allowed to delete this snapshot
2734                  */
2735                 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
2736                         if (path->nodes[i] && path->locks[i]) {
2737                                 path->locks[i] = 0;
2738                                 btrfs_tree_unlock(path->nodes[i]);
2739                         }
2740                 }
2741         }
2742         while(1) {
2743                 wret = walk_down_tree(trans, root, path, &level);
2744                 if (wret > 0)
2745                         break;
2746                 if (wret < 0)
2747                         ret = wret;
2748
2749                 wret = walk_up_tree(trans, root, path, &level);
2750                 if (wret > 0)
2751                         break;
2752                 if (wret < 0)
2753                         ret = wret;
2754                 if (trans->transaction->in_commit) {
2755                         ret = -EAGAIN;
2756                         break;
2757                 }
2758                 atomic_inc(&root->fs_info->throttle_gen);
2759                 wake_up(&root->fs_info->transaction_throttle);
2760         }
2761         for (i = 0; i <= orig_level; i++) {
2762                 if (path->nodes[i]) {
2763                         free_extent_buffer(path->nodes[i]);
2764                         path->nodes[i] = NULL;
2765                 }
2766         }
2767 out:
2768         btrfs_free_path(path);
2769         return ret;
2770 }
2771
2772 int btrfs_free_block_groups(struct btrfs_fs_info *info)
2773 {
2774         u64 start;
2775         u64 end;
2776         u64 ptr;
2777         int ret;
2778
2779         mutex_lock(&info->alloc_mutex);
2780         while(1) {
2781                 ret = find_first_extent_bit(&info->block_group_cache, 0,
2782                                             &start, &end, (unsigned int)-1);
2783                 if (ret)
2784                         break;
2785                 ret = get_state_private(&info->block_group_cache, start, &ptr);
2786                 if (!ret)
2787                         kfree((void *)(unsigned long)ptr);
2788                 clear_extent_bits(&info->block_group_cache, start,
2789                                   end, (unsigned int)-1, GFP_NOFS);
2790         }
2791         while(1) {
2792                 ret = find_first_extent_bit(&info->free_space_cache, 0,
2793                                             &start, &end, EXTENT_DIRTY);
2794                 if (ret)
2795                         break;
2796                 clear_extent_dirty(&info->free_space_cache, start,
2797                                    end, GFP_NOFS);
2798         }
2799         mutex_unlock(&info->alloc_mutex);
2800         return 0;
2801 }
2802
/*
 * Clamp a readahead window of @nr pages beginning at @start so it does
 * not run past @last; returns the index of the final page to read.
 */
static unsigned long calc_ra(unsigned long start, unsigned long last,
                             unsigned long nr)
{
        unsigned long end = start + nr - 1;

        return last < end ? last : end;
}
2808
2809 static int noinline relocate_inode_pages(struct inode *inode, u64 start,
2810                                          u64 len)
2811 {
2812         u64 page_start;
2813         u64 page_end;
2814         unsigned long last_index;
2815         unsigned long i;
2816         struct page *page;
2817         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2818         struct file_ra_state *ra;
2819         unsigned long total_read = 0;
2820         unsigned long ra_pages;
2821         struct btrfs_ordered_extent *ordered;
2822         struct btrfs_trans_handle *trans;
2823
2824         ra = kzalloc(sizeof(*ra), GFP_NOFS);
2825
2826         mutex_lock(&inode->i_mutex);
2827         i = start >> PAGE_CACHE_SHIFT;
2828         last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
2829
2830         ra_pages = BTRFS_I(inode)->root->fs_info->bdi.ra_pages;
2831
2832         file_ra_state_init(ra, inode->i_mapping);
2833
2834         for (; i <= last_index; i++) {
2835                 if (total_read % ra_pages == 0) {
2836                         btrfs_force_ra(inode->i_mapping, ra, NULL, i,
2837                                        calc_ra(i, last_index, ra_pages));
2838                 }
2839                 total_read++;
2840 again:
2841                 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
2842                         goto truncate_racing;
2843                 page = grab_cache_page(inode->i_mapping, i);
2844                 if (!page) {
2845                         goto out_unlock;
2846                 }
2847                 if (!PageUptodate(page)) {
2848                         btrfs_readpage(NULL, page);
2849                         lock_page(page);
2850                         if (!PageUptodate(page)) {
2851                                 unlock_page(page);
2852                                 page_cache_release(page);
2853                                 goto out_unlock;
2854                         }
2855                 }
2856                 wait_on_page_writeback(page);
2857
2858                 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2859                 page_end = page_start + PAGE_CACHE_SIZE - 1;
2860                 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2861
2862                 ordered = btrfs_lookup_ordered_extent(inode, page_start);
2863                 if (ordered) {
2864                         unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2865                         unlock_page(page);
2866                         page_cache_release(page);
2867                         btrfs_start_ordered_extent(inode, ordered, 1);
2868                         btrfs_put_ordered_extent(ordered);
2869                         goto again;
2870                 }
2871                 set_page_extent_mapped(page);
2872
2873                 /*
2874                  * make sure page_mkwrite is called for this page if userland
2875                  * wants to change it from mmap
2876                  */
2877                 clear_page_dirty_for_io(page);
2878
2879                 set_extent_delalloc(io_tree, page_start,
2880                                     page_end, GFP_NOFS);
2881                 set_page_dirty(page);
2882
2883                 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2884                 unlock_page(page);
2885                 page_cache_release(page);
2886         }
2887
2888 out_unlock:
2889         /* we have to start the IO in order to get the ordered extents
2890          * instantiated.  This allows the relocation to code to wait
2891          * for all the ordered extents to hit the disk.
2892          *
2893          * Otherwise, it would constantly loop over the same extents
2894          * because the old ones don't get deleted  until the IO is
2895          * started
2896          */
2897         btrfs_fdatawrite_range(inode->i_mapping, start, start + len - 1,
2898                                WB_SYNC_NONE);
2899         kfree(ra);
2900         trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
2901         if (trans) {
2902                 btrfs_end_transaction(trans, BTRFS_I(inode)->root);
2903                 mark_inode_dirty(inode);
2904         }
2905         mutex_unlock(&inode->i_mutex);
2906         return 0;
2907
2908 truncate_racing:
2909         vmtruncate(inode, inode->i_size);
2910         balance_dirty_pages_ratelimited_nr(inode->i_mapping,
2911                                            total_read);
2912         goto out_unlock;
2913 }
2914
2915 /*
2916  * The back references tell us which tree holds a ref on a block,
2917  * but it is possible for the tree root field in the reference to
2918  * reflect the original root before a snapshot was made.  In this
2919  * case we should search through all the children of a given root
2920  * to find potential holders of references on a block.
2921  *
2922  * Instead, we do something a little less fancy and just search
2923  * all the roots for a given key/block combination.
2924  */
2925 static int find_root_for_ref(struct btrfs_root *root,
2926                              struct btrfs_path *path,
2927                              struct btrfs_key *key0,
2928                              int level,
2929                              int file_key,
2930                              struct btrfs_root **found_root,
2931                              u64 bytenr)
2932 {
2933         struct btrfs_key root_location;
2934         struct btrfs_root *cur_root = *found_root;
2935         struct btrfs_file_extent_item *file_extent;
2936         u64 root_search_start = BTRFS_FS_TREE_OBJECTID;
2937         u64 found_bytenr;
2938         int ret;
2939
2940         root_location.offset = (u64)-1;
2941         root_location.type = BTRFS_ROOT_ITEM_KEY;
2942         path->lowest_level = level;
2943         path->reada = 0;
2944         while(1) {
2945                 ret = btrfs_search_slot(NULL, cur_root, key0, path, 0, 0);
2946                 found_bytenr = 0;
2947                 if (ret == 0 && file_key) {
2948                         struct extent_buffer *leaf = path->nodes[0];
2949                         file_extent = btrfs_item_ptr(leaf, path->slots[0],
2950                                              struct btrfs_file_extent_item);
2951                         if (btrfs_file_extent_type(leaf, file_extent) ==
2952                             BTRFS_FILE_EXTENT_REG) {
2953                                 found_bytenr =
2954                                         btrfs_file_extent_disk_bytenr(leaf,
2955                                                                file_extent);
2956                        }
2957                 } else if (!file_key) {
2958                         if (path->nodes[level])
2959                                 found_bytenr = path->nodes[level]->start;
2960                 }
2961
2962                 btrfs_release_path(cur_root, path);
2963
2964                 if (found_bytenr == bytenr) {
2965                         *found_root = cur_root;
2966                         ret = 0;
2967                         goto out;
2968                 }
2969                 ret = btrfs_search_root(root->fs_info->tree_root,
2970                                         root_search_start, &root_search_start);
2971                 if (ret)
2972                         break;
2973
2974                 root_location.objectid = root_search_start;
2975                 cur_root = btrfs_read_fs_root_no_name(root->fs_info,
2976                                                       &root_location);
2977                 if (!cur_root) {
2978                         ret = 1;
2979                         break;
2980                 }
2981         }
2982 out:
2983         path->lowest_level = 0;
2984         return ret;
2985 }
2986
/*
 * Relocate one backref of the extent described by extent_key.  The
 * struct btrfs_extent_ref being processed must be the item at
 * path->slots[0] on entry.
 *
 * Data extents are relocated by re-dirtying the referencing file's
 * pages; tree blocks are relocated by COWing them via a write search.
 * The last_file_* parameters cache the most recently handled file
 * extent so the same (file, offset, root) triple is not relocated
 * twice in a row for the same extent.
 *
 * Called with fs_info->alloc_mutex held; the mutex is dropped for the
 * heavy lifting and re-acquired before returning.
 *
 * note, this releases the path
 */
static int noinline relocate_one_reference(struct btrfs_root *extent_root,
				  struct btrfs_path *path,
				  struct btrfs_key *extent_key,
				  u64 *last_file_objectid,
				  u64 *last_file_offset,
				  u64 *last_file_root,
				  u64 last_extent)
{
	struct inode *inode;
	struct btrfs_root *found_root;
	struct btrfs_key root_location;
	struct btrfs_key found_key;
	struct btrfs_extent_ref *ref;
	u64 ref_root;
	u64 ref_gen;
	u64 ref_objectid;
	u64 ref_offset;
	int ret;
	int level;

	WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));

	/* copy everything out of the ref item before the path is released */
	ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
			     struct btrfs_extent_ref);
	ref_root = btrfs_ref_root(path->nodes[0], ref);
	ref_gen = btrfs_ref_generation(path->nodes[0], ref);
	ref_objectid = btrfs_ref_objectid(path->nodes[0], ref);
	ref_offset = btrfs_ref_offset(path->nodes[0], ref);
	btrfs_release_path(extent_root, path);

	/* offset (u64)-1 presumably selects the most recent root item for
	 * this objectid, while gen 0 refs use an exact offset 0 lookup —
	 * TODO confirm against btrfs_read_fs_root_no_name
	 */
	root_location.objectid = ref_root;
	if (ref_gen == 0)
		root_location.offset = 0;
	else
		root_location.offset = (u64)-1;
	root_location.type = BTRFS_ROOT_ITEM_KEY;

	found_root = btrfs_read_fs_root_no_name(extent_root->fs_info,
						&root_location);
	BUG_ON(!found_root);
	mutex_unlock(&extent_root->fs_info->alloc_mutex);

	if (ref_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
		/* data extent: find the owning root and inode, then rewrite
		 * the referencing pages
		 */
		found_key.objectid = ref_objectid;
		found_key.type = BTRFS_EXTENT_DATA_KEY;
		found_key.offset = ref_offset;
		level = 0;

		/* already handled this exact file extent last time around */
		if (last_extent == extent_key->objectid &&
		    *last_file_objectid == ref_objectid &&
		    *last_file_offset == ref_offset &&
		    *last_file_root == ref_root)
			goto out;

		ret = find_root_for_ref(extent_root, path, &found_key,
					level, 1, &found_root,
					extent_key->objectid);

		if (ret)
			goto out;

		/* re-check the dedup cache; find_root_for_ref may have
		 * replaced found_root
		 */
		if (last_extent == extent_key->objectid &&
		    *last_file_objectid == ref_objectid &&
		    *last_file_offset == ref_offset &&
		    *last_file_root == ref_root)
			goto out;

		inode = btrfs_iget_locked(extent_root->fs_info->sb,
					  ref_objectid, found_root);
		if (inode->i_state & I_NEW) {
			/* the inode and parent dir are two different roots */
			BTRFS_I(inode)->root = found_root;
			BTRFS_I(inode)->location.objectid = ref_objectid;
			BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
			BTRFS_I(inode)->location.offset = 0;
			btrfs_read_locked_inode(inode);
			unlock_new_inode(inode);

		}
		/* this can happen if the reference is not against
		 * the latest version of the tree root
		 */
		if (is_bad_inode(inode))
			goto out;

		*last_file_objectid = inode->i_ino;
		*last_file_root = found_root->root_key.objectid;
		*last_file_offset = ref_offset;

		relocate_inode_pages(inode, ref_offset, extent_key->offset);
		iput(inode);
	} else {
		struct btrfs_trans_handle *trans;
		struct extent_buffer *eb;
		int needs_lock = 0;

		/* tree block: read it just to learn its level and first key */
		eb = read_tree_block(found_root, extent_key->objectid,
				     extent_key->offset, 0);
		btrfs_tree_lock(eb);
		level = btrfs_header_level(eb);

		if (level == 0)
			btrfs_item_key_to_cpu(eb, &found_key, 0);
		else
			btrfs_node_key_to_cpu(eb, &found_key, 0);

		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);

		ret = find_root_for_ref(extent_root, path, &found_key,
					level, 0, &found_root,
					extent_key->objectid);

		if (ret)
			goto out;

		/*
		 * right here almost anything could happen to our key,
		 * but that's ok.  The cow below will either relocate it
		 * or someone else will have relocated it.  Either way,
		 * it is in a different spot than it was before and
		 * we're happy.
		 */

		trans = btrfs_start_transaction(found_root, 1);

		/* the extent, chunk and dev trees are covered by
		 * alloc_mutex; take it before cowing into them
		 */
		if (found_root == extent_root->fs_info->extent_root ||
		    found_root == extent_root->fs_info->chunk_root ||
		    found_root == extent_root->fs_info->dev_root) {
			needs_lock = 1;
			mutex_lock(&extent_root->fs_info->alloc_mutex);
		}

		/* a write search with lowest_level set cows the path all
		 * the way down to our block, which relocates it
		 */
		path->lowest_level = level;
		path->reada = 2;
		ret = btrfs_search_slot(trans, found_root, &found_key, path,
					0, 1);
		path->lowest_level = 0;
		btrfs_release_path(found_root, path);

		if (found_root == found_root->fs_info->extent_root)
			btrfs_extent_post_op(trans, found_root);
		if (needs_lock)
			mutex_unlock(&extent_root->fs_info->alloc_mutex);

		btrfs_end_transaction(trans, found_root);

	}
out:
	/* re-take the mutex the caller expects to still hold.
	 * NOTE(review): the ret values collected above are discarded and
	 * this always returns 0 — confirm whether that is intentional
	 */
	mutex_lock(&extent_root->fs_info->alloc_mutex);
	return 0;
}
3142
3143 static int noinline del_extent_zero(struct btrfs_root *extent_root,
3144                                     struct btrfs_path *path,
3145                                     struct btrfs_key *extent_key)
3146 {
3147         int ret;
3148         struct btrfs_trans_handle *trans;
3149
3150         trans = btrfs_start_transaction(extent_root, 1);
3151         ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
3152         if (ret > 0) {
3153                 ret = -EIO;
3154                 goto out;
3155         }
3156         if (ret < 0)
3157                 goto out;
3158         ret = btrfs_del_item(trans, extent_root, path);
3159 out:
3160         btrfs_end_transaction(trans, extent_root);
3161         return ret;
3162 }
3163
3164 static int noinline relocate_one_extent(struct btrfs_root *extent_root,
3165                                         struct btrfs_path *path,
3166                                         struct btrfs_key *extent_key)
3167 {
3168         struct btrfs_key key;
3169         struct btrfs_key found_key;
3170         struct extent_buffer *leaf;
3171         u64 last_file_objectid = 0;
3172         u64 last_file_root = 0;
3173         u64 last_file_offset = (u64)-1;
3174         u64 last_extent = 0;
3175         u32 nritems;
3176         u32 item_size;
3177         int ret = 0;
3178
3179         if (extent_key->objectid == 0) {
3180                 ret = del_extent_zero(extent_root, path, extent_key);
3181                 goto out;
3182         }
3183         key.objectid = extent_key->objectid;
3184         key.type = BTRFS_EXTENT_REF_KEY;
3185         key.offset = 0;
3186
3187         while(1) {
3188                 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3189
3190                 if (ret < 0)
3191                         goto out;
3192
3193                 ret = 0;
3194                 leaf = path->nodes[0];
3195                 nritems = btrfs_header_nritems(leaf);
3196                 if (path->slots[0] == nritems) {
3197                         ret = btrfs_next_leaf(extent_root, path);
3198                         if (ret > 0) {
3199                                 ret = 0;
3200                                 goto out;
3201                         }
3202                         if (ret < 0)
3203                                 goto out;
3204                         leaf = path->nodes[0];
3205                 }
3206
3207                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3208                 if (found_key.objectid != extent_key->objectid) {
3209                         break;
3210                 }
3211
3212                 if (found_key.type != BTRFS_EXTENT_REF_KEY) {
3213                         break;
3214                 }
3215
3216                 key.offset = found_key.offset + 1;
3217                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3218
3219                 ret = relocate_one_reference(extent_root, path, extent_key,
3220                                              &last_file_objectid,
3221                                              &last_file_offset,
3222                                              &last_file_root, last_extent);
3223                 if (ret)
3224                         goto out;
3225                 last_extent = extent_key->objectid;
3226         }
3227         ret = 0;
3228 out:
3229         btrfs_release_path(extent_root, path);
3230         return ret;
3231 }
3232
3233 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
3234 {
3235         u64 num_devices;
3236         u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
3237                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
3238
3239         num_devices = root->fs_info->fs_devices->num_devices;
3240         if (num_devices == 1) {
3241                 stripped |= BTRFS_BLOCK_GROUP_DUP;
3242                 stripped = flags & ~stripped;
3243
3244                 /* turn raid0 into single device chunks */
3245                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
3246                         return stripped;
3247
3248                 /* turn mirroring into duplication */
3249                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3250                              BTRFS_BLOCK_GROUP_RAID10))
3251                         return stripped | BTRFS_BLOCK_GROUP_DUP;
3252                 return flags;
3253         } else {
3254                 /* they already had raid on here, just return */
3255                 if (flags & stripped)
3256                         return flags;
3257
3258                 stripped |= BTRFS_BLOCK_GROUP_DUP;
3259                 stripped = flags & ~stripped;
3260
3261                 /* switch duplicated blocks with raid1 */
3262                 if (flags & BTRFS_BLOCK_GROUP_DUP)
3263                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
3264
3265                 /* turn single device chunks into raid0 */
3266                 return stripped | BTRFS_BLOCK_GROUP_RAID0;
3267         }
3268         return flags;
3269 }
3270
/*
 * Make sure there is somewhere to copy the live data of
 * shrink_block_group before the group is removed.  If the group still
 * has used bytes, allocate (or, with force == 0, opportunistically try
 * to allocate) a new chunk big enough to hold them.
 *
 * Called with fs_info->alloc_mutex held.  The mutex is dropped while a
 * transaction is started and ended — presumably to respect the
 * transaction-before-alloc_mutex lock order; TODO confirm — and is
 * held again on return.
 */
int __alloc_chunk_for_shrink(struct btrfs_root *root,
		     struct btrfs_block_group_cache *shrink_block_group,
		     int force)
{
	struct btrfs_trans_handle *trans;
	u64 new_alloc_flags;
	u64 calc;

	spin_lock(&shrink_block_group->lock);
	if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
		spin_unlock(&shrink_block_group->lock);
		mutex_unlock(&root->fs_info->alloc_mutex);

		trans = btrfs_start_transaction(root, 1);
		mutex_lock(&root->fs_info->alloc_mutex);
		/* re-take the group lock: the item may have changed while
		 * the locks were dropped
		 */
		spin_lock(&shrink_block_group->lock);

		new_alloc_flags = update_block_group_flags(root,
						   shrink_block_group->flags);
		/* if the raid profile is changing, only the bytes actually
		 * in use need a home; otherwise reserve a full group worth
		 */
		if (new_alloc_flags != shrink_block_group->flags) {
			calc =
			     btrfs_block_group_used(&shrink_block_group->item);
		} else {
			calc = shrink_block_group->key.offset;
		}
		spin_unlock(&shrink_block_group->lock);

		/* 2MB of slop on top of the calculated size */
		do_chunk_alloc(trans, root->fs_info->extent_root,
			       calc + 2 * 1024 * 1024, new_alloc_flags, force);

		mutex_unlock(&root->fs_info->alloc_mutex);
		btrfs_end_transaction(trans, root);
		mutex_lock(&root->fs_info->alloc_mutex);
	} else
		spin_unlock(&shrink_block_group->lock);
	return 0;
}
3308
/*
 * Empty and remove the block group starting at shrink_start.
 *
 * The group is marked read-only, every extent inside it is relocated
 * with relocate_one_extent(), transactions are committed until a full
 * pass finds nothing left, and finally the block group item plus its
 * in-memory cache state are deleted.
 *
 * Takes and releases fs_info->alloc_mutex internally (dropping it
 * around transaction commits).  Returns 0 on success or a negative
 * errno.
 */
int btrfs_shrink_extent_tree(struct btrfs_root *root, u64 shrink_start)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	struct btrfs_path *path;
	u64 cur_byte;
	u64 total_found;
	u64 shrink_last_byte;
	struct btrfs_block_group_cache *shrink_block_group;
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int progress;

	mutex_lock(&root->fs_info->alloc_mutex);
	shrink_block_group = btrfs_lookup_block_group(root->fs_info,
						      shrink_start);
	BUG_ON(!shrink_block_group);

	/* first byte past the end of the group */
	shrink_last_byte = shrink_block_group->key.objectid +
		shrink_block_group->key.offset;

	/* stop new allocations from counting on this group's space */
	shrink_block_group->space_info->total_bytes -=
		shrink_block_group->key.offset;
	/* NOTE(review): btrfs_alloc_path() result is not checked for NULL */
	path = btrfs_alloc_path();
	root = root->fs_info->extent_root;
	path->reada = 2;

	printk("btrfs relocating block group %llu flags %llu\n",
	       (unsigned long long)shrink_start,
	       (unsigned long long)shrink_block_group->flags);

	/* make sure the copied extents have somewhere to land */
	__alloc_chunk_for_shrink(root, shrink_block_group, 1);

again:

	/* read-only keeps new allocations out while we empty the group */
	shrink_block_group->ro = 1;

	total_found = 0;
	progress = 0;
	key.objectid = shrink_start;
	key.offset = 0;
	key.type = 0;
	cur_byte = key.objectid;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	/* an extent item starting before shrink_start may still overlap
	 * into the group; back up one extent item to catch it
	 */
	ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
	if (ret < 0)
		goto out;

	if (ret == 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid + found_key.offset > shrink_start &&
		    found_key.objectid < shrink_last_byte) {
			cur_byte = found_key.objectid;
			key.objectid = cur_byte;
		}
	}
	btrfs_release_path(root, path);

	while(1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

next:
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret == 1) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* past the end of the group, this pass is done */
		if (found_key.objectid >= shrink_last_byte)
			break;

		/* be nice to the scheduler: drop the path, reschedule and
		 * re-search from the last key we saw
		 */
		if (progress && need_resched()) {
			memcpy(&key, &found_key, sizeof(key));
			cond_resched();
			btrfs_release_path(root, path);
			btrfs_search_slot(NULL, root, &key, path, 0, 0);
			progress = 0;
			goto next;
		}
		progress = 1;

		/* skip non-extent items and extents we already covered */
		if (btrfs_key_type(&found_key) != BTRFS_EXTENT_ITEM_KEY ||
		    found_key.objectid + found_key.offset <= cur_byte) {
			memcpy(&key, &found_key, sizeof(key));
			key.offset++;
			path->slots[0]++;
			goto next;
		}

		total_found++;
		cur_byte = found_key.objectid + found_key.offset;
		key.objectid = cur_byte;
		btrfs_release_path(root, path);
		ret = relocate_one_extent(root, path, &found_key);
		__alloc_chunk_for_shrink(root, shrink_block_group, 0);
	}

	btrfs_release_path(root, path);

	if (total_found > 0) {
		printk("btrfs relocate found %llu last extent was %llu\n",
		       (unsigned long long)total_found,
		       (unsigned long long)found_key.objectid);
		/* commit (twice, with snapshot/ordered-extent cleanup in
		 * between) so the relocated extents are really freed, then
		 * scan the group again for stragglers
		 */
		mutex_unlock(&root->fs_info->alloc_mutex);
		trans = btrfs_start_transaction(tree_root, 1);
		btrfs_commit_transaction(trans, tree_root);

		btrfs_clean_old_snapshots(tree_root);

		btrfs_wait_ordered_extents(tree_root);

		trans = btrfs_start_transaction(tree_root, 1);
		btrfs_commit_transaction(trans, tree_root);
		mutex_lock(&root->fs_info->alloc_mutex);
		goto again;
	}

	/*
	 * we've freed all the extents, now remove the block
	 * group item from the tree
	 */
	mutex_unlock(&root->fs_info->alloc_mutex);

	trans = btrfs_start_transaction(root, 1);

	mutex_lock(&root->fs_info->alloc_mutex);
	memcpy(&key, &shrink_block_group->key, sizeof(key));

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -EIO;
	if (ret < 0) {
		btrfs_end_transaction(trans, root);
		goto out;
	}

	/* drop the in-memory cache state for the group's byte range */
	clear_extent_bits(&info->block_group_cache, key.objectid,
			  key.objectid + key.offset - 1,
			  (unsigned int)-1, GFP_NOFS);


	clear_extent_bits(&info->free_space_cache,
			   key.objectid, key.objectid + key.offset - 1,
			   (unsigned int)-1, GFP_NOFS);

	/* poison then free the cache struct; the tree state pointing at
	 * it was cleared just above
	 */
	memset(shrink_block_group, 0, sizeof(*shrink_block_group));
	kfree(shrink_block_group);

	btrfs_del_item(trans, root, path);
	btrfs_release_path(root, path);
	mutex_unlock(&root->fs_info->alloc_mutex);
	btrfs_commit_transaction(trans, root);

	mutex_lock(&root->fs_info->alloc_mutex);

	/* the code to unpin extents might set a few bits in the free
	 * space cache for this range again
	 */
	clear_extent_bits(&info->free_space_cache,
			   key.objectid, key.objectid + key.offset - 1,
			   (unsigned int)-1, GFP_NOFS);
out:
	btrfs_free_path(path);
	mutex_unlock(&root->fs_info->alloc_mutex);
	return ret;
}
3496
3497 int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path,
3498                            struct btrfs_key *key)
3499 {
3500         int ret = 0;
3501         struct btrfs_key found_key;
3502         struct extent_buffer *leaf;
3503         int slot;
3504
3505         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
3506         if (ret < 0)
3507                 goto out;
3508
3509         while(1) {
3510                 slot = path->slots[0];
3511                 leaf = path->nodes[0];
3512                 if (slot >= btrfs_header_nritems(leaf)) {
3513                         ret = btrfs_next_leaf(root, path);
3514                         if (ret == 0)
3515                                 continue;
3516                         if (ret < 0)
3517                                 goto out;
3518                         break;
3519                 }
3520                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3521
3522                 if (found_key.objectid >= key->objectid &&
3523                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
3524                         ret = 0;
3525                         goto out;
3526                 }
3527                 path->slots[0]++;
3528         }
3529         ret = -ENOENT;
3530 out:
3531         return ret;
3532 }
3533
3534 int btrfs_read_block_groups(struct btrfs_root *root)
3535 {
3536         struct btrfs_path *path;
3537         int ret;
3538         int bit;
3539         struct btrfs_block_group_cache *cache;
3540         struct btrfs_fs_info *info = root->fs_info;
3541         struct btrfs_space_info *space_info;
3542         struct extent_io_tree *block_group_cache;
3543         struct btrfs_key key;
3544         struct btrfs_key found_key;
3545         struct extent_buffer *leaf;
3546
3547         block_group_cache = &info->block_group_cache;
3548         root = info->extent_root;
3549         key.objectid = 0;
3550         key.offset = 0;
3551         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
3552         path = btrfs_alloc_path();
3553         if (!path)
3554                 return -ENOMEM;
3555
3556         mutex_lock(&root->fs_info->alloc_mutex);
3557         while(1) {
3558                 ret = find_first_block_group(root, path, &key);
3559                 if (ret > 0) {
3560                         ret = 0;
3561                         goto error;
3562                 }
3563                 if (ret != 0)
3564                         goto error;
3565
3566                 leaf = path->nodes[0];
3567                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3568                 cache = kzalloc(sizeof(*cache), GFP_NOFS);
3569                 if (!cache) {
3570                         ret = -ENOMEM;
3571                         break;
3572                 }
3573
3574                 spin_lock_init(&cache->lock);
3575                 read_extent_buffer(leaf, &cache->item,
3576                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
3577                                    sizeof(cache->item));
3578                 memcpy(&cache->key, &found_key, sizeof(found_key));
3579
3580                 key.objectid = found_key.objectid + found_key.offset;
3581                 btrfs_release_path(root, path);
3582                 cache->flags = btrfs_block_group_flags(&cache->item);
3583                 bit = 0;
3584                 if (cache->flags & BTRFS_BLOCK_GROUP_DATA) {
3585                         bit = BLOCK_GROUP_DATA;
3586                 } else if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
3587                         bit = BLOCK_GROUP_SYSTEM;
3588                 } else if (cache->flags & BTRFS_BLOCK_GROUP_METADATA) {
3589                         bit = BLOCK_GROUP_METADATA;
3590                 }
3591                 set_avail_alloc_bits(info, cache->flags);
3592
3593                 ret = update_space_info(info, cache->flags, found_key.offset,
3594                                         btrfs_block_group_used(&cache->item),
3595                                         &space_info);
3596                 BUG_ON(ret);
3597                 cache->space_info = space_info;
3598
3599                 /* use EXTENT_LOCKED to prevent merging */
3600                 set_extent_bits(block_group_cache, found_key.objectid,
3601                                 found_key.objectid + found_key.offset - 1,
3602                                 EXTENT_LOCKED, GFP_NOFS);
3603                 set_state_private(block_group_cache, found_key.objectid,
3604                                   (unsigned long)cache);
3605                 set_extent_bits(block_group_cache, found_key.objectid,
3606                                 found_key.objectid + found_key.offset - 1,
3607                                 bit | EXTENT_LOCKED, GFP_NOFS);
3608                 if (key.objectid >=
3609                     btrfs_super_total_bytes(&info->super_copy))
3610                         break;
3611         }
3612         ret = 0;
3613 error:
3614         btrfs_free_path(path);
3615         mutex_unlock(&root->fs_info->alloc_mutex);
3616         return ret;
3617 }
3618
/*
 * Create a new block group for the chunk at chunk_offset: allocate and
 * fill the in-memory cache entry, register it in the block_group_cache
 * extent_io tree, account its space in the matching space_info, and
 * insert the on-disk block group item into the extent tree.
 *
 * Must be called with fs_info->alloc_mutex held and inside the given
 * transaction.  Returns 0 (allocation/insert failures BUG()).
 */
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, u64 bytes_used,
			   u64 type, u64 chunk_objectid, u64 chunk_offset,
			   u64 size)
{
	int ret;
	int bit = 0;
	struct btrfs_root *extent_root;
	struct btrfs_block_group_cache *cache;
	struct extent_io_tree *block_group_cache;

	WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
	extent_root = root->fs_info->extent_root;
	block_group_cache = &root->fs_info->block_group_cache;

	cache = kzalloc(sizeof(*cache), GFP_NOFS);
	BUG_ON(!cache);
	/* key: (chunk start, BLOCK_GROUP_ITEM, chunk size) */
	cache->key.objectid = chunk_offset;
	cache->key.offset = size;
	spin_lock_init(&cache->lock);
	btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);

	btrfs_set_block_group_used(&cache->item, bytes_used);
	btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
	cache->flags = type;
	btrfs_set_block_group_flags(&cache->item, type);

	ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
				&cache->space_info);
	BUG_ON(ret);

	/* same registration sequence as btrfs_read_block_groups: lock the
	 * range first (EXTENT_LOCKED prevents merging), attach the cache
	 * pointer as state private, then add the type bit
	 */
	bit = block_group_state_bits(type);
	set_extent_bits(block_group_cache, chunk_offset,
			chunk_offset + size - 1,
			EXTENT_LOCKED, GFP_NOFS);
	set_state_private(block_group_cache, chunk_offset,
			  (unsigned long)cache);
	set_extent_bits(block_group_cache, chunk_offset,
			chunk_offset + size - 1,
			bit | EXTENT_LOCKED, GFP_NOFS);

	ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
				sizeof(cache->item));
	BUG_ON(ret);

	/* flush extent-tree bookkeeping generated by the insert */
	finish_current_insert(trans, extent_root);
	ret = del_pending_extents(trans, extent_root);
	BUG_ON(ret);
	set_avail_alloc_bits(extent_root->fs_info, type);

	return 0;
}