Btrfs: Record dirty pages tree-log pages in an extent_io tree
[safe/jmp/linux-2.6] / fs / btrfs / extent-tree.c
1 /*
2  * Copyright (C) 2007 Oracle.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
22 #include "hash.h"
23 #include "crc32c.h"
24 #include "ctree.h"
25 #include "disk-io.h"
26 #include "print-tree.h"
27 #include "transaction.h"
28 #include "volumes.h"
29 #include "locking.h"
30 #include "ref-cache.h"
31
/*
 * Block group type flags are tracked in the fs-wide block_group_cache
 * extent_io tree by reusing existing extent state bits; each group type
 * gets a distinct bit so a single find_first_extent_bit() call can
 * filter lookups by type.
 */
#define BLOCK_GROUP_DATA     EXTENT_WRITEBACK
#define BLOCK_GROUP_METADATA EXTENT_UPTODATE
#define BLOCK_GROUP_SYSTEM   EXTENT_NEW

/* marks block groups whose on-disk items need to be (re)written */
#define BLOCK_GROUP_DIRTY EXTENT_DIRTY

/* forward declarations for helpers defined later in this file */
static int finish_current_insert(struct btrfs_trans_handle *trans, struct
				 btrfs_root *extent_root);
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
			       btrfs_root *extent_root);
static struct btrfs_block_group_cache *
__btrfs_find_block_group(struct btrfs_root *root,
			 struct btrfs_block_group_cache *hint,
			 u64 search_start, int data, int owner);
47 void maybe_lock_mutex(struct btrfs_root *root)
48 {
49         if (root != root->fs_info->extent_root &&
50             root != root->fs_info->chunk_root &&
51             root != root->fs_info->dev_root) {
52                 mutex_lock(&root->fs_info->alloc_mutex);
53         }
54 }
55
56 void maybe_unlock_mutex(struct btrfs_root *root)
57 {
58         if (root != root->fs_info->extent_root &&
59             root != root->fs_info->chunk_root &&
60             root != root->fs_info->dev_root) {
61                 mutex_unlock(&root->fs_info->alloc_mutex);
62         }
63 }
64
65 static int cache_block_group(struct btrfs_root *root,
66                              struct btrfs_block_group_cache *block_group)
67 {
68         struct btrfs_path *path;
69         int ret;
70         struct btrfs_key key;
71         struct extent_buffer *leaf;
72         struct extent_io_tree *free_space_cache;
73         int slot;
74         u64 last = 0;
75         u64 hole_size;
76         u64 first_free;
77         int found = 0;
78
79         if (!block_group)
80                 return 0;
81
82         root = root->fs_info->extent_root;
83         free_space_cache = &root->fs_info->free_space_cache;
84
85         if (block_group->cached)
86                 return 0;
87
88         path = btrfs_alloc_path();
89         if (!path)
90                 return -ENOMEM;
91
92         path->reada = 2;
93         /*
94          * we get into deadlocks with paths held by callers of this function.
95          * since the alloc_mutex is protecting things right now, just
96          * skip the locking here
97          */
98         path->skip_locking = 1;
99         first_free = block_group->key.objectid;
100         key.objectid = block_group->key.objectid;
101         key.offset = 0;
102         btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
103         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
104         if (ret < 0)
105                 return ret;
106         ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
107         if (ret < 0)
108                 return ret;
109         if (ret == 0) {
110                 leaf = path->nodes[0];
111                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
112                 if (key.objectid + key.offset > first_free)
113                         first_free = key.objectid + key.offset;
114         }
115         while(1) {
116                 leaf = path->nodes[0];
117                 slot = path->slots[0];
118                 if (slot >= btrfs_header_nritems(leaf)) {
119                         ret = btrfs_next_leaf(root, path);
120                         if (ret < 0)
121                                 goto err;
122                         if (ret == 0) {
123                                 continue;
124                         } else {
125                                 break;
126                         }
127                 }
128                 btrfs_item_key_to_cpu(leaf, &key, slot);
129                 if (key.objectid < block_group->key.objectid) {
130                         goto next;
131                 }
132                 if (key.objectid >= block_group->key.objectid +
133                     block_group->key.offset) {
134                         break;
135                 }
136
137                 if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
138                         if (!found) {
139                                 last = first_free;
140                                 found = 1;
141                         }
142                         if (key.objectid > last) {
143                                 hole_size = key.objectid - last;
144                                 set_extent_dirty(free_space_cache, last,
145                                                  last + hole_size - 1,
146                                                  GFP_NOFS);
147                         }
148                         last = key.objectid + key.offset;
149                 }
150 next:
151                 path->slots[0]++;
152         }
153
154         if (!found)
155                 last = first_free;
156         if (block_group->key.objectid +
157             block_group->key.offset > last) {
158                 hole_size = block_group->key.objectid +
159                         block_group->key.offset - last;
160                 set_extent_dirty(free_space_cache, last,
161                                  last + hole_size - 1, GFP_NOFS);
162         }
163         block_group->cached = 1;
164 err:
165         btrfs_free_path(path);
166         return 0;
167 }
168
169 struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
170                                                        btrfs_fs_info *info,
171                                                          u64 bytenr)
172 {
173         struct extent_io_tree *block_group_cache;
174         struct btrfs_block_group_cache *block_group = NULL;
175         u64 ptr;
176         u64 start;
177         u64 end;
178         int ret;
179
180         bytenr = max_t(u64, bytenr,
181                        BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
182         block_group_cache = &info->block_group_cache;
183         ret = find_first_extent_bit(block_group_cache,
184                                     bytenr, &start, &end,
185                                     BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
186                                     BLOCK_GROUP_SYSTEM);
187         if (ret) {
188                 return NULL;
189         }
190         ret = get_state_private(block_group_cache, start, &ptr);
191         if (ret)
192                 return NULL;
193
194         block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
195         return block_group;
196 }
197
198 struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
199                                                          btrfs_fs_info *info,
200                                                          u64 bytenr)
201 {
202         struct extent_io_tree *block_group_cache;
203         struct btrfs_block_group_cache *block_group = NULL;
204         u64 ptr;
205         u64 start;
206         u64 end;
207         int ret;
208
209         bytenr = max_t(u64, bytenr,
210                        BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
211         block_group_cache = &info->block_group_cache;
212         ret = find_first_extent_bit(block_group_cache,
213                                     bytenr, &start, &end,
214                                     BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
215                                     BLOCK_GROUP_SYSTEM);
216         if (ret) {
217                 return NULL;
218         }
219         ret = get_state_private(block_group_cache, start, &ptr);
220         if (ret)
221                 return NULL;
222
223         block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
224         if (block_group->key.objectid <= bytenr && bytenr <
225             block_group->key.objectid + block_group->key.offset)
226                 return block_group;
227         return NULL;
228 }
229
230 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
231 {
232         return (cache->flags & bits) == bits;
233 }
234
/*
 * Find @num contiguous free bytes in *cache_ret (or a later group) that
 * match the @data allocation profile.  On success, *start_ret holds the
 * byte offset of the range and *cache_ret the group that owns it.
 * Returns -ENOSPC when nothing suitable exists.
 *
 * Free ranges are read from the fs-wide free_space_cache extent_io tree,
 * which cache_block_group() fills in.  Caller must hold alloc_mutex.
 */
static int noinline find_search_start(struct btrfs_root *root,
			      struct btrfs_block_group_cache **cache_ret,
			      u64 *start_ret, u64 num, int data)
{
	int ret;
	struct btrfs_block_group_cache *cache = *cache_ret;
	struct extent_io_tree *free_space_cache;
	struct extent_state *state;
	u64 last;
	u64 start = 0;
	u64 cache_miss = 0;
	u64 total_fs_bytes;
	u64 search_start = *start_ret;
	int wrapped = 0;

	WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
	total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
	free_space_cache = &root->fs_info->free_space_cache;

	if (!cache)
		goto out;

again:
	/* make sure this group's free space is recorded in the cache */
	ret = cache_block_group(root, cache);
	if (ret) {
		goto out;
	}

	last = max(search_start, cache->key.objectid);
	/* wrong profile or read-only group: move on to another group */
	if (!block_group_bits(cache, data) || cache->ro)
		goto new_group;

	spin_lock_irq(&free_space_cache->lock);
	state = find_first_extent_bit_state(free_space_cache, last, EXTENT_DIRTY);
	while(1) {
		if (!state) {
			/* remember where the cache ran dry for a rescan */
			if (!cache_miss)
				cache_miss = last;
			spin_unlock_irq(&free_space_cache->lock);
			goto new_group;
		}

		start = max(last, state->start);
		last = state->end + 1;
		if (last - start < num) {
			/* range too small, walk to the next dirty state */
			do {
				state = extent_state_next(state);
			} while(state && !(state->state & EXTENT_DIRTY));
			continue;
		}
		spin_unlock_irq(&free_space_cache->lock);
		if (cache->ro) {
			goto new_group;
		}
		/* the range must fit entirely inside the current group */
		if (start + num > cache->key.objectid + cache->key.offset)
			goto new_group;
		if (!block_group_bits(cache, data)) {
			printk("block group bits don't match %Lu %d\n", cache->flags, data);
		}
		*start_ret = start;
		return 0;
	}
out:
	cache = btrfs_lookup_block_group(root->fs_info, search_start);
	if (!cache) {
		printk("Unable to find block group for %Lu\n", search_start);
		WARN_ON(1);
	}
	return -ENOSPC;

new_group:
	last = cache->key.objectid + cache->key.offset;
wrapped:
	cache = btrfs_lookup_first_block_group(root->fs_info, last);
	if (!cache || cache->key.objectid >= total_fs_bytes) {
no_cache:
		/* ran off the end; wrap back to search_start exactly once */
		if (!wrapped) {
			wrapped = 1;
			last = search_start;
			goto wrapped;
		}
		goto out;
	}
	if (cache_miss && !cache->cached) {
		/* group was never cached; fill it and retry from the miss */
		cache_block_group(root, cache);
		last = cache_miss;
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
	}
	cache_miss = 0;
	cache = btrfs_find_block_group(root, cache, last, data, 0);
	if (!cache)
		goto no_cache;
	*cache_ret = cache;
	goto again;
}
330
331 static u64 div_factor(u64 num, int factor)
332 {
333         if (factor == 10)
334                 return num;
335         num *= factor;
336         do_div(num, 10);
337         return num;
338 }
339
340 static int block_group_state_bits(u64 flags)
341 {
342         int bits = 0;
343         if (flags & BTRFS_BLOCK_GROUP_DATA)
344                 bits |= BLOCK_GROUP_DATA;
345         if (flags & BTRFS_BLOCK_GROUP_METADATA)
346                 bits |= BLOCK_GROUP_METADATA;
347         if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
348                 bits |= BLOCK_GROUP_SYSTEM;
349         return bits;
350 }
351
/*
 * Pick a block group to allocate from.  Preference order: the group at
 * @search_start, then @hint, then a linear scan of the block_group_cache
 * tree starting after the hint.  A group qualifies when it matches the
 * @data profile, is not read-only, and used+pinned is below a fullness
 * threshold (9/10 of the group size for metadata, otherwise the full
 * size).  The scan wraps to @search_start once, then retries with the
 * threshold relaxed to the full size.  Returns NULL if nothing fits.
 */
static struct btrfs_block_group_cache *
__btrfs_find_block_group(struct btrfs_root *root,
			 struct btrfs_block_group_cache *hint,
			 u64 search_start, int data, int owner)
{
	struct btrfs_block_group_cache *cache;
	struct extent_io_tree *block_group_cache;
	struct btrfs_block_group_cache *found_group = NULL;
	struct btrfs_fs_info *info = root->fs_info;
	u64 used;
	u64 last = 0;
	u64 start;
	u64 end;
	u64 free_check;
	u64 ptr;
	int bit;
	int ret;
	int full_search = 0;
	int factor = 10;	/* fullness threshold in tenths */
	int wrapped = 0;

	block_group_cache = &info->block_group_cache;

	/* leave some slack in metadata groups */
	if (data & BTRFS_BLOCK_GROUP_METADATA)
		factor = 9;

	bit = block_group_state_bits(data);

	if (search_start) {
		/* first choice: the group at the requested start */
		struct btrfs_block_group_cache *shint;
		shint = btrfs_lookup_first_block_group(info, search_start);
		if (shint && block_group_bits(shint, data) && !shint->ro) {
			spin_lock(&shint->lock);
			used = btrfs_block_group_used(&shint->item);
			if (used + shint->pinned <
			    div_factor(shint->key.offset, factor)) {
				spin_unlock(&shint->lock);
				return shint;
			}
			spin_unlock(&shint->lock);
		}
	}
	if (hint && !hint->ro && block_group_bits(hint, data)) {
		spin_lock(&hint->lock);
		used = btrfs_block_group_used(&hint->item);
		if (used + hint->pinned <
		    div_factor(hint->key.offset, factor)) {
			spin_unlock(&hint->lock);
			return hint;
		}
		spin_unlock(&hint->lock);
		/* hint was too full; continue the scan just past it */
		last = hint->key.objectid + hint->key.offset;
	} else {
		if (hint)
			last = max(hint->key.objectid, search_start);
		else
			last = search_start;
	}
again:
	while(1) {
		ret = find_first_extent_bit(block_group_cache, last,
					    &start, &end, bit);
		if (ret)
			break;

		ret = get_state_private(block_group_cache, start, &ptr);
		if (ret) {
			/* no group pointer recorded here; skip the range */
			last = end + 1;
			continue;
		}

		cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if (!cache->ro && block_group_bits(cache, data)) {
			free_check = div_factor(cache->key.offset, factor);
			if (used + cache->pinned < free_check) {
				found_group = cache;
				spin_unlock(&cache->lock);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		cond_resched();
	}
	if (!wrapped) {
		/* second pass: rescan from the original start point */
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		/* last resort: accept groups right up to full */
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return found_group;
}
453
454 struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
455                                                  struct btrfs_block_group_cache
456                                                  *hint, u64 search_start,
457                                                  int data, int owner)
458 {
459
460         struct btrfs_block_group_cache *ret;
461         ret = __btrfs_find_block_group(root, hint, search_start, data, owner);
462         return ret;
463 }
464 static u64 hash_extent_ref(u64 root_objectid, u64 ref_generation,
465                            u64 owner, u64 owner_offset)
466 {
467         u32 high_crc = ~(u32)0;
468         u32 low_crc = ~(u32)0;
469         __le64 lenum;
470         lenum = cpu_to_le64(root_objectid);
471         high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
472         lenum = cpu_to_le64(ref_generation);
473         low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
474         if (owner >= BTRFS_FIRST_FREE_OBJECTID) {
475                 lenum = cpu_to_le64(owner);
476                 low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
477                 lenum = cpu_to_le64(owner_offset);
478                 low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
479         }
480         return ((u64)high_crc << 32) | (u64)low_crc;
481 }
482
483 static int match_extent_ref(struct extent_buffer *leaf,
484                             struct btrfs_extent_ref *disk_ref,
485                             struct btrfs_extent_ref *cpu_ref)
486 {
487         int ret;
488         int len;
489
490         if (cpu_ref->objectid)
491                 len = sizeof(*cpu_ref);
492         else
493                 len = 2 * sizeof(u64);
494         ret = memcmp_extent_buffer(leaf, cpu_ref, (unsigned long)disk_ref,
495                                    len);
496         return ret == 0;
497 }
498
499 /* simple helper to search for an existing extent at a given offset */
500 int btrfs_lookup_extent(struct btrfs_root *root, struct btrfs_path *path,
501                         u64 start, u64 len)
502 {
503         int ret;
504         struct btrfs_key key;
505
506         maybe_lock_mutex(root);
507         key.objectid = start;
508         key.offset = len;
509         btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
510         ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
511                                 0, 0);
512         maybe_unlock_mutex(root);
513         return ret;
514 }
515
/*
 * Find the backref item on extent @bytenr matching the
 * (root_objectid, ref_generation, owner, owner_offset) tuple.  Backref
 * keys store only a 64-bit hash of the tuple, so hash collisions are
 * resolved by walking forward through adjacent BTRFS_EXTENT_REF_KEY
 * items until the item bytes actually match.
 *
 * When @del is non-zero the search is done with cow enabled and -1 item
 * slack so the caller can delete the item, and inexact hits restart the
 * search instead of reusing the read-only path position.
 *
 * Returns 0 with @path positioned on the match, 1 when no match exists,
 * or a negative errno.
 */
static int noinline lookup_extent_backref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path, u64 bytenr,
					  u64 root_objectid,
					  u64 ref_generation, u64 owner,
					  u64 owner_offset, int del)
{
	u64 hash;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_extent_ref ref;
	struct extent_buffer *leaf;
	struct btrfs_extent_ref *disk_ref;
	int ret;
	int ret2;

	/* build the in-memory ref we expect to find on disk */
	btrfs_set_stack_ref_root(&ref, root_objectid);
	btrfs_set_stack_ref_generation(&ref, ref_generation);
	btrfs_set_stack_ref_objectid(&ref, owner);
	btrfs_set_stack_ref_offset(&ref, owner_offset);

	hash = hash_extent_ref(root_objectid, ref_generation, owner,
			       owner_offset);
	key.offset = hash;
	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_REF_KEY;

	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path,
					del ? -1 : 0, del);
		if (ret < 0)
			goto out;
		leaf = path->nodes[0];
		if (ret != 0) {
			/* no exact hit; examine the next item instead */
			u32 nritems = btrfs_header_nritems(leaf);
			if (path->slots[0] >= nritems) {
				ret2 = btrfs_next_leaf(root, path);
				if (ret2)
					goto out;
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
			if (found_key.objectid != bytenr ||
			    found_key.type != BTRFS_EXTENT_REF_KEY)
				goto out;
			key.offset = found_key.offset;
			if (del) {
				/* redo the search so the path is cowed */
				btrfs_release_path(root, path);
				continue;
			}
		}
		disk_ref = btrfs_item_ptr(path->nodes[0],
					  path->slots[0],
					  struct btrfs_extent_ref);
		if (match_extent_ref(path->nodes[0], disk_ref, &ref)) {
			ret = 0;
			goto out;
		}
		/* hash collision: probe the next backref key */
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		key.offset = found_key.offset + 1;
		btrfs_release_path(root, path);
	}
out:
	return ret;
}
581
582 /*
583  * Back reference rules.  Back refs have three main goals:
584  *
585  * 1) differentiate between all holders of references to an extent so that
586  *    when a reference is dropped we can make sure it was a valid reference
587  *    before freeing the extent.
588  *
589  * 2) Provide enough information to quickly find the holders of an extent
590  *    if we notice a given block is corrupted or bad.
591  *
592  * 3) Make it easy to migrate blocks for FS shrinking or storage pool
593  *    maintenance.  This is actually the same as #2, but with a slightly
594  *    different use case.
595  *
596  * File extents can be referenced by:
597  *
598  * - multiple snapshots, subvolumes, or different generations in one subvol
599  * - different files inside a single subvolume (in theory, not implemented yet)
600  * - different offsets inside a file (bookend extents in file.c)
601  *
602  * The extent ref structure has fields for:
603  *
604  * - Objectid of the subvolume root
605  * - Generation number of the tree holding the reference
606  * - objectid of the file holding the reference
607  * - offset in the file corresponding to the key holding the reference
608  *
609  * When a file extent is allocated the fields are filled in:
610  *     (root_key.objectid, trans->transid, inode objectid, offset in file)
611  *
612  * When a leaf is cow'd new references are added for every file extent found
613  * in the leaf.  It looks the same as the create case, but trans->transid
614  * will be different when the block is cow'd.
615  *
616  *     (root_key.objectid, trans->transid, inode objectid, offset in file)
617  *
618  * When a file extent is removed either during snapshot deletion or file
619  * truncation, the corresponding back reference is found
620  * by searching for:
621  *
622  *     (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
623  *      inode objectid, offset in file)
624  *
625  * Btree extents can be referenced by:
626  *
627  * - Different subvolumes
628  * - Different generations of the same subvolume
629  *
630  * Storing sufficient information for a full reverse mapping of a btree
631  * block would require storing the lowest key of the block in the backref,
632  * and it would require updating that lowest key either before write out or
633  * every time it changed.  Instead, the objectid of the lowest key is stored
634  * along with the level of the tree block.  This provides a hint
635  * about where in the btree the block can be found.  Searches through the
636  * btree only need to look for a pointer to that block, so they stop one
637  * level higher than the level recorded in the backref.
638  *
639  * Some btrees do not do reference counting on their extents.  These
640  * include the extent tree and the tree of tree roots.  Backrefs for these
641  * trees always have a generation of zero.
642  *
643  * When a tree block is created, back references are inserted:
644  *
645  * (root->root_key.objectid, trans->transid or zero, level, lowest_key_objectid)
646  *
647  * When a tree block is cow'd in a reference counted root,
648  * new back references are added for all the blocks it points to.
649  * These are of the form (trans->transid will have increased since creation):
650  *
651  * (root->root_key.objectid, trans->transid, level, lowest_key_objectid)
652  *
653  * Because the lowest_key_objectid and the level are just hints
654  * they are not used when backrefs are deleted.  When a backref is deleted:
655  *
656  * if backref was for a tree root:
657  *     root_objectid = root->root_key.objectid
658  * else
659  *     root_objectid = btrfs_header_owner(parent)
660  *
661  * (root_objectid, btrfs_header_generation(parent) or zero, 0, 0)
662  *
663  * Back Reference Key hashing:
664  *
 * Back references have four fields, each 64 bits long.  Unfortunately,
 * this is hashed into a single 64 bit number and placed into the key offset.
667  * The key objectid corresponds to the first byte in the extent, and the
668  * key type is set to BTRFS_EXTENT_REF_KEY
669  */
/*
 * Insert a back reference item for extent @bytenr.  The key offset is
 * the hash of the ref tuple; on a hash collision with a different ref we
 * probe forward (offset + 1) until an empty slot is found.  If an
 * identical ref already exists, -EEXIST is returned.  @path is released
 * before returning.
 */
int btrfs_insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path, u64 bytenr,
				 u64 root_objectid, u64 ref_generation,
				 u64 owner, u64 owner_offset)
{
	u64 hash;
	struct btrfs_key key;
	struct btrfs_extent_ref ref;
	struct btrfs_extent_ref *disk_ref;
	int ret;

	/* build the ref we want to write */
	btrfs_set_stack_ref_root(&ref, root_objectid);
	btrfs_set_stack_ref_generation(&ref, ref_generation);
	btrfs_set_stack_ref_objectid(&ref, owner);
	btrfs_set_stack_ref_offset(&ref, owner_offset);

	hash = hash_extent_ref(root_objectid, ref_generation, owner,
			       owner_offset);
	key.offset = hash;
	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_REF_KEY;

	ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(ref));
	while (ret == -EEXIST) {
		/* slot taken: bail if it is our ref, else probe forward */
		disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
					  struct btrfs_extent_ref);
		if (match_extent_ref(path->nodes[0], disk_ref, &ref))
			goto out;
		key.offset++;
		btrfs_release_path(root, path);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      sizeof(ref));
	}
	if (ret)
		goto out;
	/* fill in the freshly reserved item */
	disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				  struct btrfs_extent_ref);
	write_extent_buffer(path->nodes[0], &ref, (unsigned long)disk_ref,
			    sizeof(ref));
	btrfs_mark_buffer_dirty(path->nodes[0]);
out:
	btrfs_release_path(root, path);
	return ret;
}
715
716 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
717                                 struct btrfs_root *root,
718                                 u64 bytenr, u64 num_bytes,
719                                 u64 root_objectid, u64 ref_generation,
720                                 u64 owner, u64 owner_offset)
721 {
722         struct btrfs_path *path;
723         int ret;
724         struct btrfs_key key;
725         struct extent_buffer *l;
726         struct btrfs_extent_item *item;
727         u32 refs;
728
729         WARN_ON(num_bytes < root->sectorsize);
730         path = btrfs_alloc_path();
731         if (!path)
732                 return -ENOMEM;
733
734         path->reada = 1;
735         key.objectid = bytenr;
736         btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
737         key.offset = num_bytes;
738         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
739                                 0, 1);
740         if (ret < 0)
741                 return ret;
742         if (ret != 0) {
743                 BUG();
744         }
745         BUG_ON(ret != 0);
746         l = path->nodes[0];
747         item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
748         refs = btrfs_extent_refs(l, item);
749         btrfs_set_extent_refs(l, item, refs + 1);
750         btrfs_mark_buffer_dirty(path->nodes[0]);
751
752         btrfs_release_path(root->fs_info->extent_root, path);
753
754         path->reada = 1;
755         ret = btrfs_insert_extent_backref(trans, root->fs_info->extent_root,
756                                           path, bytenr, root_objectid,
757                                           ref_generation, owner, owner_offset);
758         BUG_ON(ret);
759         finish_current_insert(trans, root->fs_info->extent_root);
760         del_pending_extents(trans, root->fs_info->extent_root);
761
762         btrfs_free_path(path);
763         return 0;
764 }
765
766 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
767                                 struct btrfs_root *root,
768                                 u64 bytenr, u64 num_bytes,
769                                 u64 root_objectid, u64 ref_generation,
770                                 u64 owner, u64 owner_offset)
771 {
772         int ret;
773
774         mutex_lock(&root->fs_info->alloc_mutex);
775         ret = __btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
776                                      root_objectid, ref_generation,
777                                      owner, owner_offset);
778         mutex_unlock(&root->fs_info->alloc_mutex);
779         return ret;
780 }
781
782 int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
783                          struct btrfs_root *root)
784 {
785         finish_current_insert(trans, root->fs_info->extent_root);
786         del_pending_extents(trans, root->fs_info->extent_root);
787         return 0;
788 }
789
790 static int lookup_extent_ref(struct btrfs_trans_handle *trans,
791                              struct btrfs_root *root, u64 bytenr,
792                              u64 num_bytes, u32 *refs)
793 {
794         struct btrfs_path *path;
795         int ret;
796         struct btrfs_key key;
797         struct extent_buffer *l;
798         struct btrfs_extent_item *item;
799
800         WARN_ON(num_bytes < root->sectorsize);
801         path = btrfs_alloc_path();
802         path->reada = 1;
803         key.objectid = bytenr;
804         key.offset = num_bytes;
805         btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
806         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
807                                 0, 0);
808         if (ret < 0)
809                 goto out;
810         if (ret != 0) {
811                 btrfs_print_leaf(root, path->nodes[0]);
812                 printk("failed to find block number %Lu\n", bytenr);
813                 BUG();
814         }
815         l = path->nodes[0];
816         item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
817         *refs = btrfs_extent_refs(l, item);
818 out:
819         btrfs_free_path(path);
820         return 0;
821 }
822
823
824 static int get_reference_status(struct btrfs_root *root, u64 bytenr,
825                                 u64 parent_gen, u64 ref_objectid,
826                                 u64 *min_generation, u32 *ref_count)
827 {
828         struct btrfs_root *extent_root = root->fs_info->extent_root;
829         struct btrfs_path *path;
830         struct extent_buffer *leaf;
831         struct btrfs_extent_ref *ref_item;
832         struct btrfs_key key;
833         struct btrfs_key found_key;
834         u64 root_objectid = root->root_key.objectid;
835         u64 ref_generation;
836         u32 nritems;
837         int ret;
838
839         key.objectid = bytenr;
840         key.offset = 0;
841         key.type = BTRFS_EXTENT_ITEM_KEY;
842
843         path = btrfs_alloc_path();
844         mutex_lock(&root->fs_info->alloc_mutex);
845         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
846         if (ret < 0)
847                 goto out;
848         BUG_ON(ret == 0);
849
850         leaf = path->nodes[0];
851         btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
852
853         if (found_key.objectid != bytenr ||
854             found_key.type != BTRFS_EXTENT_ITEM_KEY) {
855                 ret = 1;
856                 goto out;
857         }
858
859         *ref_count = 0;
860         *min_generation = (u64)-1;
861
862         while (1) {
863                 leaf = path->nodes[0];
864                 nritems = btrfs_header_nritems(leaf);
865                 if (path->slots[0] >= nritems) {
866                         ret = btrfs_next_leaf(extent_root, path);
867                         if (ret < 0)
868                                 goto out;
869                         if (ret == 0)
870                                 continue;
871                         break;
872                 }
873                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
874                 if (found_key.objectid != bytenr)
875                         break;
876
877                 if (found_key.type != BTRFS_EXTENT_REF_KEY) {
878                         path->slots[0]++;
879                         continue;
880                 }
881
882                 ref_item = btrfs_item_ptr(leaf, path->slots[0],
883                                           struct btrfs_extent_ref);
884                 ref_generation = btrfs_ref_generation(leaf, ref_item);
885                 /*
886                  * For (parent_gen > 0 && parent_gen > ref_gen):
887                  *
888                  * we reach here through the oldest root, therefore
889                  * all other reference from same snapshot should have
890                  * a larger generation.
891                  */
892                 if ((root_objectid != btrfs_ref_root(leaf, ref_item)) ||
893                     (parent_gen > 0 && parent_gen > ref_generation) ||
894                     (ref_objectid >= BTRFS_FIRST_FREE_OBJECTID &&
895                      ref_objectid != btrfs_ref_objectid(leaf, ref_item))) {
896                         if (ref_count)
897                                 *ref_count = 2;
898                         break;
899                 }
900
901                 *ref_count = 1;
902                 if (*min_generation > ref_generation)
903                         *min_generation = ref_generation;
904
905                 path->slots[0]++;
906         }
907         ret = 0;
908 out:
909         mutex_unlock(&root->fs_info->alloc_mutex);
910         btrfs_free_path(path);
911         return ret;
912 }
913
/*
 * decide whether the file extent at 'bytenr' (described by 'key', an
 * EXTENT_DATA key in this root) is shared with another snapshot.
 *
 * Returns 0 when the extent is only referenced by this root in the
 * running transaction, 1 when a cross reference exists (or the item
 * cannot be found in the old root), or a negative errno on failure.
 */
int btrfs_cross_ref_exists(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct btrfs_key *key, u64 bytenr)
{
        struct btrfs_root *old_root;
        struct btrfs_path *path = NULL;
        struct extent_buffer *eb;
        struct btrfs_file_extent_item *item;
        u64 ref_generation;
        u64 min_generation;
        u64 extent_start;
        u32 ref_count;
        int level;
        int ret;

        BUG_ON(trans == NULL);
        BUG_ON(key->type != BTRFS_EXTENT_DATA_KEY);
        /* first pass: check the data extent's own backrefs */
        ret = get_reference_status(root, bytenr, 0, key->objectid,
                                   &min_generation, &ref_count);
        if (ret)
                return ret;

        if (ref_count != 1)
                return 1;

        old_root = root->dirty_root->root;
        ref_generation = old_root->root_key.offset;

        /* all references are created in running transaction */
        if (min_generation > ref_generation) {
                ret = 0;
                goto out;
        }

        path = btrfs_alloc_path();
        if (!path) {
                ret = -ENOMEM;
                goto out;
        }

        path->skip_locking = 1;
        /* if no item found, the extent is referenced by other snapshot */
        ret = btrfs_search_slot(NULL, old_root, key, path, 0, 0);
        if (ret)
                goto out;

        eb = path->nodes[0];
        item = btrfs_item_ptr(eb, path->slots[0],
                              struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(eb, item) != BTRFS_FILE_EXTENT_REG ||
            btrfs_file_extent_disk_bytenr(eb, item) != bytenr) {
                /* the old root points at a different extent here: shared */
                ret = 1;
                goto out;
        }

        /*
         * walk up the path in the old root; every tree block on the way
         * (and finally the data extent itself, level == -1) must be
         * exclusively referenced for the extent to be unshared
         */
        for (level = BTRFS_MAX_LEVEL - 1; level >= -1; level--) {
                if (level >= 0) {
                        eb = path->nodes[level];
                        if (!eb)
                                continue;
                        extent_start = eb->start;
                } else
                        extent_start = bytenr;

                ret = get_reference_status(root, extent_start, ref_generation,
                                           0, &min_generation, &ref_count);
                if (ret)
                        goto out;

                if (ref_count != 1) {
                        ret = 1;
                        goto out;
                }
                if (level >= 0)
                        ref_generation = btrfs_header_generation(eb);
        }
        ret = 0;
out:
        if (path)
                btrfs_free_path(path);
        return ret;
}
996
/*
 * after a COW of 'buf', add one reference for every extent the buffer
 * points to: file data extents for a leaf, child blocks for a node.
 *
 * When cache_ref is set and 'buf' is a leaf being COWed (not the commit
 * root copy), the leaf's file extent list is also recorded in the
 * per-root leaf ref cache.
 *
 * Returns 0 on success (including when root->ref_cows is clear and
 * nothing needs to be done).  The failure path is currently stubbed
 * out -- see the #if 0 block below.
 */
int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                  struct extent_buffer *buf, int cache_ref)
{
        u64 bytenr;
        u32 nritems;
        struct btrfs_key key;
        struct btrfs_file_extent_item *fi;
        int i;
        int level;
        int ret;
        int faili;          /* index of the item whose ref update failed */
        int nr_file_extents = 0;

        /* roots without COW-able references (e.g. log trees) need no refs */
        if (!root->ref_cows)
                return 0;

        level = btrfs_header_level(buf);
        nritems = btrfs_header_nritems(buf);
        for (i = 0; i < nritems; i++) {
                cond_resched();
                if (level == 0) {
                        u64 disk_bytenr;
                        btrfs_item_key_to_cpu(buf, &key, i);
                        if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
                                continue;
                        fi = btrfs_item_ptr(buf, i,
                                            struct btrfs_file_extent_item);
                        /* inline extents live in the leaf, no backref needed */
                        if (btrfs_file_extent_type(buf, fi) ==
                            BTRFS_FILE_EXTENT_INLINE)
                                continue;
                        disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
                        /* bytenr 0 marks a hole */
                        if (disk_bytenr == 0)
                                continue;

                        if (buf != root->commit_root)
                                nr_file_extents++;

                        mutex_lock(&root->fs_info->alloc_mutex);
                        ret = __btrfs_inc_extent_ref(trans, root, disk_bytenr,
                                    btrfs_file_extent_disk_num_bytes(buf, fi),
                                    root->root_key.objectid, trans->transid,
                                    key.objectid, key.offset);
                        mutex_unlock(&root->fs_info->alloc_mutex);
                        if (ret) {
                                faili = i;
                                WARN_ON(1);
                                goto fail;
                        }
                } else {
                        /* interior node: add a ref on each child block */
                        bytenr = btrfs_node_blockptr(buf, i);
                        btrfs_node_key_to_cpu(buf, &key, i);

                        mutex_lock(&root->fs_info->alloc_mutex);
                        ret = __btrfs_inc_extent_ref(trans, root, bytenr,
                                           btrfs_level_size(root, level - 1),
                                           root->root_key.objectid,
                                           trans->transid,
                                           level - 1, key.objectid);
                        mutex_unlock(&root->fs_info->alloc_mutex);
                        if (ret) {
                                faili = i;
                                WARN_ON(1);
                                goto fail;
                        }
                }
        }
        /* cache original leaf block's references */
        if (level == 0 && cache_ref && buf != root->commit_root) {
                struct btrfs_leaf_ref *ref;
                struct btrfs_extent_info *info;

                ref = btrfs_alloc_leaf_ref(root, nr_file_extents);
                if (!ref) {
                        /* cache miss is tolerable; the refs above still hold */
                        WARN_ON(1);
                        goto out;
                }

                ref->root_gen = root->root_key.offset;
                ref->bytenr = buf->start;
                ref->owner = btrfs_header_owner(buf);
                ref->generation = btrfs_header_generation(buf);
                ref->nritems = nr_file_extents;
                info = ref->extents;

                /* second pass over the leaf: record each real data extent */
                for (i = 0; nr_file_extents > 0 && i < nritems; i++) {
                        u64 disk_bytenr;
                        btrfs_item_key_to_cpu(buf, &key, i);
                        if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
                                continue;
                        fi = btrfs_item_ptr(buf, i,
                                            struct btrfs_file_extent_item);
                        if (btrfs_file_extent_type(buf, fi) ==
                            BTRFS_FILE_EXTENT_INLINE)
                                continue;
                        disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
                        if (disk_bytenr == 0)
                                continue;

                        info->bytenr = disk_bytenr;
                        info->num_bytes =
                                btrfs_file_extent_disk_num_bytes(buf, fi);
                        info->objectid = key.objectid;
                        info->offset = key.offset;
                        info++;
                }

                BUG_ON(!root->ref_tree);
                ret = btrfs_add_leaf_ref(root, ref);
                WARN_ON(ret);
                btrfs_free_leaf_ref(root, ref);
        }
out:
        return 0;
fail:
        WARN_ON(1);
        /*
         * NOTE(review): the rollback below is disabled and references an
         * undeclared 'err'; on failure the refs added before 'faili' are
         * simply leaked and ret is returned as-is.
         */
#if 0
        for (i =0; i < faili; i++) {
                if (level == 0) {
                        u64 disk_bytenr;
                        btrfs_item_key_to_cpu(buf, &key, i);
                        if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
                                continue;
                        fi = btrfs_item_ptr(buf, i,
                                            struct btrfs_file_extent_item);
                        if (btrfs_file_extent_type(buf, fi) ==
                            BTRFS_FILE_EXTENT_INLINE)
                                continue;
                        disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
                        if (disk_bytenr == 0)
                                continue;
                        err = btrfs_free_extent(trans, root, disk_bytenr,
                                    btrfs_file_extent_disk_num_bytes(buf,
                                                                      fi), 0);
                        BUG_ON(err);
                } else {
                        bytenr = btrfs_node_blockptr(buf, i);
                        err = btrfs_free_extent(trans, root, bytenr,
                                        btrfs_level_size(root, level - 1), 0);
                        BUG_ON(err);
                }
        }
#endif
        return ret;
}
1141
1142 static int write_one_cache_group(struct btrfs_trans_handle *trans,
1143                                  struct btrfs_root *root,
1144                                  struct btrfs_path *path,
1145                                  struct btrfs_block_group_cache *cache)
1146 {
1147         int ret;
1148         int pending_ret;
1149         struct btrfs_root *extent_root = root->fs_info->extent_root;
1150         unsigned long bi;
1151         struct extent_buffer *leaf;
1152
1153         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
1154         if (ret < 0)
1155                 goto fail;
1156         BUG_ON(ret);
1157
1158         leaf = path->nodes[0];
1159         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
1160         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
1161         btrfs_mark_buffer_dirty(leaf);
1162         btrfs_release_path(extent_root, path);
1163 fail:
1164         finish_current_insert(trans, extent_root);
1165         pending_ret = del_pending_extents(trans, extent_root);
1166         if (ret)
1167                 return ret;
1168         if (pending_ret)
1169                 return pending_ret;
1170         return 0;
1171
1172 }
1173
1174 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
1175                                    struct btrfs_root *root)
1176 {
1177         struct extent_io_tree *block_group_cache;
1178         struct btrfs_block_group_cache *cache;
1179         int ret;
1180         int err = 0;
1181         int werr = 0;
1182         struct btrfs_path *path;
1183         u64 last = 0;
1184         u64 start;
1185         u64 end;
1186         u64 ptr;
1187
1188         block_group_cache = &root->fs_info->block_group_cache;
1189         path = btrfs_alloc_path();
1190         if (!path)
1191                 return -ENOMEM;
1192
1193         mutex_lock(&root->fs_info->alloc_mutex);
1194         while(1) {
1195                 ret = find_first_extent_bit(block_group_cache, last,
1196                                             &start, &end, BLOCK_GROUP_DIRTY);
1197                 if (ret)
1198                         break;
1199
1200                 last = end + 1;
1201                 ret = get_state_private(block_group_cache, start, &ptr);
1202                 if (ret)
1203                         break;
1204                 cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
1205                 err = write_one_cache_group(trans, root,
1206                                             path, cache);
1207                 /*
1208                  * if we fail to write the cache group, we want
1209                  * to keep it marked dirty in hopes that a later
1210                  * write will work
1211                  */
1212                 if (err) {
1213                         werr = err;
1214                         continue;
1215                 }
1216                 clear_extent_bits(block_group_cache, start, end,
1217                                   BLOCK_GROUP_DIRTY, GFP_NOFS);
1218         }
1219         btrfs_free_path(path);
1220         mutex_unlock(&root->fs_info->alloc_mutex);
1221         return werr;
1222 }
1223
1224 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
1225                                                   u64 flags)
1226 {
1227         struct list_head *head = &info->space_info;
1228         struct list_head *cur;
1229         struct btrfs_space_info *found;
1230         list_for_each(cur, head) {
1231                 found = list_entry(cur, struct btrfs_space_info, list);
1232                 if (found->flags == flags)
1233                         return found;
1234         }
1235         return NULL;
1236
1237 }
1238
1239 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
1240                              u64 total_bytes, u64 bytes_used,
1241                              struct btrfs_space_info **space_info)
1242 {
1243         struct btrfs_space_info *found;
1244
1245         found = __find_space_info(info, flags);
1246         if (found) {
1247                 found->total_bytes += total_bytes;
1248                 found->bytes_used += bytes_used;
1249                 found->full = 0;
1250                 *space_info = found;
1251                 return 0;
1252         }
1253         found = kmalloc(sizeof(*found), GFP_NOFS);
1254         if (!found)
1255                 return -ENOMEM;
1256
1257         list_add(&found->list, &info->space_info);
1258         found->flags = flags;
1259         found->total_bytes = total_bytes;
1260         found->bytes_used = bytes_used;
1261         found->bytes_pinned = 0;
1262         found->full = 0;
1263         found->force_alloc = 0;
1264         *space_info = found;
1265         return 0;
1266 }
1267
1268 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
1269 {
1270         u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
1271                                    BTRFS_BLOCK_GROUP_RAID1 |
1272                                    BTRFS_BLOCK_GROUP_RAID10 |
1273                                    BTRFS_BLOCK_GROUP_DUP);
1274         if (extra_flags) {
1275                 if (flags & BTRFS_BLOCK_GROUP_DATA)
1276                         fs_info->avail_data_alloc_bits |= extra_flags;
1277                 if (flags & BTRFS_BLOCK_GROUP_METADATA)
1278                         fs_info->avail_metadata_alloc_bits |= extra_flags;
1279                 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
1280                         fs_info->avail_system_alloc_bits |= extra_flags;
1281         }
1282 }
1283
1284 static u64 reduce_alloc_profile(struct btrfs_root *root, u64 flags)
1285 {
1286         u64 num_devices = root->fs_info->fs_devices->num_devices;
1287
1288         if (num_devices == 1)
1289                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
1290         if (num_devices < 4)
1291                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
1292
1293         if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
1294             (flags & (BTRFS_BLOCK_GROUP_RAID1 |
1295                       BTRFS_BLOCK_GROUP_RAID10))) {
1296                 flags &= ~BTRFS_BLOCK_GROUP_DUP;
1297         }
1298
1299         if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
1300             (flags & BTRFS_BLOCK_GROUP_RAID10)) {
1301                 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
1302         }
1303
1304         if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
1305             ((flags & BTRFS_BLOCK_GROUP_RAID1) |
1306              (flags & BTRFS_BLOCK_GROUP_RAID10) |
1307              (flags & BTRFS_BLOCK_GROUP_DUP)))
1308                 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
1309         return flags;
1310 }
1311
/*
 * allocate a new chunk (and its block group) for 'flags' if the matching
 * space_info is getting close to full or 'force' is set.
 *
 * Always returns 0; allocation failures other than -ENOSPC are fatal
 * via BUG_ON.  -ENOSPC marks the space_info full so callers stop asking.
 */
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 alloc_bytes,
                          u64 flags, int force)
{
        struct btrfs_space_info *space_info;
        u64 thresh;
        u64 start;
        u64 num_bytes;
        int ret;

        flags = reduce_alloc_profile(extent_root, flags);

        space_info = __find_space_info(extent_root->fs_info, flags);
        if (!space_info) {
                /* first allocation with these flags: create the accounting */
                ret = update_space_info(extent_root->fs_info, flags,
                                        0, 0, &space_info);
                BUG_ON(ret);
        }
        BUG_ON(!space_info);

        /* a one-shot force request left by someone else */
        if (space_info->force_alloc) {
                force = 1;
                space_info->force_alloc = 0;
        }
        if (space_info->full)
                goto out;

        /* only allocate when usage would cross 60% of the pool */
        thresh = div_factor(space_info->total_bytes, 6);
        if (!force &&
           (space_info->bytes_used + space_info->bytes_pinned + alloc_bytes) <
            thresh)
                goto out;

        mutex_lock(&extent_root->fs_info->chunk_mutex);
        ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes, flags);
        if (ret == -ENOSPC) {
printk("space info full %Lu\n", flags);
                space_info->full = 1;
                goto out_unlock;
        }
        /* NOTE(review): any other error from btrfs_alloc_chunk is fatal */
        BUG_ON(ret);

        ret = btrfs_make_block_group(trans, extent_root, 0, flags,
                     BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes);
        BUG_ON(ret);
out_unlock:
        mutex_unlock(&extent_root->fs_info->chunk_mutex);
out:
        return 0;
}
1362
/*
 * adjust the used-byte accounting of the block group(s) covering
 * [bytenr, bytenr + num_bytes).  alloc non-zero means bytes were
 * allocated, zero means freed; mark_free additionally returns freed
 * ranges to the free space cache.  Each touched group is flagged
 * BLOCK_GROUP_DIRTY so it gets written back at commit time.
 *
 * Caller must hold alloc_mutex.  Returns 0, or -1 if no block group
 * covers bytenr.
 */
static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc,
                              int mark_free)
{
        struct btrfs_block_group_cache *cache;
        struct btrfs_fs_info *info = root->fs_info;
        u64 total = num_bytes;
        u64 old_val;
        u64 byte_in_group;
        u64 start;
        u64 end;

        WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
        /* the range may span several block groups; process one per pass */
        while(total) {
                cache = btrfs_lookup_block_group(info, bytenr);
                if (!cache) {
                        return -1;
                }
                byte_in_group = bytenr - cache->key.objectid;
                WARN_ON(byte_in_group > cache->key.offset);
                start = cache->key.objectid;
                end = start + cache->key.offset - 1;
                set_extent_bits(&info->block_group_cache, start, end,
                                BLOCK_GROUP_DIRTY, GFP_NOFS);

                spin_lock(&cache->lock);
                old_val = btrfs_block_group_used(&cache->item);
                /* clamp to the part of the range inside this group */
                num_bytes = min(total, cache->key.offset - byte_in_group);
                if (alloc) {
                        old_val += num_bytes;
                        cache->space_info->bytes_used += num_bytes;
                        btrfs_set_block_group_used(&cache->item, old_val);
                        spin_unlock(&cache->lock);
                } else {
                        old_val -= num_bytes;
                        cache->space_info->bytes_used -= num_bytes;
                        btrfs_set_block_group_used(&cache->item, old_val);
                        spin_unlock(&cache->lock);
                        if (mark_free) {
                                set_extent_dirty(&info->free_space_cache,
                                                 bytenr, bytenr + num_bytes - 1,
                                                 GFP_NOFS);
                        }
                }
                total -= num_bytes;
                bytenr += num_bytes;
        }
        return 0;
}
1413
1414 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
1415 {
1416         u64 start;
1417         u64 end;
1418         int ret;
1419         ret = find_first_extent_bit(&root->fs_info->block_group_cache,
1420                                     search_start, &start, &end,
1421                                     BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
1422                                     BLOCK_GROUP_SYSTEM);
1423         if (ret)
1424                 return 0;
1425         return start;
1426 }
1427
1428
/*
 * pin or unpin [bytenr, bytenr + num): set/clear the range in the
 * pinned_extents tree and adjust the per-block-group, per-space-info
 * and per-fs pinned byte counters.
 *
 * Caller must hold alloc_mutex.  Ranges with no covering block group
 * (e.g. superblock mirrors) still update total_pinned.
 */
int btrfs_update_pinned_extents(struct btrfs_root *root,
                                u64 bytenr, u64 num, int pin)
{
        u64 len;
        struct btrfs_block_group_cache *cache;
        struct btrfs_fs_info *fs_info = root->fs_info;

        WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
        if (pin) {
                set_extent_dirty(&fs_info->pinned_extents,
                                bytenr, bytenr + num - 1, GFP_NOFS);
        } else {
                clear_extent_dirty(&fs_info->pinned_extents,
                                bytenr, bytenr + num - 1, GFP_NOFS);
        }
        /* walk the range one block group at a time */
        while (num > 0) {
                cache = btrfs_lookup_block_group(fs_info, bytenr);
                if (!cache) {
                        /* gap with no group: skip to the next group start */
                        u64 first = first_logical_byte(root, bytenr);
                        WARN_ON(first < bytenr);
                        len = min(first - bytenr, num);
                } else {
                        len = min(num, cache->key.offset -
                                  (bytenr - cache->key.objectid));
                }
                if (pin) {
                        if (cache) {
                                spin_lock(&cache->lock);
                                cache->pinned += len;
                                cache->space_info->bytes_pinned += len;
                                spin_unlock(&cache->lock);
                        }
                        fs_info->total_pinned += len;
                } else {
                        if (cache) {
                                spin_lock(&cache->lock);
                                cache->pinned -= len;
                                cache->space_info->bytes_pinned -= len;
                                spin_unlock(&cache->lock);
                        }
                        fs_info->total_pinned -= len;
                }
                bytenr += len;
                num -= len;
        }
        return 0;
}
1476
1477 int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
1478 {
1479         u64 last = 0;
1480         u64 start;
1481         u64 end;
1482         struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
1483         int ret;
1484
1485         while(1) {
1486                 ret = find_first_extent_bit(pinned_extents, last,
1487                                             &start, &end, EXTENT_DIRTY);
1488                 if (ret)
1489                         break;
1490                 set_extent_dirty(copy, start, end, GFP_NOFS);
1491                 last = end + 1;
1492         }
1493         return 0;
1494 }
1495
/*
 * after a transaction commit, release every range recorded in 'unpin':
 * drop its pinned accounting, clear it from the unpin tree and hand
 * the space back to the free space cache.
 *
 * Takes alloc_mutex itself, briefly dropping it when a reschedule is
 * needed so long unpin runs do not hog the lock.
 */
int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               struct extent_io_tree *unpin)
{
        u64 start;
        u64 end;
        int ret;
        struct extent_io_tree *free_space_cache;
        free_space_cache = &root->fs_info->free_space_cache;

        mutex_lock(&root->fs_info->alloc_mutex);
        while(1) {
                ret = find_first_extent_bit(unpin, 0, &start, &end,
                                            EXTENT_DIRTY);
                if (ret)
                        break;
                btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
                clear_extent_dirty(unpin, start, end, GFP_NOFS);
                set_extent_dirty(free_space_cache, start, end, GFP_NOFS);
                /* be nice: drop the mutex around voluntary preemption */
                if (need_resched()) {
                        mutex_unlock(&root->fs_info->alloc_mutex);
                        cond_resched();
                        mutex_lock(&root->fs_info->alloc_mutex);
                }
        }
        mutex_unlock(&root->fs_info->alloc_mutex);
        return 0;
}
1524
/*
 * insert extent items for every range queued in info->extent_ins
 * (EXTENT_LOCKED bits), then add a backref for each using the first
 * key of the newly allocated tree block as an owner hint.
 *
 * Caller must hold alloc_mutex; the lock is dropped briefly when a
 * reschedule is pending.  Always returns 0.
 *
 * NOTE(review): btrfs_alloc_path() is not checked for NULL, and 'err'
 * from btrfs_insert_item is overwritten by the backref insert rather
 * than propagated.
 */
static int finish_current_insert(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *extent_root)
{
        u64 start;
        u64 end;
        struct btrfs_fs_info *info = extent_root->fs_info;
        struct extent_buffer *eb;
        struct btrfs_path *path;
        struct btrfs_key ins;
        struct btrfs_disk_key first;
        struct btrfs_extent_item extent_item;
        int ret;
        int level;
        int err = 0;

        WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
        btrfs_set_stack_extent_refs(&extent_item, 1);
        btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
        path = btrfs_alloc_path();

        while(1) {
                ret = find_first_extent_bit(&info->extent_ins, 0, &start,
                                            &end, EXTENT_LOCKED);
                if (ret)
                        break;

                /* one extent item per queued [start, end] range */
                ins.objectid = start;
                ins.offset = end + 1 - start;
                err = btrfs_insert_item(trans, extent_root, &ins,
                                        &extent_item, sizeof(extent_item));
                clear_extent_bits(&info->extent_ins, start, end, EXTENT_LOCKED,
                                  GFP_NOFS);

                eb = btrfs_find_create_tree_block(extent_root, ins.objectid,
                                           ins.offset);

                if (!btrfs_buffer_uptodate(eb, trans->transid))
                        btrfs_read_buffer(eb, trans->transid);

                /* grab the block's first key under its tree lock */
                btrfs_tree_lock(eb);
                level = btrfs_header_level(eb);
                if (level == 0) {
                        btrfs_item_key(eb, &first, 0);
                } else {
                        btrfs_node_key(eb, &first, 0);
                }
                btrfs_tree_unlock(eb);
                free_extent_buffer(eb);
                /*
                 * the first key is just a hint, so the race we've created
                 * against reading it is fine
                 */
                err = btrfs_insert_extent_backref(trans, extent_root, path,
                                          start, extent_root->root_key.objectid,
                                          0, level,
                                          btrfs_disk_key_objectid(&first));
                BUG_ON(err);
                /* be nice: drop the mutex around voluntary preemption */
                if (need_resched()) {
                        mutex_unlock(&extent_root->fs_info->alloc_mutex);
                        cond_resched();
                        mutex_lock(&extent_root->fs_info->alloc_mutex);
                }
        }
        btrfs_free_path(path);
        return 0;
}
1591
1592 static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes,
1593                           int is_data, int pending)
1594 {
1595         int err = 0;
1596
1597         WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
1598         if (!pending) {
1599                 struct extent_buffer *buf;
1600
1601                 if (is_data)
1602                         goto pinit;
1603
1604                 buf = btrfs_find_tree_block(root, bytenr, num_bytes);
1605                 if (buf) {
1606                         /* we can reuse a block if it hasn't been written
1607                          * and it is from this transaction.  We can't
1608                          * reuse anything from the tree log root because
1609                          * it has tiny sub-transactions.
1610                          */
1611                         if (btrfs_buffer_uptodate(buf, 0) &&
1612                             btrfs_try_tree_lock(buf)) {
1613                                 u64 transid =
1614                                     root->fs_info->running_transaction->transid;
1615                                 u64 header_transid =
1616                                         btrfs_header_generation(buf);
1617                                 if (btrfs_header_owner(buf) !=
1618                                     BTRFS_TREE_LOG_OBJECTID &&
1619                                     header_transid == transid &&
1620                                     !btrfs_header_flag(buf,
1621                                                BTRFS_HEADER_FLAG_WRITTEN)) {
1622                                         clean_tree_block(NULL, root, buf);
1623                                         btrfs_tree_unlock(buf);
1624                                         free_extent_buffer(buf);
1625                                         return 1;
1626                                 }
1627                                 btrfs_tree_unlock(buf);
1628                         }
1629                         free_extent_buffer(buf);
1630                 }
1631 pinit:
1632                 btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
1633         } else {
1634                 set_extent_bits(&root->fs_info->pending_del,
1635                                 bytenr, bytenr + num_bytes - 1,
1636                                 EXTENT_LOCKED, GFP_NOFS);
1637         }
1638         BUG_ON(err < 0);
1639         return 0;
1640 }
1641
1642 /*
1643  * remove an extent from the root, returns 0 on success
1644  */
1645 static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
1646                          *root, u64 bytenr, u64 num_bytes,
1647                          u64 root_objectid, u64 ref_generation,
1648                          u64 owner_objectid, u64 owner_offset, int pin,
1649                          int mark_free)
1650 {
1651         struct btrfs_path *path;
1652         struct btrfs_key key;
1653         struct btrfs_fs_info *info = root->fs_info;
1654         struct btrfs_root *extent_root = info->extent_root;
1655         struct extent_buffer *leaf;
1656         int ret;
1657         int extent_slot = 0;
1658         int found_extent = 0;
1659         int num_to_del = 1;
1660         struct btrfs_extent_item *ei;
1661         u32 refs;
1662
1663         WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
1664         key.objectid = bytenr;
1665         btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
1666         key.offset = num_bytes;
1667         path = btrfs_alloc_path();
1668         if (!path)
1669                 return -ENOMEM;
1670
1671         path->reada = 1;
1672         ret = lookup_extent_backref(trans, extent_root, path,
1673                                     bytenr, root_objectid,
1674                                     ref_generation,
1675                                     owner_objectid, owner_offset, 1);
1676         if (ret == 0) {
1677                 struct btrfs_key found_key;
1678                 extent_slot = path->slots[0];
1679                 while(extent_slot > 0) {
1680                         extent_slot--;
1681                         btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1682                                               extent_slot);
1683                         if (found_key.objectid != bytenr)
1684                                 break;
1685                         if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
1686                             found_key.offset == num_bytes) {
1687                                 found_extent = 1;
1688                                 break;
1689                         }
1690                         if (path->slots[0] - extent_slot > 5)
1691                                 break;
1692                 }
1693                 if (!found_extent)
1694                         ret = btrfs_del_item(trans, extent_root, path);
1695         } else {
1696                 btrfs_print_leaf(extent_root, path->nodes[0]);
1697                 WARN_ON(1);
1698                 printk("Unable to find ref byte nr %Lu root %Lu "
1699                        " gen %Lu owner %Lu offset %Lu\n", bytenr,
1700                        root_objectid, ref_generation, owner_objectid,
1701                        owner_offset);
1702         }
1703         if (!found_extent) {
1704                 btrfs_release_path(extent_root, path);
1705                 ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
1706                 if (ret < 0)
1707                         return ret;
1708                 BUG_ON(ret);
1709                 extent_slot = path->slots[0];
1710         }
1711
1712         leaf = path->nodes[0];
1713         ei = btrfs_item_ptr(leaf, extent_slot,
1714                             struct btrfs_extent_item);
1715         refs = btrfs_extent_refs(leaf, ei);
1716         BUG_ON(refs == 0);
1717         refs -= 1;
1718         btrfs_set_extent_refs(leaf, ei, refs);
1719
1720         btrfs_mark_buffer_dirty(leaf);
1721
1722         if (refs == 0 && found_extent && path->slots[0] == extent_slot + 1) {
1723                 /* if the back ref and the extent are next to each other
1724                  * they get deleted below in one shot
1725                  */
1726                 path->slots[0] = extent_slot;
1727                 num_to_del = 2;
1728         } else if (found_extent) {
1729                 /* otherwise delete the extent back ref */
1730                 ret = btrfs_del_item(trans, extent_root, path);
1731                 BUG_ON(ret);
1732                 /* if refs are 0, we need to setup the path for deletion */
1733                 if (refs == 0) {
1734                         btrfs_release_path(extent_root, path);
1735                         ret = btrfs_search_slot(trans, extent_root, &key, path,
1736                                                 -1, 1);
1737                         if (ret < 0)
1738                                 return ret;
1739                         BUG_ON(ret);
1740                 }
1741         }
1742
1743         if (refs == 0) {
1744                 u64 super_used;
1745                 u64 root_used;
1746 #ifdef BIO_RW_DISCARD
1747                 u64 map_length = num_bytes;
1748                 struct btrfs_multi_bio *multi = NULL;
1749 #endif
1750
1751                 if (pin) {
1752                         ret = pin_down_bytes(root, bytenr, num_bytes,
1753                              owner_objectid >= BTRFS_FIRST_FREE_OBJECTID, 0);
1754                         if (ret > 0)
1755                                 mark_free = 1;
1756                         BUG_ON(ret < 0);
1757                 }
1758
1759                 /* block accounting for super block */
1760                 spin_lock_irq(&info->delalloc_lock);
1761                 super_used = btrfs_super_bytes_used(&info->super_copy);
1762                 btrfs_set_super_bytes_used(&info->super_copy,
1763                                            super_used - num_bytes);
1764                 spin_unlock_irq(&info->delalloc_lock);
1765
1766                 /* block accounting for root item */
1767                 root_used = btrfs_root_used(&root->root_item);
1768                 btrfs_set_root_used(&root->root_item,
1769                                            root_used - num_bytes);
1770                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
1771                                       num_to_del);
1772                 if (ret) {
1773                         return ret;
1774                 }
1775                 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
1776                                          mark_free);
1777                 BUG_ON(ret);
1778
1779 #ifdef BIO_RW_DISCARD
1780                 /* Tell the block device(s) that the sectors can be discarded */
1781                 ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
1782                                       bytenr, &map_length, &multi, 0);
1783                 if (!ret) {
1784                         struct btrfs_bio_stripe *stripe = multi->stripes;
1785                         int i;
1786
1787                         if (map_length > num_bytes)
1788                                 map_length = num_bytes;
1789
1790                         for (i = 0; i < multi->num_stripes; i++, stripe++) {
1791                                 blkdev_issue_discard(stripe->dev->bdev,
1792                                                      stripe->physical >> 9,
1793                                                      map_length >> 9);
1794                         }
1795                         kfree(multi);
1796                 }
1797 #endif
1798         }
1799         btrfs_free_path(path);
1800         finish_current_insert(trans, extent_root);
1801         return ret;
1802 }
1803
/*
 * Process the extents staged for deletion in fs_info->pending_del.
 *
 * Each pending range is either freed for real -- pinned, then dropped
 * through __free_extent with the extent root as owner -- or, if the same
 * range is also staged for insertion in extent_ins, the two pending
 * operations simply cancel each other out.
 *
 * Must be called with fs_info->alloc_mutex held; the mutex is dropped
 * and retaken around cond_resched().  Returns the last error seen, 0 on
 * success.
 */
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
			       btrfs_root *extent_root)
{
	int ret;
	int err = 0;
	u64 start;
	u64 end;
	struct extent_io_tree *pending_del;
	struct extent_io_tree *pinned_extents;

	WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
	pending_del = &extent_root->fs_info->pending_del;
	pinned_extents = &extent_root->fs_info->pinned_extents;

	while(1) {
		ret = find_first_extent_bit(pending_del, 0, &start, &end,
					    EXTENT_LOCKED);
		if (ret)
			break;
		/* unstage before freeing so we never see it twice */
		clear_extent_bits(pending_del, start, end, EXTENT_LOCKED,
				  GFP_NOFS);
		if (!test_range_bit(&extent_root->fs_info->extent_ins,
				    start, end, EXTENT_LOCKED, 0)) {
			btrfs_update_pinned_extents(extent_root, start,
					      end + 1 - start, 1);
			ret = __free_extent(trans, extent_root,
					     start, end + 1 - start,
					     extent_root->root_key.objectid,
					     0, 0, 0, 0, 0);
		} else {
			/* pending insert + pending delete cancel out */
			clear_extent_bits(&extent_root->fs_info->extent_ins,
					  start, end, EXTENT_LOCKED, GFP_NOFS);
		}
		/* remember the first failure but keep draining the tree */
		if (ret)
			err = ret;

		/* drop the alloc mutex briefly if we need to reschedule */
		if (need_resched()) {
			mutex_unlock(&extent_root->fs_info->alloc_mutex);
			cond_resched();
			mutex_lock(&extent_root->fs_info->alloc_mutex);
		}
	}
	return err;
}
1852
/*
 * remove an extent from the root, returns 0 on success
 *
 * Front end for __free_extent that decides how the space is released:
 *  - frees issued by the extent root itself are only staged
 *    (pin_down_bytes with pending set) and completed later by
 *    del_pending_extents
 *  - tree log metadata goes straight back to the free space cache
 *  - other metadata, and anything whose ref_generation is not the
 *    running transaction, is pinned until commit
 */
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root, u64 bytenr,
			       u64 num_bytes, u64 root_objectid,
			       u64 ref_generation, u64 owner_objectid,
			       u64 owner_offset, int pin)
{
	struct btrfs_root *extent_root = root->fs_info->extent_root;
	int pending_ret;
	int ret;

	WARN_ON(num_bytes < root->sectorsize);
	/* non-COW roots don't track per-reference generations */
	if (!root->ref_cows)
		ref_generation = 0;

	if (root == extent_root) {
		/* stage only; del_pending_extents finishes this later */
		pin_down_bytes(root, bytenr, num_bytes, 0, 1);
		return 0;
	}
	/* if metadata always pin */
	if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
		if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
			/* btrfs_free_reserved_extent */
			set_extent_dirty(&root->fs_info->free_space_cache,
				 bytenr, bytenr + num_bytes - 1, GFP_NOFS);
			return 0;
		}
		pin = 1;
	}

	/* if data pin when any transaction has committed this */
	if (ref_generation != trans->transid)
		pin = 1;

	ret = __free_extent(trans, root, bytenr, num_bytes, root_objectid,
			    ref_generation, owner_objectid, owner_offset,
			    pin, pin == 0);

	/* flush work staged in extent_ins / pending_del during the free */
	finish_current_insert(trans, root->fs_info->extent_root);
	pending_ret = del_pending_extents(trans, root->fs_info->extent_root);
	return ret ? ret : pending_ret;
}
1897
1898 int btrfs_free_extent(struct btrfs_trans_handle *trans,
1899                       struct btrfs_root *root, u64 bytenr,
1900                       u64 num_bytes, u64 root_objectid,
1901                       u64 ref_generation, u64 owner_objectid,
1902                       u64 owner_offset, int pin)
1903 {
1904         int ret;
1905
1906         maybe_lock_mutex(root);
1907         ret = __btrfs_free_extent(trans, root, bytenr, num_bytes,
1908                                   root_objectid, ref_generation,
1909                                   owner_objectid, owner_offset, pin);
1910         maybe_unlock_mutex(root);
1911         return ret;
1912 }
1913
1914 static u64 stripe_align(struct btrfs_root *root, u64 val)
1915 {
1916         u64 mask = ((u64)root->stripesize - 1);
1917         u64 ret = (val + mask) & ~mask;
1918         return ret;
1919 }
1920
1921 /*
1922  * walks the btree of allocated extents and find a hole of a given size.
1923  * The key ins is changed to record the hole:
1924  * ins->objectid == block start
1925  * ins->flags = BTRFS_EXTENT_ITEM_KEY
1926  * ins->offset == number of blocks
1927  * Any available blocks before search_start are skipped.
1928  */
1929 static int noinline find_free_extent(struct btrfs_trans_handle *trans,
1930                                      struct btrfs_root *orig_root,
1931                                      u64 num_bytes, u64 empty_size,
1932                                      u64 search_start, u64 search_end,
1933                                      u64 hint_byte, struct btrfs_key *ins,
1934                                      u64 exclude_start, u64 exclude_nr,
1935                                      int data)
1936 {
1937         int ret;
1938         u64 orig_search_start;
1939         struct btrfs_root * root = orig_root->fs_info->extent_root;
1940         struct btrfs_fs_info *info = root->fs_info;
1941         u64 total_needed = num_bytes;
1942         u64 *last_ptr = NULL;
1943         struct btrfs_block_group_cache *block_group;
1944         int full_scan = 0;
1945         int wrapped = 0;
1946         int chunk_alloc_done = 0;
1947         int empty_cluster = 2 * 1024 * 1024;
1948         int allowed_chunk_alloc = 0;
1949
1950         WARN_ON(num_bytes < root->sectorsize);
1951         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
1952
1953         if (orig_root->ref_cows || empty_size)
1954                 allowed_chunk_alloc = 1;
1955
1956         if (data & BTRFS_BLOCK_GROUP_METADATA) {
1957                 last_ptr = &root->fs_info->last_alloc;
1958                 empty_cluster = 256 * 1024;
1959         }
1960
1961         if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
1962                 last_ptr = &root->fs_info->last_data_alloc;
1963         }
1964         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
1965                 last_ptr = &root->fs_info->last_log_alloc;
1966                 if (!last_ptr == 0 && root->fs_info->last_alloc) {
1967                         *last_ptr = root->fs_info->last_alloc + empty_cluster;
1968                 }
1969         }
1970
1971         if (last_ptr) {
1972                 if (*last_ptr)
1973                         hint_byte = *last_ptr;
1974                 else {
1975                         empty_size += empty_cluster;
1976                 }
1977         }
1978
1979         search_start = max(search_start, first_logical_byte(root, 0));
1980         orig_search_start = search_start;
1981
1982         if (search_end == (u64)-1)
1983                 search_end = btrfs_super_total_bytes(&info->super_copy);
1984
1985         if (hint_byte) {
1986                 block_group = btrfs_lookup_first_block_group(info, hint_byte);
1987                 if (!block_group)
1988                         hint_byte = search_start;
1989                 block_group = btrfs_find_block_group(root, block_group,
1990                                                      hint_byte, data, 1);
1991                 if (last_ptr && *last_ptr == 0 && block_group)
1992                         hint_byte = block_group->key.objectid;
1993         } else {
1994                 block_group = btrfs_find_block_group(root,
1995                                                      trans->block_group,
1996                                                      search_start, data, 1);
1997         }
1998         search_start = max(search_start, hint_byte);
1999
2000         total_needed += empty_size;
2001
2002 check_failed:
2003         if (!block_group) {
2004                 block_group = btrfs_lookup_first_block_group(info,
2005                                                              search_start);
2006                 if (!block_group)
2007                         block_group = btrfs_lookup_first_block_group(info,
2008                                                        orig_search_start);
2009         }
2010         if (full_scan && !chunk_alloc_done) {
2011                 if (allowed_chunk_alloc) {
2012                         do_chunk_alloc(trans, root,
2013                                      num_bytes + 2 * 1024 * 1024, data, 1);
2014                         allowed_chunk_alloc = 0;
2015                 } else if (block_group && block_group_bits(block_group, data)) {
2016                         block_group->space_info->force_alloc = 1;
2017                 }
2018                 chunk_alloc_done = 1;
2019         }
2020         ret = find_search_start(root, &block_group, &search_start,
2021                                 total_needed, data);
2022         if (ret == -ENOSPC && last_ptr && *last_ptr) {
2023                 *last_ptr = 0;
2024                 block_group = btrfs_lookup_first_block_group(info,
2025                                                              orig_search_start);
2026                 search_start = orig_search_start;
2027                 ret = find_search_start(root, &block_group, &search_start,
2028                                         total_needed, data);
2029         }
2030         if (ret == -ENOSPC)
2031                 goto enospc;
2032         if (ret)
2033                 goto error;
2034
2035         if (last_ptr && *last_ptr && search_start != *last_ptr) {
2036                 *last_ptr = 0;
2037                 if (!empty_size) {
2038                         empty_size += empty_cluster;
2039                         total_needed += empty_size;
2040                 }
2041                 block_group = btrfs_lookup_first_block_group(info,
2042                                                        orig_search_start);
2043                 search_start = orig_search_start;
2044                 ret = find_search_start(root, &block_group,
2045                                         &search_start, total_needed, data);
2046                 if (ret == -ENOSPC)
2047                         goto enospc;
2048                 if (ret)
2049                         goto error;
2050         }
2051
2052         search_start = stripe_align(root, search_start);
2053         ins->objectid = search_start;
2054         ins->offset = num_bytes;
2055
2056         if (ins->objectid + num_bytes >= search_end)
2057                 goto enospc;
2058
2059         if (ins->objectid + num_bytes >
2060             block_group->key.objectid + block_group->key.offset) {
2061                 search_start = block_group->key.objectid +
2062                         block_group->key.offset;
2063                 goto new_group;
2064         }
2065
2066         if (test_range_bit(&info->extent_ins, ins->objectid,
2067                            ins->objectid + num_bytes -1, EXTENT_LOCKED, 0)) {
2068                 search_start = ins->objectid + num_bytes;
2069                 goto new_group;
2070         }
2071
2072         if (test_range_bit(&info->pinned_extents, ins->objectid,
2073                            ins->objectid + num_bytes -1, EXTENT_DIRTY, 0)) {
2074                 search_start = ins->objectid + num_bytes;
2075                 goto new_group;
2076         }
2077
2078         if (exclude_nr > 0 && (ins->objectid + num_bytes > exclude_start &&
2079             ins->objectid < exclude_start + exclude_nr)) {
2080                 search_start = exclude_start + exclude_nr;
2081                 goto new_group;
2082         }
2083
2084         if (!(data & BTRFS_BLOCK_GROUP_DATA)) {
2085                 block_group = btrfs_lookup_block_group(info, ins->objectid);
2086                 if (block_group)
2087                         trans->block_group = block_group;
2088         }
2089         ins->offset = num_bytes;
2090         if (last_ptr) {
2091                 *last_ptr = ins->objectid + ins->offset;
2092                 if (*last_ptr ==
2093                     btrfs_super_total_bytes(&root->fs_info->super_copy)) {
2094                         *last_ptr = 0;
2095                 }
2096         }
2097         return 0;
2098
2099 new_group:
2100         if (search_start + num_bytes >= search_end) {
2101 enospc:
2102                 search_start = orig_search_start;
2103                 if (full_scan) {
2104                         ret = -ENOSPC;
2105                         goto error;
2106                 }
2107                 if (wrapped) {
2108                         if (!full_scan)
2109                                 total_needed -= empty_size;
2110                         full_scan = 1;
2111                 } else
2112                         wrapped = 1;
2113         }
2114         block_group = btrfs_lookup_first_block_group(info, search_start);
2115         cond_resched();
2116         block_group = btrfs_find_block_group(root, block_group,
2117                                              search_start, data, 0);
2118         goto check_failed;
2119
2120 error:
2121         return ret;
2122 }
2123
/*
 * Reserve a free extent of @num_bytes, recording it in @ins.
 *
 * Picks the allocation profile from @data (data / system / metadata),
 * pre-allocates chunks when the root is allowed to, and retries with a
 * halved size (never below @min_alloc_size) on ENOSPC.  BUGs if no
 * space can be found at the minimum size.  The reserved range is
 * removed from the in-memory free space cache before returning 0.
 */
static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 num_bytes, u64 min_alloc_size,
				  u64 empty_size, u64 hint_byte,
				  u64 search_end, struct btrfs_key *ins,
				  u64 data)
{
	int ret;
	u64 search_start = 0;
	u64 alloc_profile;
	struct btrfs_fs_info *info = root->fs_info;

	/* choose the target block group flags + raid profile */
	if (data) {
		alloc_profile = info->avail_data_alloc_bits &
				info->data_alloc_profile;
		data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
	} else if (root == root->fs_info->chunk_root) {
		alloc_profile = info->avail_system_alloc_bits &
				info->system_alloc_profile;
		data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
	} else {
		alloc_profile = info->avail_metadata_alloc_bits &
				info->metadata_alloc_profile;
		data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
	}
again:
	data = reduce_alloc_profile(root, data);
	/*
	 * the only place that sets empty_size is btrfs_realloc_node, which
	 * is not called recursively on allocations
	 */
	if (empty_size || root->ref_cows) {
		/* keep some metadata space around for data allocations */
		if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
			ret = do_chunk_alloc(trans, root->fs_info->extent_root,
				     2 * 1024 * 1024,
				     BTRFS_BLOCK_GROUP_METADATA |
				     (info->metadata_alloc_profile &
				      info->avail_metadata_alloc_bits), 0);
			BUG_ON(ret);
		}
		ret = do_chunk_alloc(trans, root->fs_info->extent_root,
				     num_bytes + 2 * 1024 * 1024, data, 0);
		BUG_ON(ret);
	}

	WARN_ON(num_bytes < root->sectorsize);
	ret = find_free_extent(trans, root, num_bytes, empty_size,
			       search_start, search_end, hint_byte, ins,
			       trans->alloc_exclude_start,
			       trans->alloc_exclude_nr, data);

	/* out of space: halve the request (down to the minimum) and retry */
	if (ret == -ENOSPC && num_bytes > min_alloc_size) {
		num_bytes = num_bytes >> 1;
		num_bytes = max(num_bytes, min_alloc_size);
		do_chunk_alloc(trans, root->fs_info->extent_root,
			       num_bytes, data, 1);
		goto again;
	}
	if (ret) {
		printk("allocation failed flags %Lu\n", data);
		BUG();
	}
	/* the winning range is no longer free */
	clear_extent_dirty(&root->fs_info->free_space_cache,
			   ins->objectid, ins->objectid + ins->offset - 1,
			   GFP_NOFS);
	return 0;
}
2191
2192 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
2193 {
2194         maybe_lock_mutex(root);
2195         set_extent_dirty(&root->fs_info->free_space_cache,
2196                          start, start + len - 1, GFP_NOFS);
2197         maybe_unlock_mutex(root);
2198         return 0;
2199 }
2200
2201 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
2202                                   struct btrfs_root *root,
2203                                   u64 num_bytes, u64 min_alloc_size,
2204                                   u64 empty_size, u64 hint_byte,
2205                                   u64 search_end, struct btrfs_key *ins,
2206                                   u64 data)
2207 {
2208         int ret;
2209         maybe_lock_mutex(root);
2210         ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
2211                                      empty_size, hint_byte, search_end, ins,
2212                                      data);
2213         maybe_unlock_mutex(root);
2214         return ret;
2215 }
2216
/*
 * Turn a reserved extent (@ins) into a recorded allocation.
 *
 * Updates super block and root item byte accounting, then inserts the
 * extent item (refs == 1) and its backref in one batched insert.
 * Allocations made by the extent root itself are only staged in the
 * extent_ins io tree and completed later by finish_current_insert().
 * Finally the block group usage is updated; BUGs if that fails.
 */
static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 u64 root_objectid, u64 ref_generation,
					 u64 owner, u64 owner_offset,
					 struct btrfs_key *ins)
{
	int ret;
	int pending_ret;
	u64 super_used;
	u64 root_used;
	u64 num_bytes = ins->offset;
	u32 sizes[2];
	struct btrfs_fs_info *info = root->fs_info;
	struct btrfs_root *extent_root = info->extent_root;
	struct btrfs_extent_item *extent_item;
	struct btrfs_extent_ref *ref;
	struct btrfs_path *path;
	struct btrfs_key keys[2];

	/* block accounting for super block */
	spin_lock_irq(&info->delalloc_lock);
	super_used = btrfs_super_bytes_used(&info->super_copy);
	btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
	spin_unlock_irq(&info->delalloc_lock);

	/* block accounting for root item */
	root_used = btrfs_root_used(&root->root_item);
	btrfs_set_root_used(&root->root_item, root_used + num_bytes);

	/* the extent root can't insert into itself here; stage it instead */
	if (root == extent_root) {
		set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
				ins->objectid + ins->offset - 1,
				EXTENT_LOCKED, GFP_NOFS);
		goto update_block;
	}

	/* item 0: the extent item; item 1: its backref, keyed by hash */
	memcpy(&keys[0], ins, sizeof(*ins));
	keys[1].offset = hash_extent_ref(root_objectid, ref_generation,
					 owner, owner_offset);
	keys[1].objectid = ins->objectid;
	keys[1].type = BTRFS_EXTENT_REF_KEY;
	sizes[0] = sizeof(*extent_item);
	sizes[1] = sizeof(*ref);

	path = btrfs_alloc_path();
	BUG_ON(!path);

	/* insert both items in one shot */
	ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
				       sizes, 2);

	BUG_ON(ret);
	extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				     struct btrfs_extent_item);
	btrfs_set_extent_refs(path->nodes[0], extent_item, 1);
	ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
			     struct btrfs_extent_ref);

	btrfs_set_ref_root(path->nodes[0], ref, root_objectid);
	btrfs_set_ref_generation(path->nodes[0], ref, ref_generation);
	btrfs_set_ref_objectid(path->nodes[0], ref, owner);
	btrfs_set_ref_offset(path->nodes[0], ref, owner_offset);

	btrfs_mark_buffer_dirty(path->nodes[0]);

	/* the allocation is recorded; stop excluding it from searches */
	trans->alloc_exclude_start = 0;
	trans->alloc_exclude_nr = 0;
	btrfs_free_path(path);
	finish_current_insert(trans, extent_root);
	pending_ret = del_pending_extents(trans, extent_root);

	if (ret)
		goto out;
	if (pending_ret) {
		ret = pending_ret;
		goto out;
	}

update_block:
	ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0);
	if (ret) {
		printk("update block group failed for %Lu %Lu\n",
		       ins->objectid, ins->offset);
		BUG();
	}
out:
	return ret;
}
2304
2305 int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
2306                                 struct btrfs_root *root,
2307                                 u64 root_objectid, u64 ref_generation,
2308                                 u64 owner, u64 owner_offset,
2309                                 struct btrfs_key *ins)
2310 {
2311         int ret;
2312         maybe_lock_mutex(root);
2313         ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
2314                                             ref_generation, owner,
2315                                             owner_offset, ins);
2316         maybe_unlock_mutex(root);
2317         return ret;
2318 }
2319
/*
 * this is used by the tree logging recovery code.  It records that
 * an extent has been allocated and makes sure to clear the free
 * space cache bits as well
 */
int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 root_objectid, u64 ref_generation,
				u64 owner, u64 owner_offset,
				struct btrfs_key *ins)
{
	int ret;
	struct btrfs_block_group_cache *block_group;

	maybe_lock_mutex(root);
	/*
	 * make sure the free space cache for this block group is
	 * populated before we clear bits from it below.
	 * NOTE(review): block_group is not NULL-checked here --
	 * presumably a logged extent always lands in a known block
	 * group; confirm cache_block_group tolerates NULL.
	 */
	block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
	cache_block_group(root, block_group);

	/* the logged range is in use: drop it from the free space cache */
	clear_extent_dirty(&root->fs_info->free_space_cache,
			   ins->objectid, ins->objectid + ins->offset - 1,
			   GFP_NOFS);
	ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
					    ref_generation, owner,
					    owner_offset, ins);
	maybe_unlock_mutex(root);
	return ret;
}
2347
/*
 * finds a free extent and does all the dirty work required for allocation
 * returns the key for the extent through ins, and a tree buffer for
 * the first block of the extent through buf.
 *
 * returns 0 if everything worked, non-zero otherwise.
 */
int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       u64 num_bytes, u64 min_alloc_size,
		       u64 root_objectid, u64 ref_generation,
		       u64 owner, u64 owner_offset,
		       u64 empty_size, u64 hint_byte,
		       u64 search_end, struct btrfs_key *ins, u64 data)
{
	int ret;

	maybe_lock_mutex(root);

	/* reserve the byte range; ins is filled in on success */
	ret = __btrfs_reserve_extent(trans, root, num_bytes,
				     min_alloc_size, empty_size, hint_byte,
				     search_end, ins, data);
	BUG_ON(ret);
	/*
	 * tree log blocks stay reserved-only here; their extent items
	 * are inserted later when the log commits (or the reservation
	 * is dropped if the log is thrown away)
	 */
	if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
		ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
						    ref_generation, owner,
						    owner_offset, ins);
		BUG_ON(ret);

	}
	maybe_unlock_mutex(root);
	return ret;
}
2381
2382 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
2383                                             struct btrfs_root *root,
2384                                             u64 bytenr, u32 blocksize)
2385 {
2386         struct extent_buffer *buf;
2387
2388         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
2389         if (!buf)
2390                 return ERR_PTR(-ENOMEM);
2391         btrfs_set_header_generation(buf, trans->transid);
2392         btrfs_tree_lock(buf);
2393         clean_tree_block(trans, root, buf);
2394         btrfs_set_buffer_uptodate(buf);
2395         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
2396                 set_extent_dirty(&root->dirty_log_pages, buf->start,
2397                          buf->start + buf->len - 1, GFP_NOFS);
2398         } else {
2399                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
2400                          buf->start + buf->len - 1, GFP_NOFS);
2401         }
2402         trans->blocks_used++;
2403         return buf;
2404 }
2405
2406 /*
2407  * helper function to allocate a block for a given tree
2408  * returns the tree buffer or NULL.
2409  */
2410 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
2411                                              struct btrfs_root *root,
2412                                              u32 blocksize,
2413                                              u64 root_objectid,
2414                                              u64 ref_generation,
2415                                              u64 first_objectid,
2416                                              int level,
2417                                              u64 hint,
2418                                              u64 empty_size)
2419 {
2420         struct btrfs_key ins;
2421         int ret;
2422         struct extent_buffer *buf;
2423
2424         ret = btrfs_alloc_extent(trans, root, blocksize, blocksize,
2425                                  root_objectid, ref_generation,
2426                                  level, first_objectid, empty_size, hint,
2427                                  (u64)-1, &ins, 0);
2428         if (ret) {
2429                 BUG_ON(ret > 0);
2430                 return ERR_PTR(ret);
2431         }
2432
2433         buf = btrfs_init_new_buffer(trans, root, ins.objectid, blocksize);
2434         return buf;
2435 }
2436
/*
 * walk a leaf and drop one reference on every real file extent it
 * points to.  Used while dropping a snapshot once the leaf itself is
 * no longer shared.
 */
int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
			struct btrfs_root *root, struct extent_buffer *leaf)
{
	u64 leaf_owner;
	u64 leaf_generation;
	struct btrfs_key key;
	struct btrfs_file_extent_item *fi;
	int i;
	int nritems;
	int ret;

	BUG_ON(!btrfs_is_leaf(leaf));
	nritems = btrfs_header_nritems(leaf);
	leaf_owner = btrfs_header_owner(leaf);
	leaf_generation = btrfs_header_generation(leaf);

	for (i = 0; i < nritems; i++) {
		u64 disk_bytenr;
		cond_resched();

		btrfs_item_key_to_cpu(leaf, &key, i);
		if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
			continue;
		fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
		/* inline extents live inside the leaf; no extra refs held */
		if (btrfs_file_extent_type(leaf, fi) ==
		    BTRFS_FILE_EXTENT_INLINE)
			continue;
		/*
		 * FIXME make sure to insert a trans record that
		 * repeats the snapshot del on crash
		 */
		disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		/* a zero disk_bytenr is a hole; nothing to free */
		if (disk_bytenr == 0)
			continue;

		mutex_lock(&root->fs_info->alloc_mutex);
		ret = __btrfs_free_extent(trans, root, disk_bytenr,
				btrfs_file_extent_disk_num_bytes(leaf, fi),
				leaf_owner, leaf_generation,
				key.objectid, key.offset, 0);
		mutex_unlock(&root->fs_info->alloc_mutex);

		/* give throttled transaction waiters a chance to run */
		atomic_inc(&root->fs_info->throttle_gen);
		wake_up(&root->fs_info->transaction_throttle);
		cond_resched();

		BUG_ON(ret);
	}
	return 0;
}
2487
2488 static int noinline cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
2489                                         struct btrfs_root *root,
2490                                         struct btrfs_leaf_ref *ref)
2491 {
2492         int i;
2493         int ret;
2494         struct btrfs_extent_info *info = ref->extents;
2495
2496         for (i = 0; i < ref->nritems; i++) {
2497                 mutex_lock(&root->fs_info->alloc_mutex);
2498                 ret = __btrfs_free_extent(trans, root,
2499                                         info->bytenr, info->num_bytes,
2500                                         ref->owner, ref->generation,
2501                                         info->objectid, info->offset, 0);
2502                 mutex_unlock(&root->fs_info->alloc_mutex);
2503
2504                 atomic_inc(&root->fs_info->throttle_gen);
2505                 wake_up(&root->fs_info->transaction_throttle);
2506                 cond_resched();
2507
2508                 BUG_ON(ret);
2509                 info++;
2510         }
2511
2512         return 0;
2513 }
2514
/*
 * look up the current reference count of an extent.  Used by the
 * snapshot dropping code to decide whether a block can be freed or
 * only needs one reference dropped.
 */
int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
			      u32 *refs)
{
	int ret;

	ret = lookup_extent_ref(NULL, root, start, len, refs);
	BUG_ON(ret);

#if 0 // some debugging code in case we see problems here
	/* if the refs count is one, it won't get increased again.  But
	 * if the ref count is > 1, someone may be decreasing it at
	 * the same time we are.
	 */
	if (*refs != 1) {
		struct extent_buffer *eb = NULL;
		eb = btrfs_find_create_tree_block(root, start, len);
		if (eb)
			btrfs_tree_lock(eb);

		mutex_lock(&root->fs_info->alloc_mutex);
		ret = lookup_extent_ref(NULL, root, start, len, refs);
		BUG_ON(ret);
		mutex_unlock(&root->fs_info->alloc_mutex);

		if (eb) {
			btrfs_tree_unlock(eb);
			free_extent_buffer(eb);
		}
		if (*refs == 1) {
			printk("block %llu went down to one during drop_snap\n",
			       (unsigned long long)start);
		}

	}
#endif

	/* this runs inside long loops; be kind to the scheduler */
	cond_resched();
	return ret;
}
2554
/*
 * helper function for drop_snapshot, this walks down the tree dropping ref
 * counts as it goes.
 */
static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path, int *level)
{
	u64 root_owner;
	u64 root_gen;
	u64 bytenr;
	u64 ptr_gen;
	struct extent_buffer *next;
	struct extent_buffer *cur;
	struct extent_buffer *parent;
	struct btrfs_leaf_ref *ref;
	u32 blocksize;
	int ret;
	u32 refs;

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);
	/* if the subtree root is still shared, only its ref gets dropped */
	ret = drop_snap_lookup_refcount(root, path->nodes[*level]->start,
				path->nodes[*level]->len, &refs);
	BUG_ON(ret);
	if (refs > 1)
		goto out;

	/*
	 * walk down to the last node level and free all the leaves
	 */
	while(*level >= 0) {
		WARN_ON(*level < 0);
		WARN_ON(*level >= BTRFS_MAX_LEVEL);
		cur = path->nodes[*level];

		if (btrfs_header_level(cur) != *level)
			WARN_ON(1);

		/* past the last slot: let walk_up_tree move us upwards */
		if (path->slots[*level] >=
		    btrfs_header_nritems(cur))
			break;
		if (*level == 0) {
			/* a leaf: drop the refs on its file extents */
			ret = btrfs_drop_leaf_ref(trans, root, cur);
			BUG_ON(ret);
			break;
		}
		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
		blocksize = btrfs_level_size(root, *level - 1);

		ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
		BUG_ON(ret);
		if (refs != 1) {
			/*
			 * the child is shared with another tree: drop one
			 * ref and advance to the next slot without
			 * descending into it
			 */
			parent = path->nodes[*level];
			root_owner = btrfs_header_owner(parent);
			root_gen = btrfs_header_generation(parent);
			path->slots[*level]++;

			mutex_lock(&root->fs_info->alloc_mutex);
			ret = __btrfs_free_extent(trans, root, bytenr,
						blocksize, root_owner,
						root_gen, 0, 0, 1);
			BUG_ON(ret);
			mutex_unlock(&root->fs_info->alloc_mutex);

			atomic_inc(&root->fs_info->throttle_gen);
			wake_up(&root->fs_info->transaction_throttle);
			cond_resched();

			continue;
		}
		/*
		 * at this point, we have a single ref, and since the
		 * only place referencing this extent is a dead root
		 * the reference count should never go higher.
		 * So, we don't need to check it again
		 */
		if (*level == 1) {
			struct btrfs_key key;
			btrfs_node_key_to_cpu(cur, &key, path->slots[*level]);
			/*
			 * with a cached leaf ref we can free the leaf's
			 * extents without ever reading the leaf from disk
			 */
			ref = btrfs_lookup_leaf_ref(root, bytenr);
			if (ref) {
				ret = cache_drop_leaf_ref(trans, root, ref);
				BUG_ON(ret);
				btrfs_remove_leaf_ref(root, ref);
				btrfs_free_leaf_ref(root, ref);
				*level = 0;
				break;
			}
			if (printk_ratelimit())
				printk("leaf ref miss for bytenr %llu\n",
				       (unsigned long long)bytenr);
		}
		next = btrfs_find_tree_block(root, bytenr, blocksize);
		if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
			free_extent_buffer(next);

			next = read_tree_block(root, bytenr, blocksize,
					       ptr_gen);
			cond_resched();
#if 0
			/*
			 * this is a debugging check and can go away
			 * the ref should never go all the way down to 1
			 * at this point
			 */
			ret = lookup_extent_ref(NULL, root, bytenr, blocksize,
						&refs);
			BUG_ON(ret);
			WARN_ON(refs != 1);
#endif
		}
		WARN_ON(*level <= 0);
		/* descend: install next as the node one level down */
		if (path->nodes[*level-1])
			free_extent_buffer(path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(next);
		path->slots[*level] = 0;
		cond_resched();
	}
out:
	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	/* free the block we just finished and pop the path up one level */
	if (path->nodes[*level] == root->node) {
		parent = path->nodes[*level];
		bytenr = path->nodes[*level]->start;
	} else {
		parent = path->nodes[*level + 1];
		bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
	}

	blocksize = btrfs_level_size(root, *level);
	root_owner = btrfs_header_owner(parent);
	root_gen = btrfs_header_generation(parent);

	mutex_lock(&root->fs_info->alloc_mutex);
	ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
				  root_owner, root_gen, 0, 0, 1);
	free_extent_buffer(path->nodes[*level]);
	path->nodes[*level] = NULL;
	*level += 1;
	BUG_ON(ret);
	mutex_unlock(&root->fs_info->alloc_mutex);

	cond_resched();
	return 0;
}
2704
/*
 * helper for dropping snapshots.  This walks back up the tree in the path
 * to find the first node higher up where we haven't yet gone through
 * all the slots
 */
static int noinline walk_up_tree(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path, int *level)
{
	u64 root_owner;
	u64 root_gen;
	struct btrfs_root_item *root_item = &root->root_item;
	int i;
	int slot;
	int ret;

	for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
		slot = path->slots[i];
		if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
			struct extent_buffer *node;
			struct btrfs_disk_key disk_key;
			/*
			 * more slots remain at this level: advance to the
			 * next one and record our progress in the root
			 * item so the drop can resume after a commit
			 */
			node = path->nodes[i];
			path->slots[i]++;
			*level = i;
			WARN_ON(*level == 0);
			btrfs_node_key(node, &disk_key, path->slots[i]);
			memcpy(&root_item->drop_progress,
			       &disk_key, sizeof(disk_key));
			root_item->drop_level = i;
			return 0;
		} else {
			/*
			 * this node is fully processed: free its extent
			 * and keep climbing
			 */
			if (path->nodes[*level] == root->node) {
				root_owner = root->root_key.objectid;
				root_gen =
				   btrfs_header_generation(path->nodes[*level]);
			} else {
				struct extent_buffer *node;
				node = path->nodes[*level + 1];
				root_owner = btrfs_header_owner(node);
				root_gen = btrfs_header_generation(node);
			}
			ret = btrfs_free_extent(trans, root,
						path->nodes[*level]->start,
						path->nodes[*level]->len,
						root_owner, root_gen, 0, 0, 1);
			BUG_ON(ret);
			free_extent_buffer(path->nodes[*level]);
			path->nodes[*level] = NULL;
			*level = i + 1;
		}
	}
	/* the entire tree has been walked */
	return 1;
}
2758
/*
 * drop the reference count on the tree rooted at 'snap'.  This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 * decremented.
 */
int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
			*root)
{
	int ret = 0;
	int wret;
	int level;
	struct btrfs_path *path;
	int i;
	int orig_level;
	struct btrfs_root_item *root_item = &root->root_item;

	WARN_ON(!mutex_is_locked(&root->fs_info->drop_mutex));
	path = btrfs_alloc_path();
	BUG_ON(!path);

	level = btrfs_header_level(root->node);
	orig_level = level;
	if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
		/* first attempt: start the walk from the tree root */
		path->nodes[level] = root->node;
		extent_buffer_get(root->node);
		path->slots[level] = 0;
	} else {
		/*
		 * a previous drop was interrupted by a commit; resume
		 * from the key and level saved in drop_progress
		 */
		struct btrfs_key key;
		struct btrfs_disk_key found_key;
		struct extent_buffer *node;

		btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
		level = root_item->drop_level;
		path->lowest_level = level;
		wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (wret < 0) {
			ret = wret;
			goto out;
		}
		node = path->nodes[level];
		btrfs_node_key(node, &found_key, path->slots[level]);
		WARN_ON(memcmp(&found_key, &root_item->drop_progress,
			       sizeof(found_key)));
		/*
		 * unlock our path, this is safe because only this
		 * function is allowed to delete this snapshot
		 */
		for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
			if (path->nodes[i] && path->locks[i]) {
				path->locks[i] = 0;
				btrfs_tree_unlock(path->nodes[i]);
			}
		}
	}
	/* alternate down/up passes until walk_up_tree says we're done */
	while(1) {
		wret = walk_down_tree(trans, root, path, &level);
		if (wret > 0)
			break;
		if (wret < 0)
			ret = wret;

		wret = walk_up_tree(trans, root, path, &level);
		if (wret > 0)
			break;
		if (wret < 0)
			ret = wret;
		/* bail out so the commit can finish; caller retries later */
		if (trans->transaction->in_commit) {
			ret = -EAGAIN;
			break;
		}
		atomic_inc(&root->fs_info->throttle_gen);
		wake_up(&root->fs_info->transaction_throttle);
	}
	/* release any buffers still referenced by the path */
	for (i = 0; i <= orig_level; i++) {
		if (path->nodes[i]) {
			free_extent_buffer(path->nodes[i]);
			path->nodes[i] = NULL;
		}
	}
out:
	btrfs_free_path(path);
	return ret;
}
2842
2843 int btrfs_free_block_groups(struct btrfs_fs_info *info)
2844 {
2845         u64 start;
2846         u64 end;
2847         u64 ptr;
2848         int ret;
2849
2850         mutex_lock(&info->alloc_mutex);
2851         while(1) {
2852                 ret = find_first_extent_bit(&info->block_group_cache, 0,
2853                                             &start, &end, (unsigned int)-1);
2854                 if (ret)
2855                         break;
2856                 ret = get_state_private(&info->block_group_cache, start, &ptr);
2857                 if (!ret)
2858                         kfree((void *)(unsigned long)ptr);
2859                 clear_extent_bits(&info->block_group_cache, start,
2860                                   end, (unsigned int)-1, GFP_NOFS);
2861         }
2862         while(1) {
2863                 ret = find_first_extent_bit(&info->free_space_cache, 0,
2864                                             &start, &end, EXTENT_DIRTY);
2865                 if (ret)
2866                         break;
2867                 clear_extent_dirty(&info->free_space_cache, start,
2868                                    end, GFP_NOFS);
2869         }
2870         mutex_unlock(&info->alloc_mutex);
2871         return 0;
2872 }
2873
/*
 * compute the last page index of a readahead window of nr pages
 * beginning at start, clamped so it never goes past last.
 */
static unsigned long calc_ra(unsigned long start, unsigned long last,
			     unsigned long nr)
{
	unsigned long window_end = start + nr - 1;

	return window_end < last ? window_end : last;
}
2879
2880 static int noinline relocate_inode_pages(struct inode *inode, u64 start,
2881                                          u64 len)
2882 {
2883         u64 page_start;
2884         u64 page_end;
2885         unsigned long last_index;
2886         unsigned long i;
2887         struct page *page;
2888         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2889         struct file_ra_state *ra;
2890         unsigned long total_read = 0;
2891         unsigned long ra_pages;
2892         struct btrfs_ordered_extent *ordered;
2893         struct btrfs_trans_handle *trans;
2894
2895         ra = kzalloc(sizeof(*ra), GFP_NOFS);
2896
2897         mutex_lock(&inode->i_mutex);
2898         i = start >> PAGE_CACHE_SHIFT;
2899         last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
2900
2901         ra_pages = BTRFS_I(inode)->root->fs_info->bdi.ra_pages;
2902
2903         file_ra_state_init(ra, inode->i_mapping);
2904
2905         for (; i <= last_index; i++) {
2906                 if (total_read % ra_pages == 0) {
2907                         btrfs_force_ra(inode->i_mapping, ra, NULL, i,
2908                                        calc_ra(i, last_index, ra_pages));
2909                 }
2910                 total_read++;
2911 again:
2912                 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
2913                         goto truncate_racing;
2914                 page = grab_cache_page(inode->i_mapping, i);
2915                 if (!page) {
2916                         goto out_unlock;
2917                 }
2918                 if (!PageUptodate(page)) {
2919                         btrfs_readpage(NULL, page);
2920                         lock_page(page);
2921                         if (!PageUptodate(page)) {
2922                                 unlock_page(page);
2923                                 page_cache_release(page);
2924                                 goto out_unlock;
2925                         }
2926                 }
2927                 wait_on_page_writeback(page);
2928
2929                 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2930                 page_end = page_start + PAGE_CACHE_SIZE - 1;
2931                 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2932
2933                 ordered = btrfs_lookup_ordered_extent(inode, page_start);
2934                 if (ordered) {
2935                         unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2936                         unlock_page(page);
2937                         page_cache_release(page);
2938                         btrfs_start_ordered_extent(inode, ordered, 1);
2939                         btrfs_put_ordered_extent(ordered);
2940                         goto again;
2941                 }
2942                 set_page_extent_mapped(page);
2943
2944                 /*
2945                  * make sure page_mkwrite is called for this page if userland
2946                  * wants to change it from mmap
2947                  */
2948                 clear_page_dirty_for_io(page);
2949
2950                 btrfs_set_extent_delalloc(inode, page_start, page_end);
2951                 set_page_dirty(page);
2952
2953                 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2954                 unlock_page(page);
2955                 page_cache_release(page);
2956         }
2957
2958 out_unlock:
2959         /* we have to start the IO in order to get the ordered extents
2960          * instantiated.  This allows the relocation to code to wait
2961          * for all the ordered extents to hit the disk.
2962          *
2963          * Otherwise, it would constantly loop over the same extents
2964          * because the old ones don't get deleted  until the IO is
2965          * started
2966          */
2967         btrfs_fdatawrite_range(inode->i_mapping, start, start + len - 1,
2968                                WB_SYNC_NONE);
2969         kfree(ra);
2970         trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
2971         if (trans) {
2972                 btrfs_end_transaction(trans, BTRFS_I(inode)->root);
2973                 mark_inode_dirty(inode);
2974         }
2975         mutex_unlock(&inode->i_mutex);
2976         return 0;
2977
2978 truncate_racing:
2979         vmtruncate(inode, inode->i_size);
2980         balance_dirty_pages_ratelimited_nr(inode->i_mapping,
2981                                            total_read);
2982         goto out_unlock;
2983 }
2984
2985 /*
2986  * The back references tell us which tree holds a ref on a block,
2987  * but it is possible for the tree root field in the reference to
2988  * reflect the original root before a snapshot was made.  In this
2989  * case we should search through all the children of a given root
2990  * to find potential holders of references on a block.
2991  *
2992  * Instead, we do something a little less fancy and just search
2993  * all the roots for a given key/block combination.
2994  */
/*
 * Try each root in turn (starting with *found_root) until one of them
 * resolves @key0 to the block at @bytenr.
 *
 * @level:      tree level the block is expected at (0 for file extents)
 * @file_key:   non-zero when @key0 names a file extent item; the match is
 *              then made on the extent's disk bytenr instead of the tree
 *              block start
 * @found_root: in: first root to try; out: the matching root (set only on
 *              success)
 *
 * Returns 0 and sets *found_root on a match, non-zero when no root
 * references the block.
 */
static int find_root_for_ref(struct btrfs_root *root,
                             struct btrfs_path *path,
                             struct btrfs_key *key0,
                             int level,
                             int file_key,
                             struct btrfs_root **found_root,
                             u64 bytenr)
{
        struct btrfs_key root_location;
        struct btrfs_root *cur_root = *found_root;
        struct btrfs_file_extent_item *file_extent;
        u64 root_search_start = BTRFS_FS_TREE_OBJECTID;
        u64 found_bytenr;
        int ret;

        root_location.offset = (u64)-1;
        root_location.type = BTRFS_ROOT_ITEM_KEY;
        path->lowest_level = level;
        path->reada = 0;
        while(1) {
                ret = btrfs_search_slot(NULL, cur_root, key0, path, 0, 0);
                found_bytenr = 0;
                if (ret == 0 && file_key) {
                        struct extent_buffer *leaf = path->nodes[0];
                        file_extent = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_file_extent_item);
                        /* only regular extents carry a disk bytenr to match */
                        if (btrfs_file_extent_type(leaf, file_extent) ==
                            BTRFS_FILE_EXTENT_REG) {
                                found_bytenr =
                                        btrfs_file_extent_disk_bytenr(leaf,
                                                               file_extent);
                       }
                } else if (!file_key) {
                        /* tree block ref: compare the node start at @level */
                        if (path->nodes[level])
                                found_bytenr = path->nodes[level]->start;
                }

                btrfs_release_path(cur_root, path);

                if (found_bytenr == bytenr) {
                        *found_root = cur_root;
                        ret = 0;
                        goto out;
                }
                /* advance to the next root in the tree of tree roots */
                ret = btrfs_search_root(root->fs_info->tree_root,
                                        root_search_start, &root_search_start);
                if (ret)
                        break;

                root_location.objectid = root_search_start;
                cur_root = btrfs_read_fs_root_no_name(root->fs_info,
                                                      &root_location);
                if (!cur_root) {
                        ret = 1;
                        break;
                }
        }
out:
        /* restore the path for normal (leaf-level) searches */
        path->lowest_level = 0;
        return ret;
}
3056
/*
 * Note: this function releases @path before returning.
 */
/*
 * Relocate the extent described by @extent_key on behalf of the single
 * backref that @path currently points at.
 *
 * For refs to file data, the owning inode is read in and
 * relocate_inode_pages() redirties the pages covering the extent.  The
 * *last_file_* cursors let repeated refs to the same file range be
 * skipped.  For refs to tree blocks, the owning root is found and the
 * block is simply COWed by searching down to it inside a transaction.
 *
 * Called with fs_info->alloc_mutex held; the mutex is dropped for the
 * actual relocation work and re-taken before returning.  Releases
 * @path.  Always returns 0.
 */
static int noinline relocate_one_reference(struct btrfs_root *extent_root,
                                  struct btrfs_path *path,
                                  struct btrfs_key *extent_key,
                                  u64 *last_file_objectid,
                                  u64 *last_file_offset,
                                  u64 *last_file_root,
                                  u64 last_extent)
{
        struct inode *inode;
        struct btrfs_root *found_root;
        struct btrfs_key root_location;
        struct btrfs_key found_key;
        struct btrfs_extent_ref *ref;
        u64 ref_root;
        u64 ref_gen;
        u64 ref_objectid;
        u64 ref_offset;
        int ret;
        int level;

        WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));

        /* copy the backref fields out before the path is released */
        ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
                             struct btrfs_extent_ref);
        ref_root = btrfs_ref_root(path->nodes[0], ref);
        ref_gen = btrfs_ref_generation(path->nodes[0], ref);
        ref_objectid = btrfs_ref_objectid(path->nodes[0], ref);
        ref_offset = btrfs_ref_offset(path->nodes[0], ref);
        btrfs_release_path(extent_root, path);

        root_location.objectid = ref_root;
        if (ref_gen == 0)
                root_location.offset = 0;
        else
                root_location.offset = (u64)-1;
        root_location.type = BTRFS_ROOT_ITEM_KEY;

        found_root = btrfs_read_fs_root_no_name(extent_root->fs_info,
                                                &root_location);
        BUG_ON(!found_root);
        /* drop the allocation mutex while doing the heavy lifting below */
        mutex_unlock(&extent_root->fs_info->alloc_mutex);

        if (ref_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
                /* the ref is against file data */
                found_key.objectid = ref_objectid;
                found_key.type = BTRFS_EXTENT_DATA_KEY;
                found_key.offset = ref_offset;
                level = 0;

                /* skip if we already relocated this exact file range */
                if (last_extent == extent_key->objectid &&
                    *last_file_objectid == ref_objectid &&
                    *last_file_offset == ref_offset &&
                    *last_file_root == ref_root)
                        goto out;

                ret = find_root_for_ref(extent_root, path, &found_key,
                                        level, 1, &found_root,
                                        extent_key->objectid);

                if (ret)
                        goto out;

                /* re-check the cursor: find_root_for_ref may have slept */
                if (last_extent == extent_key->objectid &&
                    *last_file_objectid == ref_objectid &&
                    *last_file_offset == ref_offset &&
                    *last_file_root == ref_root)
                        goto out;

                inode = btrfs_iget_locked(extent_root->fs_info->sb,
                                          ref_objectid, found_root);
                if (inode->i_state & I_NEW) {
                        /* the inode and parent dir are two different roots */
                        BTRFS_I(inode)->root = found_root;
                        BTRFS_I(inode)->location.objectid = ref_objectid;
                        BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
                        BTRFS_I(inode)->location.offset = 0;
                        btrfs_read_locked_inode(inode);
                        unlock_new_inode(inode);

                }
                /* this can happen if the reference is not against
                 * the latest version of the tree root
                 */
                if (is_bad_inode(inode))
                        goto out;

                *last_file_objectid = inode->i_ino;
                *last_file_root = found_root->root_key.objectid;
                *last_file_offset = ref_offset;

                relocate_inode_pages(inode, ref_offset, extent_key->offset);
                iput(inode);
        } else {
                /* the ref is against a tree block */
                struct btrfs_trans_handle *trans;
                struct extent_buffer *eb;
                int needs_lock = 0;

                eb = read_tree_block(found_root, extent_key->objectid,
                                     extent_key->offset, 0);
                btrfs_tree_lock(eb);
                level = btrfs_header_level(eb);

                if (level == 0)
                        btrfs_item_key_to_cpu(eb, &found_key, 0);
                else
                        btrfs_node_key_to_cpu(eb, &found_key, 0);

                btrfs_tree_unlock(eb);
                free_extent_buffer(eb);

                ret = find_root_for_ref(extent_root, path, &found_key,
                                        level, 0, &found_root,
                                        extent_key->objectid);

                if (ret)
                        goto out;

                /*
                 * right here almost anything could happen to our key,
                 * but that's ok.  The cow below will either relocate it
                 * or someone else will have relocated it.  Either way,
                 * it is in a different spot than it was before and
                 * we're happy.
                 */

                trans = btrfs_start_transaction(found_root, 1);

                /* COWing inside these roots requires the alloc_mutex */
                if (found_root == extent_root->fs_info->extent_root ||
                    found_root == extent_root->fs_info->chunk_root ||
                    found_root == extent_root->fs_info->dev_root) {
                        needs_lock = 1;
                        mutex_lock(&extent_root->fs_info->alloc_mutex);
                }

                /* search with cow=1 down to @level forces the block to move */
                path->lowest_level = level;
                path->reada = 2;
                ret = btrfs_search_slot(trans, found_root, &found_key, path,
                                        0, 1);
                path->lowest_level = 0;
                btrfs_release_path(found_root, path);

                if (found_root == found_root->fs_info->extent_root)
                        btrfs_extent_post_op(trans, found_root);
                if (needs_lock)
                        mutex_unlock(&extent_root->fs_info->alloc_mutex);

                btrfs_end_transaction(trans, found_root);

        }
out:
        /* the caller expects the alloc_mutex held on return */
        mutex_lock(&extent_root->fs_info->alloc_mutex);
        return 0;
}
3212
/*
 * Remove the extent record at objectid zero; relocation treats it as
 * invalid and simply deletes it rather than moving it.
 *
 * Returns 0 on success, -EIO if no item matched @extent_key exactly,
 * or a negative errno from the tree search / deletion.
 */
static int noinline del_extent_zero(struct btrfs_root *extent_root,
                                    struct btrfs_path *path,
                                    struct btrfs_key *extent_key)
{
        int ret;
        struct btrfs_trans_handle *trans;

        trans = btrfs_start_transaction(extent_root, 1);
        ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
        if (ret > 0) {
                /* exact key not found: the extent item should exist */
                ret = -EIO;
                goto out;
        }
        if (ret < 0)
                goto out;
        ret = btrfs_del_item(trans, extent_root, path);
out:
        /* note: the caller releases @path after we return */
        btrfs_end_transaction(trans, extent_root);
        return ret;
}
3233
3234 static int noinline relocate_one_extent(struct btrfs_root *extent_root,
3235                                         struct btrfs_path *path,
3236                                         struct btrfs_key *extent_key)
3237 {
3238         struct btrfs_key key;
3239         struct btrfs_key found_key;
3240         struct extent_buffer *leaf;
3241         u64 last_file_objectid = 0;
3242         u64 last_file_root = 0;
3243         u64 last_file_offset = (u64)-1;
3244         u64 last_extent = 0;
3245         u32 nritems;
3246         u32 item_size;
3247         int ret = 0;
3248
3249         if (extent_key->objectid == 0) {
3250                 ret = del_extent_zero(extent_root, path, extent_key);
3251                 goto out;
3252         }
3253         key.objectid = extent_key->objectid;
3254         key.type = BTRFS_EXTENT_REF_KEY;
3255         key.offset = 0;
3256
3257         while(1) {
3258                 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3259
3260                 if (ret < 0)
3261                         goto out;
3262
3263                 ret = 0;
3264                 leaf = path->nodes[0];
3265                 nritems = btrfs_header_nritems(leaf);
3266                 if (path->slots[0] == nritems) {
3267                         ret = btrfs_next_leaf(extent_root, path);
3268                         if (ret > 0) {
3269                                 ret = 0;
3270                                 goto out;
3271                         }
3272                         if (ret < 0)
3273                                 goto out;
3274                         leaf = path->nodes[0];
3275                 }
3276
3277                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3278                 if (found_key.objectid != extent_key->objectid) {
3279                         break;
3280                 }
3281
3282                 if (found_key.type != BTRFS_EXTENT_REF_KEY) {
3283                         break;
3284                 }
3285
3286                 key.offset = found_key.offset + 1;
3287                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3288
3289                 ret = relocate_one_reference(extent_root, path, extent_key,
3290                                              &last_file_objectid,
3291                                              &last_file_offset,
3292                                              &last_file_root, last_extent);
3293                 if (ret)
3294                         goto out;
3295                 last_extent = extent_key->objectid;
3296         }
3297         ret = 0;
3298 out:
3299         btrfs_release_path(extent_root, path);
3300         return ret;
3301 }
3302
3303 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
3304 {
3305         u64 num_devices;
3306         u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
3307                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
3308
3309         num_devices = root->fs_info->fs_devices->num_devices;
3310         if (num_devices == 1) {
3311                 stripped |= BTRFS_BLOCK_GROUP_DUP;
3312                 stripped = flags & ~stripped;
3313
3314                 /* turn raid0 into single device chunks */
3315                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
3316                         return stripped;
3317
3318                 /* turn mirroring into duplication */
3319                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3320                              BTRFS_BLOCK_GROUP_RAID10))
3321                         return stripped | BTRFS_BLOCK_GROUP_DUP;
3322                 return flags;
3323         } else {
3324                 /* they already had raid on here, just return */
3325                 if (flags & stripped)
3326                         return flags;
3327
3328                 stripped |= BTRFS_BLOCK_GROUP_DUP;
3329                 stripped = flags & ~stripped;
3330
3331                 /* switch duplicated blocks with raid1 */
3332                 if (flags & BTRFS_BLOCK_GROUP_DUP)
3333                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
3334
3335                 /* turn single device chunks into raid0 */
3336                 return stripped | BTRFS_BLOCK_GROUP_RAID0;
3337         }
3338         return flags;
3339 }
3340
/*
 * Make sure there is somewhere to relocate the contents of
 * @shrink_block_group: if the group still has bytes in use (or @force),
 * allocate a replacement chunk sized to hold them, with flags adjusted
 * by update_block_group_flags() for the current device count.
 *
 * Called with fs_info->alloc_mutex held; the mutex is dropped around
 * transaction start/end (starting a transaction with it held could
 * deadlock) and re-taken before returning.  Always returns 0.
 */
int __alloc_chunk_for_shrink(struct btrfs_root *root,
                     struct btrfs_block_group_cache *shrink_block_group,
                     int force)
{
        struct btrfs_trans_handle *trans;
        u64 new_alloc_flags;
        u64 calc;

        spin_lock(&shrink_block_group->lock);
        if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
                spin_unlock(&shrink_block_group->lock);
                /* drop alloc_mutex before joining a transaction */
                mutex_unlock(&root->fs_info->alloc_mutex);

                trans = btrfs_start_transaction(root, 1);
                mutex_lock(&root->fs_info->alloc_mutex);
                spin_lock(&shrink_block_group->lock);

                new_alloc_flags = update_block_group_flags(root,
                                                   shrink_block_group->flags);
                /*
                 * if the profile changes (e.g. raid1 -> dup) only the used
                 * bytes need a new home; otherwise reserve a full group
                 */
                if (new_alloc_flags != shrink_block_group->flags) {
                        calc =
                             btrfs_block_group_used(&shrink_block_group->item);
                } else {
                        calc = shrink_block_group->key.offset;
                }
                spin_unlock(&shrink_block_group->lock);

                do_chunk_alloc(trans, root->fs_info->extent_root,
                               calc + 2 * 1024 * 1024, new_alloc_flags, force);

                /* again, end the transaction without alloc_mutex held */
                mutex_unlock(&root->fs_info->alloc_mutex);
                btrfs_end_transaction(trans, root);
                mutex_lock(&root->fs_info->alloc_mutex);
        } else
                spin_unlock(&shrink_block_group->lock);
        return 0;
}
3378
/*
 * Empty the block group containing @shrink_start and delete its item
 * from the extent tree (used when removing or shrinking a device).
 *
 * Strategy: mark the group read-only, walk every EXTENT_ITEM inside it
 * and relocate each one; commit, clean old snapshots, flush delalloc
 * and ordered extents, then rescan.  Repeat until a full pass finds
 * nothing (relocation keeps creating new ordered extents, so several
 * passes are normal).  Finally remove the block group item and clear
 * its ranges from the block group and free space caches.
 *
 * Takes and releases fs_info->alloc_mutex internally; the mutex is
 * dropped around transaction commits and inode flushing.
 *
 * Returns 0 on success or a negative errno.
 */
int btrfs_shrink_extent_tree(struct btrfs_root *root, u64 shrink_start)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_root *tree_root = root->fs_info->tree_root;
        struct btrfs_path *path;
        u64 cur_byte;
        u64 total_found;
        u64 shrink_last_byte;
        struct btrfs_block_group_cache *shrink_block_group;
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int progress;

        mutex_lock(&root->fs_info->alloc_mutex);
        shrink_block_group = btrfs_lookup_block_group(root->fs_info,
                                                      shrink_start);
        BUG_ON(!shrink_block_group);

        shrink_last_byte = shrink_block_group->key.objectid +
                shrink_block_group->key.offset;

        /* stop the allocator from counting on this group's space */
        shrink_block_group->space_info->total_bytes -=
                shrink_block_group->key.offset;
        /* NOTE(review): btrfs_alloc_path() can return NULL; unchecked here */
        path = btrfs_alloc_path();
        root = root->fs_info->extent_root;
        path->reada = 2;

        printk("btrfs relocating block group %llu flags %llu\n",
               (unsigned long long)shrink_start,
               (unsigned long long)shrink_block_group->flags);

        /* pre-allocate a chunk to hold everything we are about to move */
        __alloc_chunk_for_shrink(root, shrink_block_group, 1);

again:

        shrink_block_group->ro = 1;

        total_found = 0;
        progress = 0;
        key.objectid = shrink_start;
        key.offset = 0;
        key.type = 0;
        cur_byte = key.objectid;

        mutex_unlock(&root->fs_info->alloc_mutex);

        /* flush dirty data so all extents in the group are on record */
        btrfs_start_delalloc_inodes(root);
        btrfs_wait_ordered_extents(tree_root, 0);

        mutex_lock(&root->fs_info->alloc_mutex);

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;

        /* back up one item in case an extent straddles shrink_start */
        ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
        if (ret < 0)
                goto out;

        if (ret == 0) {
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                if (found_key.objectid + found_key.offset > shrink_start &&
                    found_key.objectid < shrink_last_byte) {
                        cur_byte = found_key.objectid;
                        key.objectid = cur_byte;
                }
        }
        btrfs_release_path(root, path);

        /* walk every extent item inside the group and relocate it */
        while(1) {
                ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
                if (ret < 0)
                        goto out;

next:
                leaf = path->nodes[0];
                nritems = btrfs_header_nritems(leaf);
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                goto out;
                        if (ret == 1) {
                                ret = 0;
                                break;
                        }
                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                }

                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

                if (found_key.objectid >= shrink_last_byte)
                        break;

                /* be nice to the scheduler: drop the path and resume */
                if (progress && need_resched()) {
                        memcpy(&key, &found_key, sizeof(key));
                        cond_resched();
                        btrfs_release_path(root, path);
                        btrfs_search_slot(NULL, root, &key, path, 0, 0);
                        progress = 0;
                        goto next;
                }
                progress = 1;

                /* skip non-extent items and extents already processed */
                if (btrfs_key_type(&found_key) != BTRFS_EXTENT_ITEM_KEY ||
                    found_key.objectid + found_key.offset <= cur_byte) {
                        memcpy(&key, &found_key, sizeof(key));
                        key.offset++;
                        path->slots[0]++;
                        goto next;
                }

                total_found++;
                cur_byte = found_key.objectid + found_key.offset;
                key.objectid = cur_byte;
                btrfs_release_path(root, path);
                ret = relocate_one_extent(root, path, &found_key);
                __alloc_chunk_for_shrink(root, shrink_block_group, 0);
        }

        btrfs_release_path(root, path);

        if (total_found > 0) {
                printk("btrfs relocate found %llu last extent was %llu\n",
                       (unsigned long long)total_found,
                       (unsigned long long)found_key.objectid);
                mutex_unlock(&root->fs_info->alloc_mutex);
                trans = btrfs_start_transaction(tree_root, 1);
                btrfs_commit_transaction(trans, tree_root);

                btrfs_clean_old_snapshots(tree_root);

                btrfs_start_delalloc_inodes(root);
                btrfs_wait_ordered_extents(tree_root, 0);

                trans = btrfs_start_transaction(tree_root, 1);
                btrfs_commit_transaction(trans, tree_root);
                mutex_lock(&root->fs_info->alloc_mutex);
                /* relocation may have created new extents; rescan */
                goto again;
        }

        /*
         * we've freed all the extents, now remove the block
         * group item from the tree
         */
        mutex_unlock(&root->fs_info->alloc_mutex);

        trans = btrfs_start_transaction(root, 1);

        mutex_lock(&root->fs_info->alloc_mutex);
        memcpy(&key, &shrink_block_group->key, sizeof(key));

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0)
                ret = -EIO;
        if (ret < 0) {
                btrfs_end_transaction(trans, root);
                goto out;
        }

        clear_extent_bits(&info->block_group_cache, key.objectid,
                          key.objectid + key.offset - 1,
                          (unsigned int)-1, GFP_NOFS);


        clear_extent_bits(&info->free_space_cache,
                           key.objectid, key.objectid + key.offset - 1,
                           (unsigned int)-1, GFP_NOFS);

        /*
        memset(shrink_block_group, 0, sizeof(*shrink_block_group));
        kfree(shrink_block_group);
        */

        btrfs_del_item(trans, root, path);
        btrfs_release_path(root, path);
        mutex_unlock(&root->fs_info->alloc_mutex);
        btrfs_commit_transaction(trans, root);

        mutex_lock(&root->fs_info->alloc_mutex);

        /* the code to unpin extents might set a few bits in the free
         * space cache for this range again
         */
        clear_extent_bits(&info->free_space_cache,
                           key.objectid, key.objectid + key.offset - 1,
                           (unsigned int)-1, GFP_NOFS);
out:
        btrfs_free_path(path);
        mutex_unlock(&root->fs_info->alloc_mutex);
        return ret;
}
3576
3577 int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path,
3578                            struct btrfs_key *key)
3579 {
3580         int ret = 0;
3581         struct btrfs_key found_key;
3582         struct extent_buffer *leaf;
3583         int slot;
3584
3585         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
3586         if (ret < 0)
3587                 goto out;
3588
3589         while(1) {
3590                 slot = path->slots[0];
3591                 leaf = path->nodes[0];
3592                 if (slot >= btrfs_header_nritems(leaf)) {
3593                         ret = btrfs_next_leaf(root, path);
3594                         if (ret == 0)
3595                                 continue;
3596                         if (ret < 0)
3597                                 goto out;
3598                         break;
3599                 }
3600                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3601
3602                 if (found_key.objectid >= key->objectid &&
3603                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
3604                         ret = 0;
3605                         goto out;
3606                 }
3607                 path->slots[0]++;
3608         }
3609         ret = -ENOENT;
3610 out:
3611         return ret;
3612 }
3613
3614 int btrfs_read_block_groups(struct btrfs_root *root)
3615 {
3616         struct btrfs_path *path;
3617         int ret;
3618         int bit;
3619         struct btrfs_block_group_cache *cache;
3620         struct btrfs_fs_info *info = root->fs_info;
3621         struct btrfs_space_info *space_info;
3622         struct extent_io_tree *block_group_cache;
3623         struct btrfs_key key;
3624         struct btrfs_key found_key;
3625         struct extent_buffer *leaf;
3626
3627         block_group_cache = &info->block_group_cache;
3628         root = info->extent_root;
3629         key.objectid = 0;
3630         key.offset = 0;
3631         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
3632         path = btrfs_alloc_path();
3633         if (!path)
3634                 return -ENOMEM;
3635
3636         mutex_lock(&root->fs_info->alloc_mutex);
3637         while(1) {
3638                 ret = find_first_block_group(root, path, &key);
3639                 if (ret > 0) {
3640                         ret = 0;
3641                         goto error;
3642                 }
3643                 if (ret != 0)
3644                         goto error;
3645
3646                 leaf = path->nodes[0];
3647                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3648                 cache = kzalloc(sizeof(*cache), GFP_NOFS);
3649                 if (!cache) {
3650                         ret = -ENOMEM;
3651                         break;
3652                 }
3653
3654                 spin_lock_init(&cache->lock);
3655                 read_extent_buffer(leaf, &cache->item,
3656                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
3657                                    sizeof(cache->item));
3658                 memcpy(&cache->key, &found_key, sizeof(found_key));
3659
3660                 key.objectid = found_key.objectid + found_key.offset;
3661                 btrfs_release_path(root, path);
3662                 cache->flags = btrfs_block_group_flags(&cache->item);
3663                 bit = 0;
3664                 if (cache->flags & BTRFS_BLOCK_GROUP_DATA) {
3665                         bit = BLOCK_GROUP_DATA;
3666                 } else if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
3667                         bit = BLOCK_GROUP_SYSTEM;
3668                 } else if (cache->flags & BTRFS_BLOCK_GROUP_METADATA) {
3669                         bit = BLOCK_GROUP_METADATA;
3670                 }
3671                 set_avail_alloc_bits(info, cache->flags);
3672
3673                 ret = update_space_info(info, cache->flags, found_key.offset,
3674                                         btrfs_block_group_used(&cache->item),
3675                                         &space_info);
3676                 BUG_ON(ret);
3677                 cache->space_info = space_info;
3678
3679                 /* use EXTENT_LOCKED to prevent merging */
3680                 set_extent_bits(block_group_cache, found_key.objectid,
3681                                 found_key.objectid + found_key.offset - 1,
3682                                 EXTENT_LOCKED, GFP_NOFS);
3683                 set_state_private(block_group_cache, found_key.objectid,
3684                                   (unsigned long)cache);
3685                 set_extent_bits(block_group_cache, found_key.objectid,
3686                                 found_key.objectid + found_key.offset - 1,
3687                                 bit | EXTENT_LOCKED, GFP_NOFS);
3688                 if (key.objectid >=
3689                     btrfs_super_total_bytes(&info->super_copy))
3690                         break;
3691         }
3692         ret = 0;
3693 error:
3694         btrfs_free_path(path);
3695         mutex_unlock(&root->fs_info->alloc_mutex);
3696         return ret;
3697 }
3698
/*
 * Create the in-memory cache entry and on-disk BLOCK_GROUP_ITEM for a
 * newly allocated chunk.
 *
 * @bytes_used:      bytes already in use inside the new group
 * @type:            BTRFS_BLOCK_GROUP_* flags for the group
 * @chunk_objectid:  objectid of the owning chunk
 * @chunk_offset:    logical start of the chunk (becomes key.objectid)
 * @size:            length of the group in bytes (becomes key.offset)
 *
 * Caller must hold fs_info->alloc_mutex (asserted below).  Always
 * returns 0; allocation and insertion failures are fatal (BUG_ON).
 */
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root, u64 bytes_used,
                           u64 type, u64 chunk_objectid, u64 chunk_offset,
                           u64 size)
{
        int ret;
        int bit = 0;
        struct btrfs_root *extent_root;
        struct btrfs_block_group_cache *cache;
        struct extent_io_tree *block_group_cache;

        WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
        extent_root = root->fs_info->extent_root;
        block_group_cache = &root->fs_info->block_group_cache;

        root->fs_info->last_trans_new_blockgroup = trans->transid;

        cache = kzalloc(sizeof(*cache), GFP_NOFS);
        BUG_ON(!cache);
        cache->key.objectid = chunk_offset;
        cache->key.offset = size;
        spin_lock_init(&cache->lock);
        btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);

        btrfs_set_block_group_used(&cache->item, bytes_used);
        btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
        cache->flags = type;
        btrfs_set_block_group_flags(&cache->item, type);

        ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
                                &cache->space_info);
        BUG_ON(ret);

        /*
         * tag the range in the block group cache tree; EXTENT_LOCKED
         * keeps adjacent group ranges from being merged
         */
        bit = block_group_state_bits(type);
        set_extent_bits(block_group_cache, chunk_offset,
                        chunk_offset + size - 1,
                        EXTENT_LOCKED, GFP_NOFS);
        set_state_private(block_group_cache, chunk_offset,
                          (unsigned long)cache);
        set_extent_bits(block_group_cache, chunk_offset,
                        chunk_offset + size - 1,
                        bit | EXTENT_LOCKED, GFP_NOFS);

        ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
                                sizeof(cache->item));
        BUG_ON(ret);

        /* flush pending extent tree changes created by the insert */
        finish_current_insert(trans, extent_root);
        ret = del_pending_extents(trans, extent_root);
        BUG_ON(ret);
        set_avail_alloc_bits(extent_root->fs_info, type);

        return 0;
}