/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include "print-tree.h"
#include "transaction.h"
#include "ref-cache.h"

#define BLOCK_GROUP_DATA     EXTENT_WRITEBACK
#define BLOCK_GROUP_METADATA EXTENT_UPTODATE
#define BLOCK_GROUP_SYSTEM   EXTENT_NEW

#define BLOCK_GROUP_DIRTY EXTENT_DIRTY

static int finish_current_insert(struct btrfs_trans_handle *trans, struct
                                 btrfs_root *extent_root);
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
                               btrfs_root *extent_root);
static struct btrfs_block_group_cache *
__btrfs_find_block_group(struct btrfs_root *root,
                         struct btrfs_block_group_cache *hint,
                         u64 search_start, int data, int owner);

void maybe_lock_mutex(struct btrfs_root *root)
    if (root != root->fs_info->extent_root &&
        root != root->fs_info->chunk_root &&
        root != root->fs_info->dev_root) {
        mutex_lock(&root->fs_info->alloc_mutex);

void maybe_unlock_mutex(struct btrfs_root *root)
    if (root != root->fs_info->extent_root &&
        root != root->fs_info->chunk_root &&
        root != root->fs_info->dev_root) {
        mutex_unlock(&root->fs_info->alloc_mutex);

static int cache_block_group(struct btrfs_root *root,
                             struct btrfs_block_group_cache *block_group)
    struct btrfs_path *path;
    struct extent_buffer *leaf;
    struct extent_io_tree *free_space_cache;

    root = root->fs_info->extent_root;
    free_space_cache = &root->fs_info->free_space_cache;

    if (block_group->cached)

    path = btrfs_alloc_path();

    /*
     * we get into deadlocks with paths held by callers of this function.
     * since the alloc_mutex is protecting things right now, just
     * skip the locking here
     */
    path->skip_locking = 1;
    first_free = block_group->key.objectid;
    key.objectid = block_group->key.objectid;
    btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
    ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
    ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
    leaf = path->nodes[0];
    btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
    if (key.objectid + key.offset > first_free)
        first_free = key.objectid + key.offset;
    leaf = path->nodes[0];
    slot = path->slots[0];
    if (slot >= btrfs_header_nritems(leaf)) {
        ret = btrfs_next_leaf(root, path);
    btrfs_item_key_to_cpu(leaf, &key, slot);
    if (key.objectid < block_group->key.objectid) {
    if (key.objectid >= block_group->key.objectid +
        block_group->key.offset) {
    if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
        if (key.objectid > last) {
            hole_size = key.objectid - last;
            set_extent_dirty(free_space_cache, last,
                             last + hole_size - 1,
        last = key.objectid + key.offset;
    if (block_group->key.objectid +
        block_group->key.offset > last) {
        hole_size = block_group->key.objectid +
                    block_group->key.offset - last;
        set_extent_dirty(free_space_cache, last,
                         last + hole_size - 1, GFP_NOFS);
    block_group->cached = 1;
    btrfs_free_path(path);

struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
    struct extent_io_tree *block_group_cache;
    struct btrfs_block_group_cache *block_group = NULL;

    bytenr = max_t(u64, bytenr,
                   BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
    block_group_cache = &info->block_group_cache;
    ret = find_first_extent_bit(block_group_cache,
                                bytenr, &start, &end,
                                BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
    ret = get_state_private(block_group_cache, start, &ptr);
    block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;

struct btrfs_block_group_cache *btrfs_lookup_block_group(struct
    struct extent_io_tree *block_group_cache;
    struct btrfs_block_group_cache *block_group = NULL;

    bytenr = max_t(u64, bytenr,
                   BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
    block_group_cache = &info->block_group_cache;
    ret = find_first_extent_bit(block_group_cache,
                                bytenr, &start, &end,
                                BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
    ret = get_state_private(block_group_cache, start, &ptr);
    block_group = (struct btrfs_block_group_cache *)(unsigned long)ptr;
    if (block_group->key.objectid <= bytenr && bytenr <
        block_group->key.objectid + block_group->key.offset)

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
    return (cache->flags & bits) == bits;

static int noinline find_search_start(struct btrfs_root *root,
                                      struct btrfs_block_group_cache **cache_ret,
                                      u64 *start_ret, u64 num, int data)
    struct btrfs_block_group_cache *cache = *cache_ret;
    struct extent_io_tree *free_space_cache;
    struct extent_state *state;
    u64 search_start = *start_ret;

    WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
    total_fs_bytes = btrfs_super_total_bytes(&root->fs_info->super_copy);
    free_space_cache = &root->fs_info->free_space_cache;

    ret = cache_block_group(root, cache);

    last = max(search_start, cache->key.objectid);
    if (!block_group_bits(cache, data) || cache->ro)

    spin_lock_irq(&free_space_cache->lock);
    state = find_first_extent_bit_state(free_space_cache, last, EXTENT_DIRTY);
    spin_unlock_irq(&free_space_cache->lock);

    start = max(last, state->start);
    last = state->end + 1;
    if (last - start < num) {
        state = extent_state_next(state);
    } while (state && !(state->state & EXTENT_DIRTY));
    spin_unlock_irq(&free_space_cache->lock);

    if (start + num > cache->key.objectid + cache->key.offset)
    if (!block_group_bits(cache, data)) {
        printk("block group bits don't match %Lu %d\n", cache->flags, data);
    cache = btrfs_lookup_block_group(root->fs_info, search_start);
        printk("Unable to find block group for %Lu\n", search_start);
    last = cache->key.objectid + cache->key.offset;
    cache = btrfs_lookup_first_block_group(root->fs_info, last);
    if (!cache || cache->key.objectid >= total_fs_bytes) {
    if (cache_miss && !cache->cached) {
        cache_block_group(root, cache);
        cache = btrfs_lookup_first_block_group(root->fs_info, last);
    cache = btrfs_find_block_group(root, cache, last, data, 0);

static u64 div_factor(u64 num, int factor)
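/*
 * The body of div_factor() is not shown in this listing. Judging from
 * its callers below, which compare usage against "factor tenths" of a
 * block group or space_info size, it computes roughly the following;
 * treat this as an assumption rather than the verbatim body:
 *
 *     num *= factor;
 *     do_div(num, 10);
 *     return num;
 *
 * so div_factor(x, 6) is about 60% of x.
 */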
static int block_group_state_bits(u64 flags)
    if (flags & BTRFS_BLOCK_GROUP_DATA)
        bits |= BLOCK_GROUP_DATA;
    if (flags & BTRFS_BLOCK_GROUP_METADATA)
        bits |= BLOCK_GROUP_METADATA;
    if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
        bits |= BLOCK_GROUP_SYSTEM;

static struct btrfs_block_group_cache *
__btrfs_find_block_group(struct btrfs_root *root,
                         struct btrfs_block_group_cache *hint,
                         u64 search_start, int data, int owner)
    struct btrfs_block_group_cache *cache;
    struct extent_io_tree *block_group_cache;
    struct btrfs_block_group_cache *found_group = NULL;
    struct btrfs_fs_info *info = root->fs_info;

    block_group_cache = &info->block_group_cache;

    if (data & BTRFS_BLOCK_GROUP_METADATA)

    bit = block_group_state_bits(data);

        struct btrfs_block_group_cache *shint;
        shint = btrfs_lookup_first_block_group(info, search_start);
        if (shint && block_group_bits(shint, data) && !shint->ro) {
            spin_lock(&shint->lock);
            used = btrfs_block_group_used(&shint->item);
            if (used + shint->pinned <
                div_factor(shint->key.offset, factor)) {
                spin_unlock(&shint->lock);
            spin_unlock(&shint->lock);
    if (hint && !hint->ro && block_group_bits(hint, data)) {
        spin_lock(&hint->lock);
        used = btrfs_block_group_used(&hint->item);
        if (used + hint->pinned <
            div_factor(hint->key.offset, factor)) {
            spin_unlock(&hint->lock);
        spin_unlock(&hint->lock);
        last = hint->key.objectid + hint->key.offset;
        last = max(hint->key.objectid, search_start);

    ret = find_first_extent_bit(block_group_cache, last,
    ret = get_state_private(block_group_cache, start, &ptr);

    cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
    spin_lock(&cache->lock);
    last = cache->key.objectid + cache->key.offset;
    used = btrfs_block_group_used(&cache->item);

    if (!cache->ro && block_group_bits(cache, data)) {
        free_check = div_factor(cache->key.offset, factor);
        if (used + cache->pinned < free_check) {
        spin_unlock(&cache->lock);
    spin_unlock(&cache->lock);

    if (!full_search && factor < 10) {

struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
                                                       struct btrfs_block_group_cache
                                                       *hint, u64 search_start,
    struct btrfs_block_group_cache *ret;
    ret = __btrfs_find_block_group(root, hint, search_start, data, owner);

static u64 hash_extent_ref(u64 root_objectid, u64 ref_generation,
                           u64 owner, u64 owner_offset)
    u32 high_crc = ~(u32)0;
    u32 low_crc = ~(u32)0;

    lenum = cpu_to_le64(root_objectid);
    high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
    lenum = cpu_to_le64(ref_generation);
    low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
    if (owner >= BTRFS_FIRST_FREE_OBJECTID) {
        lenum = cpu_to_le64(owner);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner_offset);
        low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
    return ((u64)high_crc << 32) | (u64)low_crc;

static int match_extent_ref(struct extent_buffer *leaf,
                            struct btrfs_extent_ref *disk_ref,
                            struct btrfs_extent_ref *cpu_ref)
    if (cpu_ref->objectid)
        len = sizeof(*cpu_ref);
        len = 2 * sizeof(u64);
    ret = memcmp_extent_buffer(leaf, cpu_ref, (unsigned long)disk_ref,

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, struct btrfs_path *path,
    struct btrfs_key key;

    maybe_lock_mutex(root);
    key.objectid = start;
    btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
    ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
    maybe_unlock_mutex(root);

static int noinline lookup_extent_backref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path, u64 bytenr,
                                          u64 ref_generation, u64 owner,
                                          u64 owner_offset, int del)
    struct btrfs_key key;
    struct btrfs_key found_key;
    struct btrfs_extent_ref ref;
    struct extent_buffer *leaf;
    struct btrfs_extent_ref *disk_ref;

    btrfs_set_stack_ref_root(&ref, root_objectid);
    btrfs_set_stack_ref_generation(&ref, ref_generation);
    btrfs_set_stack_ref_objectid(&ref, owner);
    btrfs_set_stack_ref_offset(&ref, owner_offset);

    hash = hash_extent_ref(root_objectid, ref_generation, owner,
    key.objectid = bytenr;
    key.type = BTRFS_EXTENT_REF_KEY;

    ret = btrfs_search_slot(trans, root, &key, path,
    leaf = path->nodes[0];
        u32 nritems = btrfs_header_nritems(leaf);
        if (path->slots[0] >= nritems) {
            ret2 = btrfs_next_leaf(root, path);
            leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
        if (found_key.objectid != bytenr ||
            found_key.type != BTRFS_EXTENT_REF_KEY)
        key.offset = found_key.offset;
        btrfs_release_path(root, path);
        disk_ref = btrfs_item_ptr(path->nodes[0],
                                  struct btrfs_extent_ref);
        if (match_extent_ref(path->nodes[0], disk_ref, &ref)) {
        btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
        key.offset = found_key.offset + 1;
        btrfs_release_path(root, path);

/*
 * Back reference rules. Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance. This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume (in theory, not implemented yet)
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure has fields for:
 *
 * - Objectid of the subvolume root
 * - Generation number of the tree holding the reference
 * - objectid of the file holding the reference
 * - offset in the file corresponding to the key holding the reference
 *
 * When a file extent is allocated the fields are filled in:
 *     (root_key.objectid, trans->transid, inode objectid, offset in file)
 *
 * When a leaf is cow'd new references are added for every file extent found
 * in the leaf. It looks the same as the create case, but trans->transid
 * will be different when the block is cow'd.
 *
 *     (root_key.objectid, trans->transid, inode objectid, offset in file)
 *
 * When a file extent is removed either during snapshot deletion or file
 * truncation, the corresponding back reference is found
 *
 *     (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
 *      inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 * - Different generations of the same subvolume
 *
 * Storing sufficient information for a full reverse mapping of a btree
 * block would require storing the lowest key of the block in the backref,
 * and it would require updating that lowest key either before write out or
 * every time it changed. Instead, the objectid of the lowest key is stored
 * along with the level of the tree block. This provides a hint
 * about where in the btree the block can be found. Searches through the
 * btree only need to look for a pointer to that block, so they stop one
 * level higher than the level recorded in the backref.
 *
 * Some btrees do not do reference counting on their extents. These
 * include the extent tree and the tree of tree roots. Backrefs for these
 * trees always have a generation of zero.
 *
 * When a tree block is created, back references are inserted:
 *
 * (root->root_key.objectid, trans->transid or zero, level, lowest_key_objectid)
 *
 * When a tree block is cow'd in a reference counted root,
 * new back references are added for all the blocks it points to.
 * These are of the form (trans->transid will have increased since creation):
 *
 * (root->root_key.objectid, trans->transid, level, lowest_key_objectid)
 *
 * Because the lowest_key_objectid and the level are just hints
 * they are not used when backrefs are deleted. When a backref is deleted:
 *
 * if backref was for a tree root:
 *     root_objectid = root->root_key.objectid
 *     root_objectid = btrfs_header_owner(parent)
 *
 * (root_objectid, btrfs_header_generation(parent) or zero, 0, 0)
 *
 * Back Reference Key hashing:
 * Back references have four fields, each 64 bits long. Unfortunately,
 * this is hashed into a single 64 bit number and placed into the key offset.
 * The key objectid corresponds to the first byte in the extent, and the
 * key type is set to BTRFS_EXTENT_REF_KEY
 */
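/*
 * A minimal, userspace-style sketch of the hashing scheme described
 * above, mirroring the structure of hash_extent_ref(): the root
 * objectid feeds the high 32 bits, the generation (plus the owner
 * fields for regular file refs) feeds the low 32 bits. The bitwise
 * CRC32C here (polynomial 0x82F63B78, reflected) and the helper names
 * are illustrative assumptions, not bit-for-bit identical to the
 * kernel's btrfs_crc32c().
 */
#if 0
#include <stdint.h>
#include <stddef.h>

static uint32_t crc32c_sw(uint32_t crc, const void *buf, size_t len)
{
    const unsigned char *p = buf;

    while (len--) {
        crc ^= *p++;
        for (int i = 0; i < 8; i++)
            crc = (crc >> 1) ^ (0x82F63B78U & -(crc & 1));
    }
    return crc;
}

static uint64_t example_backref_hash(uint64_t root_objectid, uint64_t gen,
                                     uint64_t owner, uint64_t owner_offset,
                                     int is_file_ref)
{
    uint32_t high_crc = ~(uint32_t)0;
    uint32_t low_crc = ~(uint32_t)0;

    /* on-disk values are little endian; assume a little endian host */
    high_crc = crc32c_sw(high_crc, &root_objectid, sizeof(root_objectid));
    low_crc = crc32c_sw(low_crc, &gen, sizeof(gen));
    if (is_file_ref) {
        /* stand-in for the owner >= BTRFS_FIRST_FREE_OBJECTID check */
        low_crc = crc32c_sw(low_crc, &owner, sizeof(owner));
        low_crc = crc32c_sw(low_crc, &owner_offset, sizeof(owner_offset));
    }
    return ((uint64_t)high_crc << 32) | (uint64_t)low_crc;
}
#endif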
int btrfs_insert_extent_backref(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_path *path, u64 bytenr,
                                u64 root_objectid, u64 ref_generation,
                                u64 owner, u64 owner_offset)
    struct btrfs_key key;
    struct btrfs_extent_ref ref;
    struct btrfs_extent_ref *disk_ref;

    btrfs_set_stack_ref_root(&ref, root_objectid);
    btrfs_set_stack_ref_generation(&ref, ref_generation);
    btrfs_set_stack_ref_objectid(&ref, owner);
    btrfs_set_stack_ref_offset(&ref, owner_offset);

    hash = hash_extent_ref(root_objectid, ref_generation, owner,
    key.objectid = bytenr;
    key.type = BTRFS_EXTENT_REF_KEY;

    ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(ref));
    while (ret == -EEXIST) {
        disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                  struct btrfs_extent_ref);
        if (match_extent_ref(path->nodes[0], disk_ref, &ref))
        btrfs_release_path(root, path);
        ret = btrfs_insert_empty_item(trans, root, path, &key,
    disk_ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
                              struct btrfs_extent_ref);
    write_extent_buffer(path->nodes[0], &ref, (unsigned long)disk_ref,
    btrfs_mark_buffer_dirty(path->nodes[0]);
    btrfs_release_path(root, path);

static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  u64 bytenr, u64 num_bytes,
                                  u64 root_objectid, u64 ref_generation,
                                  u64 owner, u64 owner_offset)
    struct btrfs_path *path;
    struct btrfs_key key;
    struct extent_buffer *l;
    struct btrfs_extent_item *item;

    WARN_ON(num_bytes < root->sectorsize);
    path = btrfs_alloc_path();

    key.objectid = bytenr;
    btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
    key.offset = num_bytes;
    ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
    item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
    refs = btrfs_extent_refs(l, item);
    btrfs_set_extent_refs(l, item, refs + 1);
    btrfs_mark_buffer_dirty(path->nodes[0]);
    btrfs_release_path(root->fs_info->extent_root, path);

    ret = btrfs_insert_extent_backref(trans, root->fs_info->extent_root,
                                      path, bytenr, root_objectid,
                                      ref_generation, owner, owner_offset);
    finish_current_insert(trans, root->fs_info->extent_root);
    del_pending_extents(trans, root->fs_info->extent_root);
    btrfs_free_path(path);

int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root,
                         u64 bytenr, u64 num_bytes,
                         u64 root_objectid, u64 ref_generation,
                         u64 owner, u64 owner_offset)
    mutex_lock(&root->fs_info->alloc_mutex);
    ret = __btrfs_inc_extent_ref(trans, root, bytenr, num_bytes,
                                 root_objectid, ref_generation,
                                 owner, owner_offset);
    mutex_unlock(&root->fs_info->alloc_mutex);

int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root)
    finish_current_insert(trans, root->fs_info->extent_root);
    del_pending_extents(trans, root->fs_info->extent_root);

static int lookup_extent_ref(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
                             u64 num_bytes, u32 *refs)
    struct btrfs_path *path;
    struct btrfs_key key;
    struct extent_buffer *l;
    struct btrfs_extent_item *item;

    WARN_ON(num_bytes < root->sectorsize);
    path = btrfs_alloc_path();
    key.objectid = bytenr;
    key.offset = num_bytes;
    btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
    ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
        btrfs_print_leaf(root, path->nodes[0]);
        printk("failed to find block number %Lu\n", bytenr);
    item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
    *refs = btrfs_extent_refs(l, item);
    btrfs_free_path(path);

static int get_reference_status(struct btrfs_root *root, u64 bytenr,
                                u64 parent_gen, u64 ref_objectid,
                                u64 *min_generation, u32 *ref_count)
    struct btrfs_root *extent_root = root->fs_info->extent_root;
    struct btrfs_path *path;
    struct extent_buffer *leaf;
    struct btrfs_extent_ref *ref_item;
    struct btrfs_key key;
    struct btrfs_key found_key;
    u64 root_objectid = root->root_key.objectid;

    key.objectid = bytenr;
    key.type = BTRFS_EXTENT_ITEM_KEY;

    path = btrfs_alloc_path();
    mutex_lock(&root->fs_info->alloc_mutex);
    ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
    leaf = path->nodes[0];
    btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

    if (found_key.objectid != bytenr ||
        found_key.type != BTRFS_EXTENT_ITEM_KEY) {

    *min_generation = (u64)-1;

    leaf = path->nodes[0];
    nritems = btrfs_header_nritems(leaf);
    if (path->slots[0] >= nritems) {
        ret = btrfs_next_leaf(extent_root, path);
    btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
    if (found_key.objectid != bytenr)
    if (found_key.type != BTRFS_EXTENT_REF_KEY) {

    ref_item = btrfs_item_ptr(leaf, path->slots[0],
                              struct btrfs_extent_ref);
    ref_generation = btrfs_ref_generation(leaf, ref_item);
    /*
     * For (parent_gen > 0 && parent_gen > ref_gen):
     *
     * we reach here through the oldest root, therefore
     * all other references from the same snapshot should have
     * a larger generation.
     */
    if ((root_objectid != btrfs_ref_root(leaf, ref_item)) ||
        (parent_gen > 0 && parent_gen > ref_generation) ||
        (ref_objectid >= BTRFS_FIRST_FREE_OBJECTID &&
         ref_objectid != btrfs_ref_objectid(leaf, ref_item))) {

    if (*min_generation > ref_generation)
        *min_generation = ref_generation;

    mutex_unlock(&root->fs_info->alloc_mutex);
    btrfs_free_path(path);

int btrfs_cross_ref_exists(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct btrfs_key *key, u64 bytenr)
    struct btrfs_root *old_root;
    struct btrfs_path *path = NULL;
    struct extent_buffer *eb;
    struct btrfs_file_extent_item *item;

    BUG_ON(trans == NULL);
    BUG_ON(key->type != BTRFS_EXTENT_DATA_KEY);
    ret = get_reference_status(root, bytenr, 0, key->objectid,
                               &min_generation, &ref_count);

    old_root = root->dirty_root->root;
    ref_generation = old_root->root_key.offset;

    /* all references are created in running transaction */
    if (min_generation > ref_generation) {

    path = btrfs_alloc_path();

    path->skip_locking = 1;
    /* if no item found, the extent is referenced by other snapshot */
    ret = btrfs_search_slot(NULL, old_root, key, path, 0, 0);

    item = btrfs_item_ptr(eb, path->slots[0],
                          struct btrfs_file_extent_item);
    if (btrfs_file_extent_type(eb, item) != BTRFS_FILE_EXTENT_REG ||
        btrfs_file_extent_disk_bytenr(eb, item) != bytenr) {

    for (level = BTRFS_MAX_LEVEL - 1; level >= -1; level--) {
        eb = path->nodes[level];
        extent_start = eb->start;
        extent_start = bytenr;
        ret = get_reference_status(root, extent_start, ref_generation,
                                   0, &min_generation, &ref_count);
        if (ref_count != 1) {
        ref_generation = btrfs_header_generation(eb);

    btrfs_free_path(path);

int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                  struct extent_buffer *buf, int cache_ref)
    struct btrfs_key key;
    struct btrfs_file_extent_item *fi;
    int nr_file_extents = 0;

    if (!root->ref_cows)

    level = btrfs_header_level(buf);
    nritems = btrfs_header_nritems(buf);
    for (i = 0; i < nritems; i++) {
        btrfs_item_key_to_cpu(buf, &key, i);
        if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
        fi = btrfs_item_ptr(buf, i,
                            struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(buf, fi) ==
            BTRFS_FILE_EXTENT_INLINE)
        disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
        if (disk_bytenr == 0)

        if (buf != root->commit_root)

        mutex_lock(&root->fs_info->alloc_mutex);
        ret = __btrfs_inc_extent_ref(trans, root, disk_bytenr,
                                     btrfs_file_extent_disk_num_bytes(buf, fi),
                                     root->root_key.objectid, trans->transid,
                                     key.objectid, key.offset);
        mutex_unlock(&root->fs_info->alloc_mutex);

        bytenr = btrfs_node_blockptr(buf, i);
        btrfs_node_key_to_cpu(buf, &key, i);

        mutex_lock(&root->fs_info->alloc_mutex);
        ret = __btrfs_inc_extent_ref(trans, root, bytenr,
                                     btrfs_level_size(root, level - 1),
                                     root->root_key.objectid,
                                     level - 1, key.objectid);
        mutex_unlock(&root->fs_info->alloc_mutex);
    /* cache original leaf block's references */
    if (level == 0 && cache_ref && buf != root->commit_root) {
        struct btrfs_leaf_ref *ref;
        struct btrfs_extent_info *info;

        ref = btrfs_alloc_leaf_ref(root, nr_file_extents);

        ref->root_gen = root->root_key.offset;
        ref->bytenr = buf->start;
        ref->owner = btrfs_header_owner(buf);
        ref->generation = btrfs_header_generation(buf);
        ref->nritems = nr_file_extents;
        info = ref->extents;

        for (i = 0; nr_file_extents > 0 && i < nritems; i++) {
            btrfs_item_key_to_cpu(buf, &key, i);
            if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
            fi = btrfs_item_ptr(buf, i,
                                struct btrfs_file_extent_item);
            if (btrfs_file_extent_type(buf, fi) ==
                BTRFS_FILE_EXTENT_INLINE)
            disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
            if (disk_bytenr == 0)

            info->bytenr = disk_bytenr;
                btrfs_file_extent_disk_num_bytes(buf, fi);
            info->objectid = key.objectid;
            info->offset = key.offset;

        BUG_ON(!root->ref_tree);
        ret = btrfs_add_leaf_ref(root, ref);
        btrfs_free_leaf_ref(root, ref);

    for (i = 0; i < faili; i++) {
        btrfs_item_key_to_cpu(buf, &key, i);
        if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
        fi = btrfs_item_ptr(buf, i,
                            struct btrfs_file_extent_item);
        if (btrfs_file_extent_type(buf, fi) ==
            BTRFS_FILE_EXTENT_INLINE)
        disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
        if (disk_bytenr == 0)
        err = btrfs_free_extent(trans, root, disk_bytenr,
                                btrfs_file_extent_disk_num_bytes(buf,
        bytenr = btrfs_node_blockptr(buf, i);
        err = btrfs_free_extent(trans, root, bytenr,
                                btrfs_level_size(root, level - 1), 0);

static int write_one_cache_group(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct btrfs_block_group_cache *cache)
    struct btrfs_root *extent_root = root->fs_info->extent_root;
    struct extent_buffer *leaf;

    ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);

    leaf = path->nodes[0];
    bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
    write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
    btrfs_mark_buffer_dirty(leaf);
    btrfs_release_path(extent_root, path);

    finish_current_insert(trans, extent_root);
    pending_ret = del_pending_extents(trans, extent_root);

int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
    struct extent_io_tree *block_group_cache;
    struct btrfs_block_group_cache *cache;
    struct btrfs_path *path;

    block_group_cache = &root->fs_info->block_group_cache;
    path = btrfs_alloc_path();

    mutex_lock(&root->fs_info->alloc_mutex);

    ret = find_first_extent_bit(block_group_cache, last,
                                &start, &end, BLOCK_GROUP_DIRTY);

    ret = get_state_private(block_group_cache, start, &ptr);

    cache = (struct btrfs_block_group_cache *)(unsigned long)ptr;
    err = write_one_cache_group(trans, root,
    /*
     * if we fail to write the cache group, we want
     * to keep it marked dirty in hopes that a later
     */
    clear_extent_bits(block_group_cache, start, end,
                      BLOCK_GROUP_DIRTY, GFP_NOFS);

    btrfs_free_path(path);
    mutex_unlock(&root->fs_info->alloc_mutex);

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
    struct list_head *head = &info->space_info;
    struct list_head *cur;
    struct btrfs_space_info *found;

    list_for_each(cur, head) {
        found = list_entry(cur, struct btrfs_space_info, list);
        if (found->flags == flags)

static int update_space_info(struct btrfs_fs_info *info, u64 flags,
                             u64 total_bytes, u64 bytes_used,
                             struct btrfs_space_info **space_info)
    struct btrfs_space_info *found;

    found = __find_space_info(info, flags);
        found->total_bytes += total_bytes;
        found->bytes_used += bytes_used;
        *space_info = found;

    found = kmalloc(sizeof(*found), GFP_NOFS);

    list_add(&found->list, &info->space_info);
    found->flags = flags;
    found->total_bytes = total_bytes;
    found->bytes_used = bytes_used;
    found->bytes_pinned = 0;
    found->force_alloc = 0;
    *space_info = found;

static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
    u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
                               BTRFS_BLOCK_GROUP_RAID1 |
                               BTRFS_BLOCK_GROUP_RAID10 |
                               BTRFS_BLOCK_GROUP_DUP);

    if (flags & BTRFS_BLOCK_GROUP_DATA)
        fs_info->avail_data_alloc_bits |= extra_flags;
    if (flags & BTRFS_BLOCK_GROUP_METADATA)
        fs_info->avail_metadata_alloc_bits |= extra_flags;
    if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
        fs_info->avail_system_alloc_bits |= extra_flags;

static u64 reduce_alloc_profile(struct btrfs_root *root, u64 flags)
    u64 num_devices = root->fs_info->fs_devices->num_devices;

    if (num_devices == 1)
        flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
    if (num_devices < 4)
        flags &= ~BTRFS_BLOCK_GROUP_RAID10;

    if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
        (flags & (BTRFS_BLOCK_GROUP_RAID1 |
                  BTRFS_BLOCK_GROUP_RAID10))) {
        flags &= ~BTRFS_BLOCK_GROUP_DUP;

    if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
        (flags & BTRFS_BLOCK_GROUP_RAID10)) {
        flags &= ~BTRFS_BLOCK_GROUP_RAID1;

    if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
        ((flags & BTRFS_BLOCK_GROUP_RAID1) |
         (flags & BTRFS_BLOCK_GROUP_RAID10) |
         (flags & BTRFS_BLOCK_GROUP_DUP)))
        flags &= ~BTRFS_BLOCK_GROUP_RAID0;
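/*
 * A quick illustration of the reduction above, with stand-in values for
 * the BTRFS_BLOCK_GROUP_* bits (the real values live in ctree.h; these
 * are assumptions for the sketch). On a one-device filesystem a request
 * for RAID1 data degrades to plain single allocation.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define BG_DATA   (1ULL << 0)
#define BG_RAID0  (1ULL << 3)
#define BG_RAID1  (1ULL << 4)
#define BG_DUP    (1ULL << 5)
#define BG_RAID10 (1ULL << 6)

static uint64_t reduce(uint64_t flags, uint64_t num_devices)
{
    if (num_devices == 1)
        flags &= ~(BG_RAID1 | BG_RAID0);
    if (num_devices < 4)
        flags &= ~BG_RAID10;
    if ((flags & BG_DUP) && (flags & (BG_RAID1 | BG_RAID10)))
        flags &= ~BG_DUP;   /* real mirroring beats dup */
    if ((flags & BG_RAID1) && (flags & BG_RAID10))
        flags &= ~BG_RAID1; /* raid10 already implies mirroring */
    if ((flags & BG_RAID0) && (flags & (BG_RAID1 | BG_RAID10 | BG_DUP)))
        flags &= ~BG_RAID0;
    return flags;
}

int main(void)
{
    /* one device: RAID1 data falls back to plain data */
    printf("%llx\n", (unsigned long long)reduce(BG_DATA | BG_RAID1, 1));
    return 0;
}
#endif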
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 alloc_bytes,
                          u64 flags, int force)
    struct btrfs_space_info *space_info;

    flags = reduce_alloc_profile(extent_root, flags);

    space_info = __find_space_info(extent_root->fs_info, flags);
        ret = update_space_info(extent_root->fs_info, flags,
    BUG_ON(!space_info);

    if (space_info->force_alloc) {
        space_info->force_alloc = 0;
    if (space_info->full)

    thresh = div_factor(space_info->total_bytes, 6);
        (space_info->bytes_used + space_info->bytes_pinned + alloc_bytes) <

    mutex_lock(&extent_root->fs_info->chunk_mutex);
    ret = btrfs_alloc_chunk(trans, extent_root, &start, &num_bytes, flags);
    if (ret == -ENOSPC) {
        printk("space info full %Lu\n", flags);
        space_info->full = 1;

    ret = btrfs_make_block_group(trans, extent_root, 0, flags,
                                 BTRFS_FIRST_CHUNK_TREE_OBJECTID, start, num_bytes);
    mutex_unlock(&extent_root->fs_info->chunk_mutex);

static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc,
    struct btrfs_block_group_cache *cache;
    struct btrfs_fs_info *info = root->fs_info;
    u64 total = num_bytes;

    WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));

    cache = btrfs_lookup_block_group(info, bytenr);

    byte_in_group = bytenr - cache->key.objectid;
    WARN_ON(byte_in_group > cache->key.offset);
    start = cache->key.objectid;
    end = start + cache->key.offset - 1;
    set_extent_bits(&info->block_group_cache, start, end,
                    BLOCK_GROUP_DIRTY, GFP_NOFS);

    spin_lock(&cache->lock);
    old_val = btrfs_block_group_used(&cache->item);
    num_bytes = min(total, cache->key.offset - byte_in_group);
        old_val += num_bytes;
        cache->space_info->bytes_used += num_bytes;
        btrfs_set_block_group_used(&cache->item, old_val);
        spin_unlock(&cache->lock);
        old_val -= num_bytes;
        cache->space_info->bytes_used -= num_bytes;
        btrfs_set_block_group_used(&cache->item, old_val);
        spin_unlock(&cache->lock);
        set_extent_dirty(&info->free_space_cache,
                         bytenr, bytenr + num_bytes - 1,
    bytenr += num_bytes;

static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
    ret = find_first_extent_bit(&root->fs_info->block_group_cache,
                                search_start, &start, &end,
                                BLOCK_GROUP_DATA | BLOCK_GROUP_METADATA |
                                BLOCK_GROUP_SYSTEM);

int btrfs_update_pinned_extents(struct btrfs_root *root,
                                u64 bytenr, u64 num, int pin)
    struct btrfs_block_group_cache *cache;
    struct btrfs_fs_info *fs_info = root->fs_info;

    WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
        set_extent_dirty(&fs_info->pinned_extents,
                         bytenr, bytenr + num - 1, GFP_NOFS);
        clear_extent_dirty(&fs_info->pinned_extents,
                           bytenr, bytenr + num - 1, GFP_NOFS);

    cache = btrfs_lookup_block_group(fs_info, bytenr);
        u64 first = first_logical_byte(root, bytenr);
        WARN_ON(first < bytenr);
        len = min(first - bytenr, num);
        len = min(num, cache->key.offset -
                  (bytenr - cache->key.objectid));
        spin_lock(&cache->lock);
        cache->pinned += len;
        cache->space_info->bytes_pinned += len;
        spin_unlock(&cache->lock);
        fs_info->total_pinned += len;
        spin_lock(&cache->lock);
        cache->pinned -= len;
        cache->space_info->bytes_pinned -= len;
        spin_unlock(&cache->lock);
        fs_info->total_pinned -= len;

int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
    struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;

    ret = find_first_extent_bit(pinned_extents, last,
                                &start, &end, EXTENT_DIRTY);
    set_extent_dirty(copy, start, end, GFP_NOFS);

int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               struct extent_io_tree *unpin)
    struct extent_io_tree *free_space_cache;

    free_space_cache = &root->fs_info->free_space_cache;

    mutex_lock(&root->fs_info->alloc_mutex);

    ret = find_first_extent_bit(unpin, 0, &start, &end,
    btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
    clear_extent_dirty(unpin, start, end, GFP_NOFS);
    set_extent_dirty(free_space_cache, start, end, GFP_NOFS);
    if (need_resched()) {
        mutex_unlock(&root->fs_info->alloc_mutex);
        mutex_lock(&root->fs_info->alloc_mutex);
    mutex_unlock(&root->fs_info->alloc_mutex);
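/*
 * Sketch of the pin/unpin lifecycle implemented above, using plain
 * bitmasks in place of the extent_io trees (illustrative only): extents
 * freed during a transaction are pinned rather than reused; at commit
 * the pinned set is snapshotted (btrfs_copy_pinned) and each range is
 * then unpinned and returned to the free space cache
 * (btrfs_finish_extent_commit).
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t free_space = 0xFFFF0000; /* blocks 16..31 start out free */
    uint32_t pinned = 0;

    /* free blocks 4..7 during the transaction: pin, don't reuse yet */
    pinned |= 0x000000F0;

    /* at commit: snapshot the pinned set, then unpin into free space */
    uint32_t unpin = pinned;
    pinned &= ~unpin;
    free_space |= unpin;

    printf("free=%08x pinned=%08x\n", free_space, pinned);
    return 0;
}
#endif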
static int finish_current_insert(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *extent_root)
    struct btrfs_fs_info *info = extent_root->fs_info;
    struct extent_buffer *eb;
    struct btrfs_path *path;
    struct btrfs_key ins;
    struct btrfs_disk_key first;
    struct btrfs_extent_item extent_item;

    WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
    btrfs_set_stack_extent_refs(&extent_item, 1);
    btrfs_set_key_type(&ins, BTRFS_EXTENT_ITEM_KEY);
    path = btrfs_alloc_path();

    ret = find_first_extent_bit(&info->extent_ins, 0, &start,
                                &end, EXTENT_LOCKED);

    ins.objectid = start;
    ins.offset = end + 1 - start;
    err = btrfs_insert_item(trans, extent_root, &ins,
                            &extent_item, sizeof(extent_item));
    clear_extent_bits(&info->extent_ins, start, end, EXTENT_LOCKED,

    eb = btrfs_find_create_tree_block(extent_root, ins.objectid,
    if (!btrfs_buffer_uptodate(eb, trans->transid))
        btrfs_read_buffer(eb, trans->transid);

    btrfs_tree_lock(eb);
    level = btrfs_header_level(eb);
        btrfs_item_key(eb, &first, 0);
        btrfs_node_key(eb, &first, 0);
    btrfs_tree_unlock(eb);
    free_extent_buffer(eb);

    /*
     * the first key is just a hint, so the race we've created
     * against reading it is fine
     */
    err = btrfs_insert_extent_backref(trans, extent_root, path,
                                      start, extent_root->root_key.objectid,
                                      btrfs_disk_key_objectid(&first));

    if (need_resched()) {
        mutex_unlock(&extent_root->fs_info->alloc_mutex);
        mutex_lock(&extent_root->fs_info->alloc_mutex);

    btrfs_free_path(path);

static int pin_down_bytes(struct btrfs_root *root, u64 bytenr, u32 num_bytes,
                          int is_data, int pending)
    WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));

        struct extent_buffer *buf;

        buf = btrfs_find_tree_block(root, bytenr, num_bytes);

        /* we can reuse a block if it hasn't been written
         * and it is from this transaction.  We can't
         * reuse anything from the tree log root because
         * it has tiny sub-transactions.
         */
        if (btrfs_buffer_uptodate(buf, 0) &&
            btrfs_try_tree_lock(buf)) {
                root->fs_info->running_transaction->transid;
            u64 header_transid =
                btrfs_header_generation(buf);
            if (btrfs_header_owner(buf) !=
                BTRFS_TREE_LOG_OBJECTID &&
                header_transid == transid &&
                !btrfs_header_flag(buf,
                                   BTRFS_HEADER_FLAG_WRITTEN)) {
                clean_tree_block(NULL, root, buf);
                btrfs_tree_unlock(buf);
                free_extent_buffer(buf);
            btrfs_tree_unlock(buf);
            free_extent_buffer(buf);
        btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
        set_extent_bits(&root->fs_info->pending_del,
                        bytenr, bytenr + num_bytes - 1,
                        EXTENT_LOCKED, GFP_NOFS);

/*
 * remove an extent from the root, returns 0 on success
 */
static int __free_extent(struct btrfs_trans_handle *trans, struct btrfs_root
                         *root, u64 bytenr, u64 num_bytes,
                         u64 root_objectid, u64 ref_generation,
                         u64 owner_objectid, u64 owner_offset, int pin,
    struct btrfs_path *path;
    struct btrfs_key key;
    struct btrfs_fs_info *info = root->fs_info;
    struct btrfs_root *extent_root = info->extent_root;
    struct extent_buffer *leaf;
    int extent_slot = 0;
    int found_extent = 0;
    struct btrfs_extent_item *ei;

    WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
    key.objectid = bytenr;
    btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
    key.offset = num_bytes;
    path = btrfs_alloc_path();

    ret = lookup_extent_backref(trans, extent_root, path,
                                bytenr, root_objectid,
                                owner_objectid, owner_offset, 1);
        struct btrfs_key found_key;
        extent_slot = path->slots[0];
        while (extent_slot > 0) {
            btrfs_item_key_to_cpu(path->nodes[0], &found_key,
            if (found_key.objectid != bytenr)
            if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
                found_key.offset == num_bytes) {
            if (path->slots[0] - extent_slot > 5)
        ret = btrfs_del_item(trans, extent_root, path);
        btrfs_print_leaf(extent_root, path->nodes[0]);
        printk("Unable to find ref byte nr %Lu root %Lu "
               "gen %Lu owner %Lu offset %Lu\n", bytenr,
               root_objectid, ref_generation, owner_objectid,
    if (!found_extent) {
        btrfs_release_path(extent_root, path);
        ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
        extent_slot = path->slots[0];

    leaf = path->nodes[0];
    ei = btrfs_item_ptr(leaf, extent_slot,
                        struct btrfs_extent_item);
    refs = btrfs_extent_refs(leaf, ei);
    btrfs_set_extent_refs(leaf, ei, refs);

    btrfs_mark_buffer_dirty(leaf);

    if (refs == 0 && found_extent && path->slots[0] == extent_slot + 1) {
        /* if the back ref and the extent are next to each other
         * they get deleted below in one shot
         */
        path->slots[0] = extent_slot;
    } else if (found_extent) {
        /* otherwise delete the extent back ref */
        ret = btrfs_del_item(trans, extent_root, path);

        /* if refs are 0, we need to setup the path for deletion */
            btrfs_release_path(extent_root, path);
            ret = btrfs_search_slot(trans, extent_root, &key, path,

#ifdef BIO_RW_DISCARD
        u64 map_length = num_bytes;
        struct btrfs_multi_bio *multi = NULL;

        ret = pin_down_bytes(root, bytenr, num_bytes,
                             owner_objectid >= BTRFS_FIRST_FREE_OBJECTID, 0);

        /* block accounting for super block */
        spin_lock_irq(&info->delalloc_lock);
        super_used = btrfs_super_bytes_used(&info->super_copy);
        btrfs_set_super_bytes_used(&info->super_copy,
                                   super_used - num_bytes);
        spin_unlock_irq(&info->delalloc_lock);

        /* block accounting for root item */
        root_used = btrfs_root_used(&root->root_item);
        btrfs_set_root_used(&root->root_item,
                            root_used - num_bytes);
        ret = btrfs_del_items(trans, extent_root, path, path->slots[0],

        ret = update_block_group(trans, root, bytenr, num_bytes, 0,

#ifdef BIO_RW_DISCARD
        /* Tell the block device(s) that the sectors can be discarded */
        ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
                              bytenr, &map_length, &multi, 0);
            struct btrfs_bio_stripe *stripe = multi->stripes;

            if (map_length > num_bytes)
                map_length = num_bytes;

            for (i = 0; i < multi->num_stripes; i++, stripe++) {
                blkdev_issue_discard(stripe->dev->bdev,
                                     stripe->physical >> 9,

    btrfs_free_path(path);
    finish_current_insert(trans, extent_root);

/*
 * find all the blocks marked as pending in the radix tree and remove
 * them from the extent map
 */
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
                               btrfs_root *extent_root)
    struct extent_io_tree *pending_del;
    struct extent_io_tree *pinned_extents;

    WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
    pending_del = &extent_root->fs_info->pending_del;
    pinned_extents = &extent_root->fs_info->pinned_extents;

    ret = find_first_extent_bit(pending_del, 0, &start, &end,
    clear_extent_bits(pending_del, start, end, EXTENT_LOCKED,
    if (!test_range_bit(&extent_root->fs_info->extent_ins,
                        start, end, EXTENT_LOCKED, 0)) {
        btrfs_update_pinned_extents(extent_root, start,
                                    end + 1 - start, 1);
        ret = __free_extent(trans, extent_root,
                            start, end + 1 - start,
                            extent_root->root_key.objectid,
        clear_extent_bits(&extent_root->fs_info->extent_ins,
                          start, end, EXTENT_LOCKED, GFP_NOFS);

    if (need_resched()) {
        mutex_unlock(&extent_root->fs_info->alloc_mutex);
        mutex_lock(&extent_root->fs_info->alloc_mutex);

/*
 * remove an extent from the root, returns 0 on success
 */
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root, u64 bytenr,
                               u64 num_bytes, u64 root_objectid,
                               u64 ref_generation, u64 owner_objectid,
                               u64 owner_offset, int pin)
    struct btrfs_root *extent_root = root->fs_info->extent_root;

    WARN_ON(num_bytes < root->sectorsize);
    if (!root->ref_cows)

    if (root == extent_root) {
        pin_down_bytes(root, bytenr, num_bytes, 0, 1);

    /* if metadata always pin */
    if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
        if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
            /* btrfs_free_reserved_extent */
            set_extent_dirty(&root->fs_info->free_space_cache,
                             bytenr, bytenr + num_bytes - 1, GFP_NOFS);

    /* if data pin when any transaction has committed this */
    if (ref_generation != trans->transid)

    ret = __free_extent(trans, root, bytenr, num_bytes, root_objectid,
                        ref_generation, owner_objectid, owner_offset,

    finish_current_insert(trans, root->fs_info->extent_root);
    pending_ret = del_pending_extents(trans, root->fs_info->extent_root);
    return ret ? ret : pending_ret;

int btrfs_free_extent(struct btrfs_trans_handle *trans,
                      struct btrfs_root *root, u64 bytenr,
                      u64 num_bytes, u64 root_objectid,
                      u64 ref_generation, u64 owner_objectid,
                      u64 owner_offset, int pin)
    maybe_lock_mutex(root);
    ret = __btrfs_free_extent(trans, root, bytenr, num_bytes,
                              root_objectid, ref_generation,
                              owner_objectid, owner_offset, pin);
    maybe_unlock_mutex(root);

static u64 stripe_align(struct btrfs_root *root, u64 val)
    u64 mask = ((u64)root->stripesize - 1);
    u64 ret = (val + mask) & ~mask;
/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags = BTRFS_EXTENT_ITEM_KEY
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
static int noinline find_free_extent(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *orig_root,
                                     u64 num_bytes, u64 empty_size,
                                     u64 search_start, u64 search_end,
                                     u64 hint_byte, struct btrfs_key *ins,
                                     u64 exclude_start, u64 exclude_nr,
    u64 orig_search_start;
    struct btrfs_root *root = orig_root->fs_info->extent_root;
    struct btrfs_fs_info *info = root->fs_info;
    u64 total_needed = num_bytes;
    u64 *last_ptr = NULL;
    struct btrfs_block_group_cache *block_group;
    int chunk_alloc_done = 0;
    int empty_cluster = 2 * 1024 * 1024;
    int allowed_chunk_alloc = 0;

    WARN_ON(num_bytes < root->sectorsize);
    btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);

    if (orig_root->ref_cows || empty_size)
        allowed_chunk_alloc = 1;

    if (data & BTRFS_BLOCK_GROUP_METADATA) {
        last_ptr = &root->fs_info->last_alloc;
        empty_cluster = 256 * 1024;

    if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
        last_ptr = &root->fs_info->last_data_alloc;

    if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
        last_ptr = &root->fs_info->last_log_alloc;
        if (last_ptr && root->fs_info->last_alloc) {
            *last_ptr = root->fs_info->last_alloc + empty_cluster;

        hint_byte = *last_ptr;
        empty_size += empty_cluster;

    search_start = max(search_start, first_logical_byte(root, 0));
    orig_search_start = search_start;

    if (search_end == (u64)-1)
        search_end = btrfs_super_total_bytes(&info->super_copy);

    block_group = btrfs_lookup_first_block_group(info, hint_byte);
        hint_byte = search_start;
        block_group = btrfs_find_block_group(root, block_group,
                                             hint_byte, data, 1);
        if (last_ptr && *last_ptr == 0 && block_group)
            hint_byte = block_group->key.objectid;
        block_group = btrfs_find_block_group(root,
                                             search_start, data, 1);
    search_start = max(search_start, hint_byte);

    total_needed += empty_size;

    block_group = btrfs_lookup_first_block_group(info,
    block_group = btrfs_lookup_first_block_group(info,
    if (full_scan && !chunk_alloc_done) {
        if (allowed_chunk_alloc) {
            do_chunk_alloc(trans, root,
                           num_bytes + 2 * 1024 * 1024, data, 1);
            allowed_chunk_alloc = 0;
        } else if (block_group && block_group_bits(block_group, data)) {
            block_group->space_info->force_alloc = 1;
        chunk_alloc_done = 1;
    ret = find_search_start(root, &block_group, &search_start,
                            total_needed, data);
    if (ret == -ENOSPC && last_ptr && *last_ptr) {
        block_group = btrfs_lookup_first_block_group(info,
        search_start = orig_search_start;
        ret = find_search_start(root, &block_group, &search_start,
                                total_needed, data);

    if (last_ptr && *last_ptr && search_start != *last_ptr) {
        empty_size += empty_cluster;
        total_needed += empty_size;
        block_group = btrfs_lookup_first_block_group(info,
        search_start = orig_search_start;
        ret = find_search_start(root, &block_group,
                                &search_start, total_needed, data);

    search_start = stripe_align(root, search_start);
    ins->objectid = search_start;
    ins->offset = num_bytes;

    if (ins->objectid + num_bytes >= search_end)

    if (ins->objectid + num_bytes >
        block_group->key.objectid + block_group->key.offset) {
        search_start = block_group->key.objectid +
                       block_group->key.offset;

    if (test_range_bit(&info->extent_ins, ins->objectid,
                       ins->objectid + num_bytes - 1, EXTENT_LOCKED, 0)) {
        search_start = ins->objectid + num_bytes;

    if (test_range_bit(&info->pinned_extents, ins->objectid,
                       ins->objectid + num_bytes - 1, EXTENT_DIRTY, 0)) {
        search_start = ins->objectid + num_bytes;

    if (exclude_nr > 0 && (ins->objectid + num_bytes > exclude_start &&
                           ins->objectid < exclude_start + exclude_nr)) {
        search_start = exclude_start + exclude_nr;

    if (!(data & BTRFS_BLOCK_GROUP_DATA)) {
        block_group = btrfs_lookup_block_group(info, ins->objectid);
            trans->block_group = block_group;
    ins->offset = num_bytes;
        *last_ptr = ins->objectid + ins->offset;
        btrfs_super_total_bytes(&root->fs_info->super_copy)) {

    if (search_start + num_bytes >= search_end) {
        search_start = orig_search_start;

    total_needed -= empty_size;

    block_group = btrfs_lookup_first_block_group(info, search_start);
    block_group = btrfs_find_block_group(root, block_group,
                                         search_start, data, 0);
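/*
 * A standalone sketch of the hole-finding contract documented above
 * find_free_extent(): given a sorted list of allocated (start, len)
 * ranges, return the first gap of at least num_bytes at or beyond
 * search_start. The structures here are illustrative stand-ins, not
 * the real btrfs types, and none of the block-group, pinned-extent or
 * exclusion filtering above is modeled.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

struct alloc_range { uint64_t start, len; };

static int find_hole(const struct alloc_range *used, int nr,
                     uint64_t search_start, uint64_t num_bytes,
                     uint64_t *objectid)
{
    uint64_t cursor = search_start;

    for (int i = 0; i < nr; i++) {
        if (used[i].start + used[i].len <= cursor)
            continue;   /* allocated range is entirely before the cursor */
        if (used[i].start >= cursor + num_bytes)
            break;      /* gap before this range is big enough */
        cursor = used[i].start + used[i].len;
    }
    *objectid = cursor;     /* a real caller still checks the device end */
    return 0;
}

int main(void)
{
    const struct alloc_range used[] = { {0, 64}, {64, 32}, {128, 64} };
    uint64_t start;

    find_hole(used, 3, 0, 32, &start);
    printf("hole at %llu\n", (unsigned long long)start); /* prints 96 */
    return 0;
}
#endif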
static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  u64 num_bytes, u64 min_alloc_size,
                                  u64 empty_size, u64 hint_byte,
                                  u64 search_end, struct btrfs_key *ins,
    u64 search_start = 0;
    struct btrfs_fs_info *info = root->fs_info;

        alloc_profile = info->avail_data_alloc_bits &
                        info->data_alloc_profile;
        data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
    } else if (root == root->fs_info->chunk_root) {
        alloc_profile = info->avail_system_alloc_bits &
                        info->system_alloc_profile;
        data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
        alloc_profile = info->avail_metadata_alloc_bits &
                        info->metadata_alloc_profile;
        data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;

    data = reduce_alloc_profile(root, data);
    /*
     * the only place that sets empty_size is btrfs_realloc_node, which
     * is not called recursively on allocations
     */
    if (empty_size || root->ref_cows) {
        if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
            ret = do_chunk_alloc(trans, root->fs_info->extent_root,
                                 BTRFS_BLOCK_GROUP_METADATA |
                                 (info->metadata_alloc_profile &
                                  info->avail_metadata_alloc_bits), 0);
        ret = do_chunk_alloc(trans, root->fs_info->extent_root,
                             num_bytes + 2 * 1024 * 1024, data, 0);

    WARN_ON(num_bytes < root->sectorsize);
    ret = find_free_extent(trans, root, num_bytes, empty_size,
                           search_start, search_end, hint_byte, ins,
                           trans->alloc_exclude_start,
                           trans->alloc_exclude_nr, data);

    if (ret == -ENOSPC && num_bytes > min_alloc_size) {
        num_bytes = num_bytes >> 1;
        num_bytes = max(num_bytes, min_alloc_size);
        do_chunk_alloc(trans, root->fs_info->extent_root,
                       num_bytes, data, 1);
        printk("allocation failed flags %Lu\n", data);
    clear_extent_dirty(&root->fs_info->free_space_cache,
                       ins->objectid, ins->objectid + ins->offset - 1,

int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
    maybe_lock_mutex(root);
    set_extent_dirty(&root->fs_info->free_space_cache,
                     start, start + len - 1, GFP_NOFS);
    maybe_unlock_mutex(root);

int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root,
                         u64 num_bytes, u64 min_alloc_size,
                         u64 empty_size, u64 hint_byte,
                         u64 search_end, struct btrfs_key *ins,
    maybe_lock_mutex(root);
    ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
                                 empty_size, hint_byte, search_end, ins,
    maybe_unlock_mutex(root);

static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
                                         struct btrfs_root *root,
                                         u64 root_objectid, u64 ref_generation,
                                         u64 owner, u64 owner_offset,
                                         struct btrfs_key *ins)
    u64 num_bytes = ins->offset;
    struct btrfs_fs_info *info = root->fs_info;
    struct btrfs_root *extent_root = info->extent_root;
    struct btrfs_extent_item *extent_item;
    struct btrfs_extent_ref *ref;
    struct btrfs_path *path;
    struct btrfs_key keys[2];

    /* block accounting for super block */
    spin_lock_irq(&info->delalloc_lock);
    super_used = btrfs_super_bytes_used(&info->super_copy);
    btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
    spin_unlock_irq(&info->delalloc_lock);

    /* block accounting for root item */
    root_used = btrfs_root_used(&root->root_item);
    btrfs_set_root_used(&root->root_item, root_used + num_bytes);

    if (root == extent_root) {
        set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
                        ins->objectid + ins->offset - 1,
                        EXTENT_LOCKED, GFP_NOFS);

    memcpy(&keys[0], ins, sizeof(*ins));
    keys[1].offset = hash_extent_ref(root_objectid, ref_generation,
                                     owner, owner_offset);
    keys[1].objectid = ins->objectid;
    keys[1].type = BTRFS_EXTENT_REF_KEY;
    sizes[0] = sizeof(*extent_item);
    sizes[1] = sizeof(*ref);

    path = btrfs_alloc_path();

    ret = btrfs_insert_empty_items(trans, extent_root, path, keys,

    extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                 struct btrfs_extent_item);
    btrfs_set_extent_refs(path->nodes[0], extent_item, 1);
    ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
                         struct btrfs_extent_ref);

    btrfs_set_ref_root(path->nodes[0], ref, root_objectid);
    btrfs_set_ref_generation(path->nodes[0], ref, ref_generation);
    btrfs_set_ref_objectid(path->nodes[0], ref, owner);
    btrfs_set_ref_offset(path->nodes[0], ref, owner_offset);

    btrfs_mark_buffer_dirty(path->nodes[0]);

    trans->alloc_exclude_start = 0;
    trans->alloc_exclude_nr = 0;
    btrfs_free_path(path);
    finish_current_insert(trans, extent_root);
    pending_ret = del_pending_extents(trans, extent_root);

    ret = update_block_group(trans, root, ins->objectid, ins->offset, 1, 0);
        printk("update block group failed for %Lu %Lu\n",
               ins->objectid, ins->offset);

int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 root_objectid, u64 ref_generation,
                                u64 owner, u64 owner_offset,
                                struct btrfs_key *ins)
    maybe_lock_mutex(root);
    ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
                                        ref_generation, owner,
    maybe_unlock_mutex(root);

/*
 * this is used by the tree logging recovery code.  It records that
 * an extent has been allocated and makes sure to clear the free
 * space cache bits as well
 */
int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 root_objectid, u64 ref_generation,
                              u64 owner, u64 owner_offset,
                              struct btrfs_key *ins)
    struct btrfs_block_group_cache *block_group;

    maybe_lock_mutex(root);
    block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
    cache_block_group(root, block_group);

    clear_extent_dirty(&root->fs_info->free_space_cache,
                       ins->objectid, ins->objectid + ins->offset - 1,
    ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
                                        ref_generation, owner,
    maybe_unlock_mutex(root);
/*
 * finds a free extent and does all the dirty work required for allocation.
 * Returns the key for the extent through ins, and a tree buffer for
2352 * the first block of the extent through buf.
2354 * returns 0 if everything worked, non-zero otherwise.
2356 int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
2357 struct btrfs_root *root,
2358 u64 num_bytes, u64 min_alloc_size,
2359 u64 root_objectid, u64 ref_generation,
2360 u64 owner, u64 owner_offset,
2361 u64 empty_size, u64 hint_byte,
2362 u64 search_end, struct btrfs_key *ins, u64 data)
2366 maybe_lock_mutex(root);
2368 ret = __btrfs_reserve_extent(trans, root, num_bytes,
2369 min_alloc_size, empty_size, hint_byte,
2370 search_end, ins, data);
2372 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
2373 ret = __btrfs_alloc_reserved_extent(trans, root, root_objectid,
2374 ref_generation, owner,
2379 maybe_unlock_mutex(root);
2383 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
2384 struct btrfs_root *root,
2385 u64 bytenr, u32 blocksize)
2387 struct extent_buffer *buf;
2389 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
2391 return ERR_PTR(-ENOMEM);
2392 btrfs_set_header_generation(buf, trans->transid);
2393 btrfs_tree_lock(buf);
2394 clean_tree_block(trans, root, buf);
2395 btrfs_set_buffer_uptodate(buf);
2396 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
2397 set_extent_dirty(&root->dirty_log_pages, buf->start,
2398 buf->start + buf->len - 1, GFP_NOFS);
2400 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
2401 buf->start + buf->len - 1, GFP_NOFS);
2403 trans->blocks_used++;
2408 * helper function to allocate a block for a given tree
2409 * returns the tree buffer or NULL.
2411 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
2412 struct btrfs_root *root,
2421 struct btrfs_key ins;
2423 struct extent_buffer *buf;
2425 ret = btrfs_alloc_extent(trans, root, blocksize, blocksize,
2426 root_objectid, ref_generation,
2427 level, first_objectid, empty_size, hint,
2431 return ERR_PTR(ret);
2434 buf = btrfs_init_new_buffer(trans, root, ins.objectid, blocksize);
2438 int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
2439 struct btrfs_root *root, struct extent_buffer *leaf)
2440 {
2441 u64 leaf_owner;
2442 u64 leaf_generation;
2443 struct btrfs_key key;
2444 struct btrfs_file_extent_item *fi;
2445 int i;
2446 int nritems;
2447 int ret;
2449 BUG_ON(!btrfs_is_leaf(leaf));
2450 nritems = btrfs_header_nritems(leaf);
2451 leaf_owner = btrfs_header_owner(leaf);
2452 leaf_generation = btrfs_header_generation(leaf);
2454 for (i = 0; i < nritems; i++) {
2455 u64 disk_bytenr;
2456 cond_resched();
2458 btrfs_item_key_to_cpu(leaf, &key, i);
2459 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2460 continue;
2461 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
2462 if (btrfs_file_extent_type(leaf, fi) ==
2463 BTRFS_FILE_EXTENT_INLINE)
2464 continue;
2465 /*
2466 * FIXME make sure to insert a trans record that
2467 * repeats the snapshot del on crash
2468 */
2469 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
2470 if (disk_bytenr == 0)
2471 continue;
2473 mutex_lock(&root->fs_info->alloc_mutex);
2474 ret = __btrfs_free_extent(trans, root, disk_bytenr,
2475 btrfs_file_extent_disk_num_bytes(leaf, fi),
2476 leaf_owner, leaf_generation,
2477 key.objectid, key.offset, 0);
2478 mutex_unlock(&root->fs_info->alloc_mutex);
2480 atomic_inc(&root->fs_info->throttle_gen);
2481 wake_up(&root->fs_info->transaction_throttle);
2482 cond_resched();
2484 BUG_ON(ret);
2485 }
2486 return 0;
2487 }
2489 static int noinline cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
2490 struct btrfs_root *root,
2491 struct btrfs_leaf_ref *ref)
2492 {
2493 int i;
2494 int ret;
2495 struct btrfs_extent_info *info = ref->extents;
2497 for (i = 0; i < ref->nritems; i++) {
2498 mutex_lock(&root->fs_info->alloc_mutex);
2499 ret = __btrfs_free_extent(trans, root,
2500 info->bytenr, info->num_bytes,
2501 ref->owner, ref->generation,
2502 info->objectid, info->offset, 0);
2503 mutex_unlock(&root->fs_info->alloc_mutex);
2505 atomic_inc(&root->fs_info->throttle_gen);
2506 wake_up(&root->fs_info->transaction_throttle);
2507 cond_resched();
2509 BUG_ON(ret);
2510 info++;
2511 }
2513 return 0;
2514 }
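/*
 * Editor's note with a small sketch: cache_drop_leaf_ref() is the fast
 * path of btrfs_drop_leaf_ref() above. A btrfs_leaf_ref caches the
 * (bytenr, num_bytes, objectid, offset) tuples of a leaf's file extents
 * so a dead leaf can be dropped without reading it from disk. The
 * hypothetical dump below just iterates the same cached array:
 */
#if 0
static void dump_leaf_ref_sketch(struct btrfs_leaf_ref *ref)
{
	struct btrfs_extent_info *info = ref->extents;
	int i;

	for (i = 0; i < ref->nritems; i++) {
		printk("extent %llu len %llu ino %llu off %llu\n",
		       (unsigned long long)info->bytenr,
		       (unsigned long long)info->num_bytes,
		       (unsigned long long)info->objectid,
		       (unsigned long long)info->offset);
		info++;
	}
}
#endif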
2516 int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
2517 u32 *refs)
2518 {
2519 int ret;
2521 ret = lookup_extent_ref(NULL, root, start, len, refs);
2522 BUG_ON(ret);
2524 #if 0 /* some debugging code in case we see problems here */
2525 /* if the refs count is one, it won't get increased again. But
2526 * if the ref count is > 1, someone may be decreasing it at
2527 * the same time we are.
2528 */
2529 if (*refs != 1) {
2530 struct extent_buffer *eb = NULL;
2531 eb = btrfs_find_create_tree_block(root, start, len);
2532 if (eb)
2533 btrfs_tree_lock(eb);
2535 mutex_lock(&root->fs_info->alloc_mutex);
2536 ret = lookup_extent_ref(NULL, root, start, len, refs);
2537 BUG_ON(ret);
2538 mutex_unlock(&root->fs_info->alloc_mutex);
2540 if (eb) {
2541 btrfs_tree_unlock(eb);
2542 free_extent_buffer(eb);
2543 }
2544 if (*refs == 1) {
2545 printk("block %llu went down to one during drop_snap\n",
2546 (unsigned long long)start);
2547 }
2548 }
2549 #endif
2551 cond_resched();
2552 return ret;
2553 }
2556 /*
2557 * helper function for drop_snapshot, this walks down the tree dropping ref
2558 * counts as it goes.
2559 */
2560 static int noinline walk_down_tree(struct btrfs_trans_handle *trans,
2561 struct btrfs_root *root,
2562 struct btrfs_path *path, int *level)
2563 {
2564 u64 root_owner;
2565 u64 root_gen;
2566 u64 bytenr;
2567 u64 ptr_gen;
2568 struct extent_buffer *next;
2569 struct extent_buffer *cur;
2570 struct extent_buffer *parent;
2571 struct btrfs_leaf_ref *ref;
2572 u32 blocksize;
2573 int ret;
2574 u32 refs;
2576 WARN_ON(*level < 0);
2577 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2578 ret = drop_snap_lookup_refcount(root, path->nodes[*level]->start,
2579 path->nodes[*level]->len, &refs);
2580 BUG_ON(ret);
2581 if (refs > 1)
2582 goto out;
2584 /*
2585 * walk down to the last node level and free all the leaves
2586 */
2587 while(*level >= 0) {
2588 WARN_ON(*level < 0);
2589 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2590 cur = path->nodes[*level];
2592 if (btrfs_header_level(cur) != *level)
2593 WARN_ON(1);
2595 if (path->slots[*level] >=
2596 btrfs_header_nritems(cur))
2597 break;
2598 if (*level == 0) {
2599 ret = btrfs_drop_leaf_ref(trans, root, cur);
2600 BUG_ON(ret);
2601 break;
2602 }
2603 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
2604 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
2605 blocksize = btrfs_level_size(root, *level - 1);
2607 ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
2608 BUG_ON(ret);
2609 if (refs != 1) {
2610 parent = path->nodes[*level];
2611 root_owner = btrfs_header_owner(parent);
2612 root_gen = btrfs_header_generation(parent);
2613 path->slots[*level]++;
2615 mutex_lock(&root->fs_info->alloc_mutex);
2616 ret = __btrfs_free_extent(trans, root, bytenr,
2617 blocksize, root_owner,
2618 root_gen, 0, 0, 1);
2619 BUG_ON(ret);
2620 mutex_unlock(&root->fs_info->alloc_mutex);
2622 atomic_inc(&root->fs_info->throttle_gen);
2623 wake_up(&root->fs_info->transaction_throttle);
2624 cond_resched();
2626 continue;
2627 }
2628 /*
2629 * at this point, we have a single ref, and since the
2630 * only place referencing this extent is a dead root
2631 * the reference count should never go higher.
2632 * So, we don't need to check it again
2633 */
2634 if (*level == 1) {
2635 struct btrfs_key key;
2636 btrfs_node_key_to_cpu(cur, &key, path->slots[*level]);
2637 ref = btrfs_lookup_leaf_ref(root, bytenr);
2638 if (ref) {
2639 ret = cache_drop_leaf_ref(trans, root, ref);
2640 BUG_ON(ret);
2641 btrfs_remove_leaf_ref(root, ref);
2642 btrfs_free_leaf_ref(root, ref);
2643 *level = 0;
2644 break;
2645 }
2646 if (printk_ratelimit())
2647 printk("leaf ref miss for bytenr %llu\n",
2648 (unsigned long long)bytenr);
2649 }
2650 next = btrfs_find_tree_block(root, bytenr, blocksize);
2651 if (!next || !btrfs_buffer_uptodate(next, ptr_gen)) {
2652 free_extent_buffer(next);
2654 next = read_tree_block(root, bytenr, blocksize,
2655 ptr_gen);
2656 cond_resched();
2657 #if 0
2658 /*
2659 * this is a debugging check and can go away
2660 * the ref should never go all the way down to 1
2661 * at this point
2662 */
2663 ret = lookup_extent_ref(NULL, root, bytenr, blocksize,
2664 &refs);
2665 BUG_ON(ret);
2666 WARN_ON(refs != 1);
2667 #endif
2668 }
2669 WARN_ON(*level <= 0);
2670 if (path->nodes[*level-1])
2671 free_extent_buffer(path->nodes[*level-1]);
2672 path->nodes[*level-1] = next;
2673 *level = btrfs_header_level(next);
2674 path->slots[*level] = 0;
2675 cond_resched();
2676 }
2677 out:
2678 WARN_ON(*level < 0);
2679 WARN_ON(*level >= BTRFS_MAX_LEVEL);
2681 if (path->nodes[*level] == root->node) {
2682 parent = path->nodes[*level];
2683 bytenr = path->nodes[*level]->start;
2684 } else {
2685 parent = path->nodes[*level + 1];
2686 bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
2687 }
2689 blocksize = btrfs_level_size(root, *level);
2690 root_owner = btrfs_header_owner(parent);
2691 root_gen = btrfs_header_generation(parent);
2693 mutex_lock(&root->fs_info->alloc_mutex);
2694 ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
2695 root_owner, root_gen, 0, 0, 1);
2696 free_extent_buffer(path->nodes[*level]);
2697 path->nodes[*level] = NULL;
2698 *level += 1;
2699 BUG_ON(ret);
2700 mutex_unlock(&root->fs_info->alloc_mutex);
2701 cond_resched();
2702 return 0;
2703 }
2707 * helper for dropping snapshots. This walks back up the tree in the path
2708 * to find the first node higher up where we haven't yet gone through
2709 * all the slots
2710 */
2711 static int noinline walk_up_tree(struct btrfs_trans_handle *trans,
2712 struct btrfs_root *root,
2713 struct btrfs_path *path, int *level)
2714 {
2715 u64 root_owner;
2716 u64 root_gen;
2717 struct btrfs_root_item *root_item = &root->root_item;
2718 int i;
2719 int slot;
2720 int ret;
2722 for(i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
2723 slot = path->slots[i];
2724 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
2725 struct extent_buffer *node;
2726 struct btrfs_disk_key disk_key;
2727 node = path->nodes[i];
2728 path->slots[i]++;
2729 *level = i;
2730 WARN_ON(*level == 0);
2731 btrfs_node_key(node, &disk_key, path->slots[i]);
2732 memcpy(&root_item->drop_progress,
2733 &disk_key, sizeof(disk_key));
2734 root_item->drop_level = i;
2735 return 0;
2736 } else {
2737 if (path->nodes[*level] == root->node) {
2738 root_owner = root->root_key.objectid;
2739 root_gen =
2740 btrfs_header_generation(path->nodes[*level]);
2741 } else {
2742 struct extent_buffer *node;
2743 node = path->nodes[*level + 1];
2744 root_owner = btrfs_header_owner(node);
2745 root_gen = btrfs_header_generation(node);
2746 }
2747 ret = btrfs_free_extent(trans, root,
2748 path->nodes[*level]->start,
2749 path->nodes[*level]->len,
2750 root_owner, root_gen, 0, 0, 1);
2751 BUG_ON(ret);
2752 free_extent_buffer(path->nodes[*level]);
2753 path->nodes[*level] = NULL;
2754 *level = i + 1;
2755 }
2756 }
2757 return 1;
2758 }
2761 * drop the reference count on the tree rooted at 'snap'. This traverses
2762 * the tree freeing any blocks that have a ref count of zero after being
2763 * decremented.
2764 */
2765 int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
2766 *root)
2767 {
2768 int ret = 0;
2769 int wret;
2770 int level;
2771 struct btrfs_path *path;
2772 int i;
2773 int orig_level;
2774 struct btrfs_root_item *root_item = &root->root_item;
2776 WARN_ON(!mutex_is_locked(&root->fs_info->drop_mutex));
2777 path = btrfs_alloc_path();
2778 BUG_ON(!path);
2780 level = btrfs_header_level(root->node);
2781 orig_level = level;
2782 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
2783 path->nodes[level] = root->node;
2784 extent_buffer_get(root->node);
2785 path->slots[level] = 0;
2786 } else {
2787 struct btrfs_key key;
2788 struct btrfs_disk_key found_key;
2789 struct extent_buffer *node;
2791 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
2792 level = root_item->drop_level;
2793 path->lowest_level = level;
2794 wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2795 if (wret < 0) {
2796 ret = wret;
2797 goto out;
2798 }
2799 node = path->nodes[level];
2800 btrfs_node_key(node, &found_key, path->slots[level]);
2801 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
2802 sizeof(found_key)));
2803 /*
2804 * unlock our path, this is safe because only this
2805 * function is allowed to delete this snapshot
2806 */
2807 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
2808 if (path->nodes[i] && path->locks[i]) {
2809 path->locks[i] = 0;
2810 btrfs_tree_unlock(path->nodes[i]);
2811 }
2812 }
2813 }
2814 while(1) {
2815 wret = walk_down_tree(trans, root, path, &level);
2816 if (wret > 0)
2817 break;
2818 if (wret < 0)
2819 ret = wret;
2821 wret = walk_up_tree(trans, root, path, &level);
2822 if (wret > 0)
2823 break;
2824 if (wret < 0)
2825 ret = wret;
2826 if (trans->transaction->in_commit) {
2827 ret = -EAGAIN;
2828 break;
2829 }
2830 atomic_inc(&root->fs_info->throttle_gen);
2831 wake_up(&root->fs_info->transaction_throttle);
2832 }
2833 for (i = 0; i <= orig_level; i++) {
2834 if (path->nodes[i]) {
2835 free_extent_buffer(path->nodes[i]);
2836 path->nodes[i] = NULL;
2837 }
2838 }
2839 out:
2840 btrfs_free_path(path);
2841 return ret;
2842 }
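/*
 * Editor's illustration: walk_up_tree() records drop_progress in the root
 * item and btrfs_drop_snapshot() bails out with -EAGAIN when a commit is
 * pending, so callers are expected to retry across transactions. This
 * hypothetical driver loop only sketches that shape; names other than
 * btrfs_drop_snapshot() and drop_mutex are assumptions.
 */
#if 0
static int drop_one_snapshot_sketch(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	int ret;

	do {
		trans = btrfs_start_transaction(root->fs_info->tree_root, 1);
		mutex_lock(&root->fs_info->drop_mutex);
		ret = btrfs_drop_snapshot(trans, root);
		mutex_unlock(&root->fs_info->drop_mutex);
		/* commit so the next pass resumes from drop_progress */
		btrfs_commit_transaction(trans, root->fs_info->tree_root);
	} while (ret == -EAGAIN);
	return ret;
}
#endif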
2844 int btrfs_free_block_groups(struct btrfs_fs_info *info)
2845 {
2846 u64 start;
2847 u64 end;
2848 u64 ptr;
2849 int ret;
2851 mutex_lock(&info->alloc_mutex);
2852 while(1) {
2853 ret = find_first_extent_bit(&info->block_group_cache, 0,
2854 &start, &end, (unsigned int)-1);
2855 if (ret)
2856 break;
2857 ret = get_state_private(&info->block_group_cache, start, &ptr);
2858 if (!ret)
2859 kfree((void *)(unsigned long)ptr);
2860 clear_extent_bits(&info->block_group_cache, start,
2861 end, (unsigned int)-1, GFP_NOFS);
2862 }
2863 while(1) {
2864 ret = find_first_extent_bit(&info->free_space_cache, 0,
2865 &start, &end, EXTENT_DIRTY);
2866 if (ret)
2867 break;
2868 clear_extent_dirty(&info->free_space_cache, start,
2869 end, GFP_NOFS);
2870 }
2871 mutex_unlock(&info->alloc_mutex);
2872 return 0;
2873 }
2875 static unsigned long calc_ra(unsigned long start, unsigned long last,
2876 unsigned long nr)
2877 {
2878 return min(last, start + nr - 1);
2879 }
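/*
 * Worked example (editor's addition): with start = 100, last = 1000 and a
 * readahead window of nr = 32 pages, calc_ra() returns
 * min(1000, 100 + 32 - 1) = 131, i.e. readahead extends one window ahead
 * but never past the last interesting page index.
 */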
2881 static int noinline relocate_inode_pages(struct inode *inode, u64 start,
2882 u64 len)
2883 {
2884 u64 page_start;
2885 u64 page_end;
2886 unsigned long last_index;
2887 unsigned long i;
2888 struct page *page;
2889 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
2890 struct file_ra_state *ra;
2891 unsigned long total_read = 0;
2892 unsigned long ra_pages;
2893 struct btrfs_ordered_extent *ordered;
2894 struct btrfs_trans_handle *trans;
2896 ra = kzalloc(sizeof(*ra), GFP_NOFS);
2898 mutex_lock(&inode->i_mutex);
2899 i = start >> PAGE_CACHE_SHIFT;
2900 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
2902 ra_pages = BTRFS_I(inode)->root->fs_info->bdi.ra_pages;
2904 file_ra_state_init(ra, inode->i_mapping);
2906 for (; i <= last_index; i++) {
2907 if (total_read % ra_pages == 0) {
2908 btrfs_force_ra(inode->i_mapping, ra, NULL, i,
2909 calc_ra(i, last_index, ra_pages));
2910 }
2911 total_read++;
2912 again:
2913 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
2914 goto truncate_racing;
2915 page = grab_cache_page(inode->i_mapping, i);
2916 if (!page) {
2917 goto out_unlock;
2918 }
2919 if (!PageUptodate(page)) {
2920 btrfs_readpage(NULL, page);
2921 lock_page(page);
2922 if (!PageUptodate(page)) {
2923 unlock_page(page);
2924 page_cache_release(page);
2925 goto out_unlock;
2926 }
2927 }
2928 wait_on_page_writeback(page);
2930 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2931 page_end = page_start + PAGE_CACHE_SIZE - 1;
2932 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
2934 ordered = btrfs_lookup_ordered_extent(inode, page_start);
2935 if (ordered) {
2936 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2937 unlock_page(page);
2938 page_cache_release(page);
2939 btrfs_start_ordered_extent(inode, ordered, 1);
2940 btrfs_put_ordered_extent(ordered);
2941 goto again;
2942 }
2943 set_page_extent_mapped(page);
2945 /*
2946 * make sure page_mkwrite is called for this page if userland
2947 * wants to change it from mmap
2948 */
2949 clear_page_dirty_for_io(page);
2951 btrfs_set_extent_delalloc(inode, page_start, page_end);
2952 set_page_dirty(page);
2954 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
2955 unlock_page(page);
2956 page_cache_release(page);
2957 }
2959 out_unlock:
2960 /* we have to start the IO in order to get the ordered extents
2961 * instantiated. This allows the relocation code to wait
2962 * for all the ordered extents to hit the disk.
2963 *
2964 * Otherwise, it would constantly loop over the same extents
2965 * because the old ones don't get deleted until the IO is
2966 * done.
2967 */
2968 btrfs_fdatawrite_range(inode->i_mapping, start, start + len - 1,
2969 WB_SYNC_NONE);
2970 kfree(ra);
2971 trans = btrfs_start_transaction(BTRFS_I(inode)->root, 1);
2972 if (trans) {
2973 btrfs_end_transaction(trans, BTRFS_I(inode)->root);
2974 mark_inode_dirty(inode);
2975 }
2976 mutex_unlock(&inode->i_mutex);
2977 return 0;
2979 truncate_racing:
2980 vmtruncate(inode, inode->i_size);
2981 balance_dirty_pages_ratelimited_nr(inode->i_mapping,
2982 total_read);
2983 goto out_unlock;
2984 }
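/*
 * Editor's illustration of the comment above: once relocate_inode_pages()
 * has marked pages delalloc and kicked off writeback, a relocation pass
 * can flush and wait the way the shrink loop later in this file actually
 * does, roughly:
 */
#if 0
	btrfs_start_delalloc_inodes(root);
	btrfs_wait_ordered_extents(tree_root, 0);
#endif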
2986 /*
2987 * The back references tell us which tree holds a ref on a block,
2988 * but it is possible for the tree root field in the reference to
2989 * reflect the original root before a snapshot was made. In this
2990 * case we should search through all the children of a given root
2991 * to find potential holders of references on a block.
2992 *
2993 * Instead, we do something a little less fancy and just search
2994 * all the roots for a given key/block combination.
2995 */
2996 static int find_root_for_ref(struct btrfs_root *root,
2997 struct btrfs_path *path,
2998 struct btrfs_key *key0,
2999 int level,
3000 int file_key,
3001 struct btrfs_root **found_root,
3002 u64 bytenr)
3003 {
3004 struct btrfs_key root_location;
3005 struct btrfs_root *cur_root = *found_root;
3006 struct btrfs_file_extent_item *file_extent;
3007 u64 root_search_start = BTRFS_FS_TREE_OBJECTID;
3008 u64 found_bytenr;
3009 int ret;
3011 root_location.offset = (u64)-1;
3012 root_location.type = BTRFS_ROOT_ITEM_KEY;
3013 path->lowest_level = level;
3015 while(1) {
3016 ret = btrfs_search_slot(NULL, cur_root, key0, path, 0, 0);
3017 found_bytenr = 0;
3018 if (ret == 0 && file_key) {
3019 struct extent_buffer *leaf = path->nodes[0];
3020 file_extent = btrfs_item_ptr(leaf, path->slots[0],
3021 struct btrfs_file_extent_item);
3022 if (btrfs_file_extent_type(leaf, file_extent) ==
3023 BTRFS_FILE_EXTENT_REG) {
3024 found_bytenr =
3025 btrfs_file_extent_disk_bytenr(leaf,
3026 file_extent);
3027 }
3028 } else if (!file_key) {
3029 if (path->nodes[level])
3030 found_bytenr = path->nodes[level]->start;
3031 }
3033 btrfs_release_path(cur_root, path);
3035 if (found_bytenr == bytenr) {
3036 *found_root = cur_root;
3037 ret = 0;
3038 break;
3039 }
3040 ret = btrfs_search_root(root->fs_info->tree_root,
3041 root_search_start, &root_search_start);
3042 if (ret)
3043 break;
3045 root_location.objectid = root_search_start;
3046 cur_root = btrfs_read_fs_root_no_name(root->fs_info,
3047 &root_location);
3048 if (!cur_root) {
3049 ret = 1;
3050 break;
3051 }
3052 }
3054 path->lowest_level = 0;
3055 return ret;
3056 }
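/*
 * Editor's sketch of a hypothetical caller: start from the root recorded
 * in the back reference and let find_root_for_ref() walk every root until
 * the key actually points at the block. Only find_root_for_ref() itself
 * comes from the code above.
 */
#if 0
static int resolve_ref_root_sketch(struct btrfs_root *extent_root,
				   struct btrfs_path *path,
				   struct btrfs_key *key, int level,
				   struct btrfs_root *hint_root, u64 bytenr,
				   struct btrfs_root **winner)
{
	*winner = hint_root;
	/* file_key == 0: compare node starts instead of file extents */
	return find_root_for_ref(extent_root, path, key, level, 0,
				 winner, bytenr);
}
#endif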
3058 /*
3059 * note, this releases the path
3060 */
3061 static int noinline relocate_one_reference(struct btrfs_root *extent_root,
3062 struct btrfs_path *path,
3063 struct btrfs_key *extent_key,
3064 u64 *last_file_objectid,
3065 u64 *last_file_offset,
3066 u64 *last_file_root,
3067 u64 last_extent)
3068 {
3069 struct inode *inode;
3070 struct btrfs_root *found_root;
3071 struct btrfs_key root_location;
3072 struct btrfs_key found_key;
3073 struct btrfs_extent_ref *ref;
3074 u64 ref_root;
3075 u64 ref_gen;
3076 u64 ref_objectid;
3077 u64 ref_offset;
3078 int ret;
3079 int level;
3081 WARN_ON(!mutex_is_locked(&extent_root->fs_info->alloc_mutex));
3083 ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
3084 struct btrfs_extent_ref);
3085 ref_root = btrfs_ref_root(path->nodes[0], ref);
3086 ref_gen = btrfs_ref_generation(path->nodes[0], ref);
3087 ref_objectid = btrfs_ref_objectid(path->nodes[0], ref);
3088 ref_offset = btrfs_ref_offset(path->nodes[0], ref);
3089 btrfs_release_path(extent_root, path);
3091 root_location.objectid = ref_root;
3092 if (ref_objectid >= BTRFS_FIRST_FREE_OBJECTID)
3093 root_location.offset = 0;
3094 else
3095 root_location.offset = (u64)-1;
3096 root_location.type = BTRFS_ROOT_ITEM_KEY;
3098 found_root = btrfs_read_fs_root_no_name(extent_root->fs_info,
3099 &root_location);
3100 BUG_ON(!found_root);
3101 mutex_unlock(&extent_root->fs_info->alloc_mutex);
3103 if (ref_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
3104 found_key.objectid = ref_objectid;
3105 found_key.type = BTRFS_EXTENT_DATA_KEY;
3106 found_key.offset = ref_offset;
3107 level = 0;
3109 if (last_extent == extent_key->objectid &&
3110 *last_file_objectid == ref_objectid &&
3111 *last_file_offset == ref_offset &&
3112 *last_file_root == ref_root)
3113 goto out;
3115 ret = find_root_for_ref(extent_root, path, &found_key,
3116 level, 1, &found_root,
3117 extent_key->objectid);
3119 if (ret)
3120 goto out;
3122 if (last_extent == extent_key->objectid &&
3123 *last_file_objectid == ref_objectid &&
3124 *last_file_offset == ref_offset &&
3125 *last_file_root == ref_root)
3126 goto out;
3128 inode = btrfs_iget_locked(extent_root->fs_info->sb,
3129 ref_objectid, found_root);
3130 if (inode->i_state & I_NEW) {
3131 /* the inode and parent dir are two different roots */
3132 BTRFS_I(inode)->root = found_root;
3133 BTRFS_I(inode)->location.objectid = ref_objectid;
3134 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
3135 BTRFS_I(inode)->location.offset = 0;
3136 btrfs_read_locked_inode(inode);
3137 unlock_new_inode(inode);
3138 }
3140 /* this can happen if the reference is not against
3141 * the latest version of the tree root
3142 */
3143 if (is_bad_inode(inode))
3144 goto out;
3146 *last_file_objectid = inode->i_ino;
3147 *last_file_root = found_root->root_key.objectid;
3148 *last_file_offset = ref_offset;
3150 relocate_inode_pages(inode, ref_offset, extent_key->offset);
3151 iput(inode);
3152 } else {
3153 struct btrfs_trans_handle *trans;
3154 struct extent_buffer *eb;
3155 int needs_lock = 0;
3157 eb = read_tree_block(found_root, extent_key->objectid,
3158 extent_key->offset, 0);
3159 btrfs_tree_lock(eb);
3160 level = btrfs_header_level(eb);
3162 if (level == 0)
3163 btrfs_item_key_to_cpu(eb, &found_key, 0);
3164 else
3165 btrfs_node_key_to_cpu(eb, &found_key, 0);
3167 btrfs_tree_unlock(eb);
3168 free_extent_buffer(eb);
3170 ret = find_root_for_ref(extent_root, path, &found_key,
3171 level, 0, &found_root,
3172 extent_key->objectid);
3174 if (ret)
3175 goto out;
3177 /*
3178 * right here almost anything could happen to our key,
3179 * but that's ok. The cow below will either relocate it
3180 * or someone else will have relocated it. Either way,
3181 * it is in a different spot than it was before and
3182 * we're happy.
3183 */
3185 trans = btrfs_start_transaction(found_root, 1);
3187 if (found_root == extent_root->fs_info->extent_root ||
3188 found_root == extent_root->fs_info->chunk_root ||
3189 found_root == extent_root->fs_info->dev_root) {
3190 needs_lock = 1;
3191 mutex_lock(&extent_root->fs_info->alloc_mutex);
3192 }
3194 path->lowest_level = level;
3196 ret = btrfs_search_slot(trans, found_root, &found_key, path,
3197 0, 1);
3198 path->lowest_level = 0;
3199 btrfs_release_path(found_root, path);
3201 if (found_root == found_root->fs_info->extent_root)
3202 btrfs_extent_post_op(trans, found_root);
3203 if (needs_lock)
3204 mutex_unlock(&extent_root->fs_info->alloc_mutex);
3206 btrfs_end_transaction(trans, found_root);
3208 }
3209 out:
3210 mutex_lock(&extent_root->fs_info->alloc_mutex);
3211 return ret;
3212 }
3214 static int noinline del_extent_zero(struct btrfs_root *extent_root,
3215 struct btrfs_path *path,
3216 struct btrfs_key *extent_key)
3217 {
3218 int ret;
3219 struct btrfs_trans_handle *trans;
3221 trans = btrfs_start_transaction(extent_root, 1);
3222 ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
3223 if (ret > 0) {
3224 ret = -EIO;
3225 goto out;
3226 }
3227 if (ret < 0)
3228 goto out;
3229 ret = btrfs_del_item(trans, extent_root, path);
3230 out:
3231 btrfs_end_transaction(trans, extent_root);
3232 return ret;
3233 }
3235 static int noinline relocate_one_extent(struct btrfs_root *extent_root,
3236 struct btrfs_path *path,
3237 struct btrfs_key *extent_key)
3238 {
3239 struct btrfs_key key;
3240 struct btrfs_key found_key;
3241 struct extent_buffer *leaf;
3242 u64 last_file_objectid = 0;
3243 u64 last_file_root = 0;
3244 u64 last_file_offset = (u64)-1;
3245 u64 last_extent = 0;
3246 u32 nritems;
3247 u32 item_size;
3248 int ret = 0;
3250 if (extent_key->objectid == 0) {
3251 ret = del_extent_zero(extent_root, path, extent_key);
3252 goto out;
3253 }
3254 key.objectid = extent_key->objectid;
3255 key.type = BTRFS_EXTENT_REF_KEY;
3256 key.offset = 0;
3258 while(1) {
3259 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3261 if (ret < 0)
3262 goto out;
3264 BUG_ON(ret == 0);
3265 leaf = path->nodes[0];
3266 nritems = btrfs_header_nritems(leaf);
3267 if (path->slots[0] == nritems) {
3268 ret = btrfs_next_leaf(extent_root, path);
3269 if (ret > 0) {
3270 ret = 0;
3271 goto out;
3272 }
3273 if (ret < 0)
3274 goto out;
3275 leaf = path->nodes[0];
3276 }
3278 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3279 if (found_key.objectid != extent_key->objectid) {
3280 break;
3281 }
3283 if (found_key.type != BTRFS_EXTENT_REF_KEY) {
3284 break;
3285 }
3287 key.offset = found_key.offset + 1;
3288 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3290 ret = relocate_one_reference(extent_root, path, extent_key,
3291 &last_file_objectid,
3292 &last_file_offset,
3293 &last_file_root, last_extent);
3294 if (ret)
3295 goto out;
3296 last_extent = extent_key->objectid;
3297 }
3298 ret = 0;
3299 out:
3300 btrfs_release_path(extent_root, path);
3301 return ret;
3302 }
3304 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
3305 {
3306 u64 num_devices;
3307 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
3308 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
3310 num_devices = root->fs_info->fs_devices->num_devices;
3311 if (num_devices == 1) {
3312 stripped |= BTRFS_BLOCK_GROUP_DUP;
3313 stripped = flags & ~stripped;
3315 /* turn raid0 into single device chunks */
3316 if (flags & BTRFS_BLOCK_GROUP_RAID0)
3317 return stripped;
3319 /* turn mirroring into duplication */
3320 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3321 BTRFS_BLOCK_GROUP_RAID10))
3322 return stripped | BTRFS_BLOCK_GROUP_DUP;
3323 return flags;
3324 } else {
3325 /* they already had raid on here, just return */
3326 if (flags & stripped)
3327 return flags;
3329 stripped |= BTRFS_BLOCK_GROUP_DUP;
3330 stripped = flags & ~stripped;
3332 /* switch duplicated blocks with raid1 */
3333 if (flags & BTRFS_BLOCK_GROUP_DUP)
3334 return stripped | BTRFS_BLOCK_GROUP_RAID1;
3336 /* turn single device chunks into raid0 */
3337 return stripped | BTRFS_BLOCK_GROUP_RAID0;
3338 }
3339 return flags;
3340 }
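/*
 * Worked examples (editor's addition) of the conversions performed above:
 *
 *   num_devices == 1:   RAID0          -> plain single-device chunks
 *                       RAID1/RAID10   -> DUP
 *   num_devices  > 1:   DUP            -> RAID1
 *                       single chunks  -> RAID0
 *                       already RAID*  -> returned unchanged
 */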
3342 int __alloc_chunk_for_shrink(struct btrfs_root *root,
3343 struct btrfs_block_group_cache *shrink_block_group,
3344 int force)
3345 {
3346 struct btrfs_trans_handle *trans;
3347 u64 new_alloc_flags;
3348 u64 calc;
3350 spin_lock(&shrink_block_group->lock);
3351 if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
3352 spin_unlock(&shrink_block_group->lock);
3353 mutex_unlock(&root->fs_info->alloc_mutex);
3355 trans = btrfs_start_transaction(root, 1);
3356 mutex_lock(&root->fs_info->alloc_mutex);
3357 spin_lock(&shrink_block_group->lock);
3359 new_alloc_flags = update_block_group_flags(root,
3360 shrink_block_group->flags);
3361 if (new_alloc_flags != shrink_block_group->flags) {
3362 calc =
3363 btrfs_block_group_used(&shrink_block_group->item);
3364 } else {
3365 calc = shrink_block_group->key.offset;
3366 }
3367 spin_unlock(&shrink_block_group->lock);
3369 do_chunk_alloc(trans, root->fs_info->extent_root,
3370 calc + 2 * 1024 * 1024, new_alloc_flags, force);
3372 mutex_unlock(&root->fs_info->alloc_mutex);
3373 btrfs_end_transaction(trans, root);
3374 mutex_lock(&root->fs_info->alloc_mutex);
3375 } else
3376 spin_unlock(&shrink_block_group->lock);
3377 return 0;
3378 }
3380 int btrfs_shrink_extent_tree(struct btrfs_root *root, u64 shrink_start)
3382 struct btrfs_trans_handle *trans;
3383 struct btrfs_root *tree_root = root->fs_info->tree_root;
3384 struct btrfs_path *path;
3385 u64 cur_byte;
3386 u64 total_found;
3387 u64 shrink_last_byte;
3388 struct btrfs_block_group_cache *shrink_block_group;
3389 struct btrfs_fs_info *info = root->fs_info;
3390 struct btrfs_key key;
3391 struct btrfs_key found_key;
3392 struct extent_buffer *leaf;
3393 u32 nritems;
3394 int ret;
3395 int progress;
3397 mutex_lock(&root->fs_info->alloc_mutex);
3398 shrink_block_group = btrfs_lookup_block_group(root->fs_info,
3399 shrink_start);
3400 BUG_ON(!shrink_block_group);
3402 shrink_last_byte = shrink_block_group->key.objectid +
3403 shrink_block_group->key.offset;
3405 shrink_block_group->space_info->total_bytes -=
3406 shrink_block_group->key.offset;
3407 path = btrfs_alloc_path();
3408 root = root->fs_info->extent_root;
3409 path->reada = 2;
3411 printk("btrfs relocating block group %llu flags %llu\n",
3412 (unsigned long long)shrink_start,
3413 (unsigned long long)shrink_block_group->flags);
3415 __alloc_chunk_for_shrink(root, shrink_block_group, 1);
3417 again:
3419 shrink_block_group->ro = 1;
3421 total_found = 0;
3422 progress = 0;
3423 key.objectid = shrink_start;
3424 key.offset = 0;
3425 key.type = 0;
3426 cur_byte = key.objectid;
3428 mutex_unlock(&root->fs_info->alloc_mutex);
3430 btrfs_start_delalloc_inodes(root);
3431 btrfs_wait_ordered_extents(tree_root, 0);
3433 mutex_lock(&root->fs_info->alloc_mutex);
3435 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3436 if (ret < 0)
3437 goto out;
3439 ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
3440 if (ret < 0)
3441 goto out;
3443 if (ret == 0) {
3444 leaf = path->nodes[0];
3445 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3446 if (found_key.objectid + found_key.offset > shrink_start &&
3447 found_key.objectid < shrink_last_byte) {
3448 cur_byte = found_key.objectid;
3449 key.objectid = cur_byte;
3450 }
3451 }
3452 btrfs_release_path(root, path);
3454 while(1) {
3455 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3456 if (ret < 0)
3457 goto out;
3459 next:
3460 leaf = path->nodes[0];
3461 nritems = btrfs_header_nritems(leaf);
3462 if (path->slots[0] >= nritems) {
3463 ret = btrfs_next_leaf(root, path);
3464 if (ret < 0)
3465 goto out;
3466 if (ret == 1) {
3467 ret = 0;
3468 break;
3469 }
3470 leaf = path->nodes[0];
3471 nritems = btrfs_header_nritems(leaf);
3472 }
3474 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3476 if (found_key.objectid >= shrink_last_byte)
3477 break;
3479 if (progress && need_resched()) {
3480 memcpy(&key, &found_key, sizeof(key));
3481 cond_resched();
3482 btrfs_release_path(root, path);
3483 btrfs_search_slot(NULL, root, &key, path, 0, 0);
3484 progress = 0;
3485 goto next;
3486 }
3487 progress = 1;
3489 if (btrfs_key_type(&found_key) != BTRFS_EXTENT_ITEM_KEY ||
3490 found_key.objectid + found_key.offset <= cur_byte) {
3491 memcpy(&key, &found_key, sizeof(key));
3492 key.offset++;
3493 path->slots[0]++;
3494 goto next;
3495 }
3497 total_found++;
3498 cur_byte = found_key.objectid + found_key.offset;
3499 key.objectid = cur_byte;
3500 btrfs_release_path(root, path);
3501 ret = relocate_one_extent(root, path, &found_key);
3502 __alloc_chunk_for_shrink(root, shrink_block_group, 0);
3503 }
3505 btrfs_release_path(root, path);
3507 if (total_found > 0) {
3508 printk("btrfs relocate found %llu last extent was %llu\n",
3509 (unsigned long long)total_found,
3510 (unsigned long long)found_key.objectid);
3511 mutex_unlock(&root->fs_info->alloc_mutex);
3512 trans = btrfs_start_transaction(tree_root, 1);
3513 btrfs_commit_transaction(trans, tree_root);
3515 btrfs_clean_old_snapshots(tree_root);
3517 btrfs_start_delalloc_inodes(root);
3518 btrfs_wait_ordered_extents(tree_root, 0);
3520 trans = btrfs_start_transaction(tree_root, 1);
3521 btrfs_commit_transaction(trans, tree_root);
3522 mutex_lock(&root->fs_info->alloc_mutex);
3523 goto again;
3524 }
3526 /*
3527 * we've freed all the extents, now remove the block
3528 * group item from the tree
3529 */
3530 mutex_unlock(&root->fs_info->alloc_mutex);
3532 trans = btrfs_start_transaction(root, 1);
3534 mutex_lock(&root->fs_info->alloc_mutex);
3535 memcpy(&key, &shrink_block_group->key, sizeof(key));
3537 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3538 if (ret > 0)
3539 ret = -EIO;
3540 if (ret < 0) {
3541 btrfs_end_transaction(trans, root);
3542 goto out;
3543 }
3545 clear_extent_bits(&info->block_group_cache, key.objectid,
3546 key.objectid + key.offset - 1,
3547 (unsigned int)-1, GFP_NOFS);
3550 clear_extent_bits(&info->free_space_cache,
3551 key.objectid, key.objectid + key.offset - 1,
3552 (unsigned int)-1, GFP_NOFS);
3555 memset(shrink_block_group, 0, sizeof(*shrink_block_group));
3556 kfree(shrink_block_group);
3559 btrfs_del_item(trans, root, path);
3560 btrfs_release_path(root, path);
3561 mutex_unlock(&root->fs_info->alloc_mutex);
3562 btrfs_commit_transaction(trans, root);
3564 mutex_lock(&root->fs_info->alloc_mutex);
3566 /* the code to unpin extents might set a few bits in the free
3567 * space cache for this range again
3568 */
3569 clear_extent_bits(&info->free_space_cache,
3570 key.objectid, key.objectid + key.offset - 1,
3571 (unsigned int)-1, GFP_NOFS);
3572 out:
3573 btrfs_free_path(path);
3574 mutex_unlock(&root->fs_info->alloc_mutex);
3575 return ret;
3576 }
3578 int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path,
3579 struct btrfs_key *key)
3580 {
3581 int ret;
3582 struct btrfs_key found_key;
3583 struct extent_buffer *leaf;
3584 int slot;
3586 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
3587 if (ret < 0)
3588 goto out;
3590 while(1) {
3591 slot = path->slots[0];
3592 leaf = path->nodes[0];
3593 if (slot >= btrfs_header_nritems(leaf)) {
3594 ret = btrfs_next_leaf(root, path);
3595 if (ret == 0)
3596 continue;
3597 if (ret < 0)
3598 goto out;
3599 break;
3600 }
3601 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3603 if (found_key.objectid >= key->objectid &&
3604 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
3605 ret = 0;
3606 goto out;
3607 }
3608 path->slots[0]++;
3609 }
3610 ret = -ENOENT;
3611 out:
3612 return ret;
3613 }
3615 int btrfs_read_block_groups(struct btrfs_root *root)
3617 struct btrfs_path *path;
3620 struct btrfs_block_group_cache *cache;
3621 struct btrfs_fs_info *info = root->fs_info;
3622 struct btrfs_space_info *space_info;
3623 struct extent_io_tree *block_group_cache;
3624 struct btrfs_key key;
3625 struct btrfs_key found_key;
3626 struct extent_buffer *leaf;
3628 block_group_cache = &info->block_group_cache;
3629 root = info->extent_root;
3632 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
3633 path = btrfs_alloc_path();
3637 mutex_lock(&root->fs_info->alloc_mutex);
3638 while(1) {
3639 ret = find_first_block_group(root, path, &key);
3640 if (ret > 0) {
3641 ret = 0;
3642 goto error;
3643 }
3644 if (ret != 0)
3645 goto error;
3647 leaf = path->nodes[0];
3648 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3649 cache = kzalloc(sizeof(*cache), GFP_NOFS);
3650 if (!cache) {
3651 ret = -ENOMEM;
3652 break;
3653 }
3655 spin_lock_init(&cache->lock);
3656 read_extent_buffer(leaf, &cache->item,
3657 btrfs_item_ptr_offset(leaf, path->slots[0]),
3658 sizeof(cache->item));
3659 memcpy(&cache->key, &found_key, sizeof(found_key));
3661 key.objectid = found_key.objectid + found_key.offset;
3662 btrfs_release_path(root, path);
3663 cache->flags = btrfs_block_group_flags(&cache->item);
3665 if (cache->flags & BTRFS_BLOCK_GROUP_DATA) {
3666 bit = BLOCK_GROUP_DATA;
3667 } else if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
3668 bit = BLOCK_GROUP_SYSTEM;
3669 } else if (cache->flags & BTRFS_BLOCK_GROUP_METADATA) {
3670 bit = BLOCK_GROUP_METADATA;
3671 }
3672 set_avail_alloc_bits(info, cache->flags);
3674 ret = update_space_info(info, cache->flags, found_key.offset,
3675 btrfs_block_group_used(&cache->item),
3676 &space_info);
3677 BUG_ON(ret);
3678 cache->space_info = space_info;
3680 /* use EXTENT_LOCKED to prevent merging */
3681 set_extent_bits(block_group_cache, found_key.objectid,
3682 found_key.objectid + found_key.offset - 1,
3683 EXTENT_LOCKED, GFP_NOFS);
3684 set_state_private(block_group_cache, found_key.objectid,
3685 (unsigned long)cache);
3686 set_extent_bits(block_group_cache, found_key.objectid,
3687 found_key.objectid + found_key.offset - 1,
3688 bit | EXTENT_LOCKED, GFP_NOFS);
3689 if (key.objectid >=
3690 btrfs_super_total_bytes(&info->super_copy))
3691 break;
3692 }
3693 ret = 0;
3694 error:
3695 btrfs_free_path(path);
3696 mutex_unlock(&root->fs_info->alloc_mutex);
3697 return ret;
3698 }
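/*
 * Editor's illustration: the loop above publishes each block group by
 * setting EXTENT_LOCKED bits over its range and stashing the cache struct
 * as the range's state private. A lookup is therefore roughly the sketch
 * below; btrfs_lookup_block_group() in this file is the real, more
 * careful implementation.
 */
#if 0
static struct btrfs_block_group_cache *
lookup_bg_sketch(struct btrfs_fs_info *info, u64 bytenr)
{
	u64 start;
	u64 end;
	u64 ptr;
	int ret;

	ret = find_first_extent_bit(&info->block_group_cache, bytenr,
				    &start, &end, BLOCK_GROUP_DATA |
				    BLOCK_GROUP_METADATA | BLOCK_GROUP_SYSTEM);
	if (ret)
		return NULL;
	ret = get_state_private(&info->block_group_cache, start, &ptr);
	if (ret)
		return NULL;
	return (struct btrfs_block_group_cache *)(unsigned long)ptr;
}
#endif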
3700 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
3701 struct btrfs_root *root, u64 bytes_used,
3702 u64 type, u64 chunk_objectid, u64 chunk_offset,
3703 u64 size)
3704 {
3705 int ret;
3706 int bit;
3707 struct btrfs_root *extent_root;
3708 struct btrfs_block_group_cache *cache;
3709 struct extent_io_tree *block_group_cache;
3711 WARN_ON(!mutex_is_locked(&root->fs_info->alloc_mutex));
3712 extent_root = root->fs_info->extent_root;
3713 block_group_cache = &root->fs_info->block_group_cache;
3715 root->fs_info->last_trans_new_blockgroup = trans->transid;
3717 cache = kzalloc(sizeof(*cache), GFP_NOFS);
3718 BUG_ON(!cache);
3719 cache->key.objectid = chunk_offset;
3720 cache->key.offset = size;
3721 spin_lock_init(&cache->lock);
3722 btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);
3724 btrfs_set_block_group_used(&cache->item, bytes_used);
3725 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
3726 cache->flags = type;
3727 btrfs_set_block_group_flags(&cache->item, type);
3729 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
3730 &cache->space_info);
3731 BUG_ON(ret);
3733 bit = block_group_state_bits(type);
3734 set_extent_bits(block_group_cache, chunk_offset,
3735 chunk_offset + size - 1,
3736 EXTENT_LOCKED, GFP_NOFS);
3737 set_state_private(block_group_cache, chunk_offset,
3738 (unsigned long)cache);
3739 set_extent_bits(block_group_cache, chunk_offset,
3740 chunk_offset + size - 1,
3741 bit | EXTENT_LOCKED, GFP_NOFS);
3743 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
3744 sizeof(cache->item));
3745 BUG_ON(ret);
3747 finish_current_insert(trans, extent_root);
3748 ret = del_pending_extents(trans, extent_root);
3749 BUG_ON(ret);
3750 set_avail_alloc_bits(extent_root->fs_info, type);
3752 return 0;
3753 }
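/*
 * Editor's sketch of the expected caller: chunk allocation creates the
 * chunk mapping first and then records it as a block group. The helper
 * name and parameters here are assumptions; only btrfs_make_block_group()
 * itself comes from the code above.
 */
#if 0
static int record_new_chunk_sketch(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, u64 type,
				   u64 chunk_offset, u64 chunk_size)
{
	/* a brand new block group starts out with zero bytes used */
	return btrfs_make_block_group(trans, root, 0, type,
				      BTRFS_FIRST_CHUNK_TREE_OBJECTID,
				      chunk_offset, chunk_size);
}
#endif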