#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
+#include <linux/version.h>
+#include "compat.h"
#include "hash.h"
#include "crc32c.h"
#include "ctree.h"
#include "volumes.h"
#include "locking.h"
#include "ref-cache.h"
+#include "compat.h"
#define PENDING_EXTENT_INSERT 0
#define PENDING_EXTENT_DELETE 1
static int finish_current_insert(struct btrfs_trans_handle *trans, struct
btrfs_root *extent_root, int all);
static int del_pending_extents(struct btrfs_trans_handle *trans, struct
btrfs_root *extent_root, int all);
-static struct btrfs_block_group_cache *
-__btrfs_find_block_group(struct btrfs_root *root,
- struct btrfs_block_group_cache *hint,
- u64 search_start, int data, int owner);
static int pin_down_bytes(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u64 num_bytes, int is_data);
* this adds the block group to the fs_info rb tree for the block group
* cache
*/
-int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
+static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
struct btrfs_block_group_cache *block_group)
{
struct rb_node **p;
break;
}
}
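+ /* the caller now owns a reference; it is dropped with put_block_group() */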
+ if (ret)
+ atomic_inc(&ret->count);
spin_unlock(&info->block_group_cache_lock);
return ret;
start = extent_end + 1;
} else if (extent_start > start && extent_start < end) {
size = extent_start - start;
- ret = btrfs_add_free_space_lock(block_group, start,
- size);
+ ret = btrfs_add_free_space(block_group, start,
+ size);
BUG_ON(ret);
start = extent_end + 1;
} else {
if (start < end) {
size = end - start;
- ret = btrfs_add_free_space_lock(block_group, start, size);
+ ret = btrfs_add_free_space(block_group, start, size);
BUG_ON(ret);
}
mutex_unlock(&info->pinned_mutex);
return 0;
}
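+
+/*
+ * the super block mirrors live inside some block groups; carve those byte
+ * ranges back out of the free space cache so they are never handed out
+ */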
+static int remove_sb_from_cache(struct btrfs_root *root,
+ struct btrfs_block_group_cache *cache)
+{
+ u64 bytenr;
+ u64 *logical;
+ int stripe_len;
+ int i, nr, ret;
+
+ for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
+ bytenr = btrfs_sb_offset(i);
+ ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
+ cache->key.objectid, bytenr, 0,
+ &logical, &nr, &stripe_len);
+ BUG_ON(ret);
+ while (nr--) {
+ btrfs_remove_free_space(cache, logical[nr],
+ stripe_len);
+ }
+ kfree(logical);
+ }
+ return 0;
+}
+
static int cache_block_group(struct btrfs_root *root,
struct btrfs_block_group_cache *block_group)
{
struct btrfs_key key;
struct extent_buffer *leaf;
int slot;
- u64 last = 0;
- u64 first_free;
- int found = 0;
+ u64 last;
if (!block_group)
return 0;
* skip the locking here
*/
path->skip_locking = 1;
- first_free = max_t(u64, block_group->key.objectid,
- BTRFS_SUPER_INFO_OFFSET + BTRFS_SUPER_INFO_SIZE);
- key.objectid = block_group->key.objectid;
+ last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
+ key.objectid = last;
key.offset = 0;
btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0)
goto err;
- ret = btrfs_previous_item(root, path, 0, BTRFS_EXTENT_ITEM_KEY);
- if (ret < 0)
- goto err;
- if (ret == 0) {
- leaf = path->nodes[0];
- btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
- if (key.objectid + key.offset > first_free)
- first_free = key.objectid + key.offset;
- }
+
while(1) {
leaf = path->nodes[0];
slot = path->slots[0];
break;
if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
- if (!found) {
- last = first_free;
- found = 1;
- }
-
add_new_free_space(block_group, root->fs_info, last,
key.objectid);
path->slots[0]++;
}
- if (!found)
- last = first_free;
-
add_new_free_space(block_group, root->fs_info, last,
block_group->key.objectid +
block_group->key.offset);
+ remove_sb_from_cache(root, block_group);
block_group->cached = 1;
ret = 0;
err:
/*
* return the block group that starts at or after bytenr
*/
-struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
+static struct btrfs_block_group_cache *btrfs_lookup_first_block_group(struct
btrfs_fs_info *info,
u64 bytenr)
{
return cache;
}
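+
+/*
+ * drop the reference taken by a block group lookup; the cache struct is
+ * freed once the last reference goes away
+ */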
+static inline void put_block_group(struct btrfs_block_group_cache *cache)
+{
+ if (atomic_dec_and_test(&cache->count))
+ kfree(cache);
+}
+
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
u64 flags)
{
return num;
}
-static struct btrfs_block_group_cache *
-__btrfs_find_block_group(struct btrfs_root *root,
- struct btrfs_block_group_cache *hint,
- u64 search_start, int data, int owner)
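+/*
+ * return the block group's start offset instead of a pointer so callers
+ * don't have to manage the new reference count
+ */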
+u64 btrfs_find_block_group(struct btrfs_root *root,
+ u64 search_start, u64 search_hint, int owner)
{
struct btrfs_block_group_cache *cache;
- struct btrfs_block_group_cache *found_group = NULL;
- struct btrfs_fs_info *info = root->fs_info;
u64 used;
- u64 last = 0;
- u64 free_check;
+ u64 last = max(search_hint, search_start);
+ u64 group_start = 0;
int full_search = 0;
- int factor = 10;
+ int factor = 9;
int wrapped = 0;
-
- if (data & BTRFS_BLOCK_GROUP_METADATA)
- factor = 9;
-
- if (search_start) {
- struct btrfs_block_group_cache *shint;
- shint = btrfs_lookup_first_block_group(info, search_start);
- if (shint && block_group_bits(shint, data)) {
- spin_lock(&shint->lock);
- used = btrfs_block_group_used(&shint->item);
- if (used + shint->pinned + shint->reserved <
- div_factor(shint->key.offset, factor)) {
- spin_unlock(&shint->lock);
- return shint;
- }
- spin_unlock(&shint->lock);
- }
- }
- if (hint && block_group_bits(hint, data)) {
- spin_lock(&hint->lock);
- used = btrfs_block_group_used(&hint->item);
- if (used + hint->pinned + hint->reserved <
- div_factor(hint->key.offset, factor)) {
- spin_unlock(&hint->lock);
- return hint;
- }
- spin_unlock(&hint->lock);
- last = hint->key.objectid + hint->key.offset;
- } else {
- if (hint)
- last = max(hint->key.objectid, search_start);
- else
- last = search_start;
- }
again:
while (1) {
cache = btrfs_lookup_first_block_group(root->fs_info, last);
last = cache->key.objectid + cache->key.offset;
used = btrfs_block_group_used(&cache->item);
- if (block_group_bits(cache, data)) {
- free_check = div_factor(cache->key.offset, factor);
+ if ((full_search || !cache->ro) &&
+ block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
if (used + cache->pinned + cache->reserved <
- free_check) {
- found_group = cache;
+ div_factor(cache->key.offset, factor)) {
+ group_start = cache->key.objectid;
spin_unlock(&cache->lock);
+ put_block_group(cache);
goto found;
}
}
spin_unlock(&cache->lock);
+ put_block_group(cache);
cond_resched();
}
if (!wrapped) {
goto again;
}
found:
- return found_group;
-}
-
-struct btrfs_block_group_cache *btrfs_find_block_group(struct btrfs_root *root,
- struct btrfs_block_group_cache
- *hint, u64 search_start,
- int data, int owner)
-{
-
- struct btrfs_block_group_cache *ret;
- ret = __btrfs_find_block_group(root, hint, search_start, data, owner);
- return ret;
+ return group_start;
}
/* simple helper to search for an existing extent at a given offset */
return ret;
}
+#ifdef BIO_RW_DISCARD
+static void btrfs_issue_discard(struct block_device *bdev,
+ u64 start, u64 len)
+{
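+ /*
+ * blkdev_issue_discard() takes 512-byte sectors, hence the >> 9;
+ * kernels without the gfp_mask argument use the three-argument form
+ */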
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
+ blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
+#else
+ blkdev_issue_discard(bdev, start >> 9, len >> 9);
+#endif
+}
+#endif
+
static int noinline free_extents(struct btrfs_trans_handle *trans,
struct btrfs_root *extent_root,
struct list_head *del_list)
BUG_ON(ret);
#ifdef BIO_RW_DISCARD
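+ /* map_length is an in/out argument to btrfs_map_block(), seed it first */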
+ map_length = tmp->num_bytes;
ret = btrfs_map_block(&info->mapping_tree, READ,
tmp->bytenr, &map_length, &multi,
0);
struct btrfs_bio_stripe *stripe;
int i;
- stripe = multi->stripe;
+ stripe = multi->stripes;
if (map_length > tmp->num_bytes)
map_length = tmp->num_bytes;
for (i = 0; i < multi->num_stripes;
i++, stripe++)
- blkdev_issue_discard(stripe->dev->bdev,
- stripe->physical >> 9,
- map_length >> 9);
+ btrfs_issue_discard(stripe->dev->bdev,
+ stripe->physical,
+ map_length);
kfree(multi);
}
#endif
return werr;
}
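+/*
+ * a bytenr that falls outside any block group is treated as readonly so
+ * nobody tries to allocate from it
+ */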
+int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
+{
+ struct btrfs_block_group_cache *block_group;
+ int readonly = 0;
+
+ block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
+ if (!block_group || block_group->ro)
+ readonly = 1;
+ if (block_group)
+ put_block_group(block_group);
+ return readonly;
+}
+
static int update_space_info(struct btrfs_fs_info *info, u64 flags,
u64 total_bytes, u64 bytes_used,
struct btrfs_space_info **space_info)
if (alloc) {
old_val += num_bytes;
cache->space_info->bytes_used += num_bytes;
- if (cache->ro) {
+ if (cache->ro)
cache->space_info->bytes_readonly -= num_bytes;
- WARN_ON(1);
- }
btrfs_set_block_group_used(&cache->item, old_val);
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
int ret;
ret = btrfs_add_free_space(cache, bytenr,
num_bytes);
- if (ret)
- return -1;
+ WARN_ON(ret);
}
}
+ put_block_group(cache);
total -= num_bytes;
bytenr += num_bytes;
}
static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
{
struct btrfs_block_group_cache *cache;
+ u64 bytenr;
cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
if (!cache)
return 0;
- return cache->key.objectid;
+ bytenr = cache->key.objectid;
+ put_block_group(cache);
+
+ return bytenr;
}
int btrfs_update_pinned_extents(struct btrfs_root *root,
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
fs_info->total_pinned -= len;
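+ /* return unpinned space to the free space cache if it is loaded */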
+ if (cache->cached)
+ btrfs_add_free_space(cache, bytenr, len);
}
+ put_block_group(cache);
bytenr += len;
num -= len;
}
}
spin_unlock(&cache->lock);
spin_unlock(&cache->space_info->lock);
+ put_block_group(cache);
bytenr += len;
num -= len;
}
u64 start;
u64 end;
int ret;
- struct btrfs_block_group_cache *cache;
mutex_lock(&root->fs_info->pinned_mutex);
while(1) {
break;
btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
clear_extent_dirty(unpin, start, end, GFP_NOFS);
- cache = btrfs_lookup_block_group(root->fs_info, start);
- if (cache->cached)
- btrfs_add_free_space(cache, start, end - start + 1);
if (need_resched()) {
mutex_unlock(&root->fs_info->pinned_mutex);
cond_resched();
mark_free = 1;
BUG_ON(ret < 0);
}
-
/* block accounting for super block */
spin_lock_irq(&info->delalloc_lock);
super_used = btrfs_super_bytes_used(&info->super_copy);
mark_free);
BUG_ON(ret);
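+ /* only data extents (owner >= BTRFS_FIRST_FREE_OBJECTID) carry csum items */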
+ if (owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
+ ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
+ BUG_ON(ret);
+ }
+
#ifdef BIO_RW_DISCARD
/* Tell the block device(s) that the sectors can be discarded */
ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
map_length = num_bytes;
for (i = 0; i < multi->num_stripes; i++, stripe++) {
- blkdev_issue_discard(stripe->dev->bdev,
- stripe->physical >> 9,
- map_length >> 9);
+ btrfs_issue_discard(stripe->dev->bdev,
+ stripe->physical,
+ map_length);
}
kfree(multi);
}
cache = btrfs_lookup_block_group(root->fs_info, bytenr);
BUG_ON(!cache);
btrfs_add_free_space(cache, bytenr, num_bytes);
+ put_block_group(cache);
update_reserved_extents(root, bytenr, num_bytes, 0);
return 0;
}
if (!block_group)
goto new_group_no_lock;
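+ /*
+ * load the free space cache under its own mutex so we don't
+ * hold alloc_mutex while scanning the extent tree
+ */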
+ if (unlikely(!block_group->cached)) {
+ mutex_lock(&block_group->cache_mutex);
+ ret = cache_block_group(root, block_group);
+ mutex_unlock(&block_group->cache_mutex);
+ if (ret)
+ break;
+ }
+
mutex_lock(&block_group->alloc_mutex);
if (unlikely(!block_group_bits(block_group, data)))
goto new_group;
- ret = cache_block_group(root, block_group);
- if (ret) {
- mutex_unlock(&block_group->alloc_mutex);
- break;
- }
-
- if (block_group->ro)
+ if (unlikely(block_group->ro))
goto new_group;
free_space = btrfs_find_free_space(block_group, search_start,
}
new_group:
mutex_unlock(&block_group->alloc_mutex);
+ put_block_group(block_group);
+ block_group = NULL;
new_group_no_lock:
/* don't try to compare new allocations against the
* last allocation any more
block_group = list_entry(cur, struct btrfs_block_group_cache,
list);
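+ /* hold a reference on the group while we examine it */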
+ atomic_inc(&block_group->count);
+
search_start = block_group->key.objectid;
cur = cur->next;
}
/* we found what we needed */
if (ins->objectid) {
if (!(data & BTRFS_BLOCK_GROUP_DATA))
- trans->block_group = block_group;
+ trans->block_group = block_group->key.objectid;
if (last_ptr)
*last_ptr = ins->objectid + ins->offset;
loop, allowed_chunk_alloc);
ret = -ENOSPC;
}
+ if (block_group)
+ put_block_group(block_group);
up_read(&space_info->groups_sem);
return ret;
return -ENOSPC;
}
btrfs_add_free_space(cache, start, len);
+ put_block_group(cache);
update_reserved_extents(root, start, len, 0);
return 0;
}
struct btrfs_block_group_cache *block_group;
block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
- mutex_lock(&block_group->alloc_mutex);
+ mutex_lock(&block_group->cache_mutex);
cache_block_group(root, block_group);
+ mutex_unlock(&block_group->cache_mutex);
- ret = btrfs_remove_free_space_lock(block_group, ins->objectid,
- ins->offset);
- mutex_unlock(&block_group->alloc_mutex);
+ ret = btrfs_remove_free_space(block_group, ins->objectid,
+ ins->offset);
BUG_ON(ret);
+ put_block_group(block_group);
ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
ref_generation, owner, ins);
return ret;
return 0;
}
-int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start, u64 len,
- u32 *refs)
+static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start,
+ u64 len, u32 *refs)
{
int ret;
root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
root_objectid == BTRFS_DEV_TREE_OBJECTID ||
- root_objectid == BTRFS_TREE_LOG_OBJECTID)
+ root_objectid == BTRFS_TREE_LOG_OBJECTID ||
+ root_objectid == BTRFS_CSUM_TREE_OBJECTID)
return 1;
return 0;
}
return flags;
}
-int __alloc_chunk_for_shrink(struct btrfs_root *root,
+static int __alloc_chunk_for_shrink(struct btrfs_root *root,
struct btrfs_block_group_cache *shrink_block_group,
int force)
{
btrfs_set_inode_generation(leaf, item, 1);
btrfs_set_inode_size(leaf, item, size);
btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
- btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NODATASUM |
- BTRFS_INODE_NOCOMPRESS);
+ btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS);
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(root, path);
out:
WARN_ON(block_group->reserved > 0);
WARN_ON(btrfs_block_group_used(&block_group->item) > 0);
spin_unlock(&block_group->lock);
+ put_block_group(block_group);
ret = 0;
out:
btrfs_free_path(path);
return ret;
}
-int find_first_block_group(struct btrfs_root *root, struct btrfs_path *path,
- struct btrfs_key *key)
+static int find_first_block_group(struct btrfs_root *root,
+ struct btrfs_path *path, struct btrfs_key *key)
{
int ret = 0;
struct btrfs_key found_key;
down_write(&block_group->space_info->groups_sem);
list_del(&block_group->list);
up_write(&block_group->space_info->groups_sem);
+
+ WARN_ON(atomic_read(&block_group->count) != 1);
kfree(block_group);
spin_lock(&info->block_group_cache_lock);
break;
}
+ atomic_set(&cache->count, 1);
spin_lock_init(&cache->lock);
mutex_init(&cache->alloc_mutex);
+ mutex_init(&cache->cache_mutex);
INIT_LIST_HEAD(&cache->list);
read_extent_buffer(leaf, &cache->item,
btrfs_item_ptr_offset(leaf, path->slots[0]),
cache->key.objectid = chunk_offset;
cache->key.offset = size;
+ cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
+ atomic_set(&cache->count, 1);
spin_lock_init(&cache->lock);
mutex_init(&cache->alloc_mutex);
+ mutex_init(&cache->cache_mutex);
INIT_LIST_HEAD(&cache->list);
- btrfs_set_key_type(&cache->key, BTRFS_BLOCK_GROUP_ITEM_KEY);
btrfs_set_block_group_used(&cache->item, bytes_used);
btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
spin_unlock(&block_group->space_info->lock);
block_group->space_info->full = 0;
- /*
- memset(shrink_block_group, 0, sizeof(*shrink_block_group));
- kfree(shrink_block_group);
- */
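+ /* one put for our lookup reference, one for the rbtree's original ref */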
+ put_block_group(block_group);
+ put_block_group(block_group);
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0)