#include "mballoc.h"
#include <linux/debugfs.h>
+#include <linux/slab.h>
#include <trace/events/ext4.h>
/*
*
* pa_lstart -> the logical start block for this prealloc space
* pa_pstart -> the physical start block for this prealloc space
- * pa_len -> lenght for this prealloc space
+ * pa_len -> length for this prealloc space
* pa_free -> free space available in this prealloc space
*
* The inode preallocation space is used looking at the _logical_ start
* block.
*
* The regular allocator uses buddy scan only if the request len is power of
* 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
* value of s_mb_order2_reqs can be tuned via
* /sys/fs/ext4/<partition>/mb_order2_req. If the request len is equal to
- * stripe size (sbi->s_stripe), we try to search for contigous block in
+ * stripe size (sbi->s_stripe), we try to search for contiguous block in
* stripe size. This should result in better allocation on RAID setups. If
* not, we search in the specific group using bitmap for best extents. The
* tunable min_to_scan and max_to_scan control the behaviour here.
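+ *
+ * For example, the scan limits can be adjusted at run time through the
+ * sysfs files above (the values here are illustrative only):
+ *
+ *	echo 64  > /sys/fs/ext4/<partition>/mb_min_to_scan
+ *	echo 512 > /sys/fs/ext4/<partition>/mb_max_to_scan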
for (i = 0; i < count; i++) {
if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
ext4_fsblk_t blocknr;
- blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
+
+ blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
blocknr += first + i;
- blocknr +=
- le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
ext4_grp_locked_error(sb, e4b->bd_group,
__func__, "double-free of inode"
" %lu's block %llu(bit %u in group %u)",
inode ? inode->i_ino : 0, blocknr,
first + i, e4b->bd_group);
}
}
+/*
+ * Cache the order of the largest free extent we have available in this block
+ * group.
+ */
+static void
+mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
+{
+ int i;
+ int bits;
+
+ grp->bb_largest_free_order = -1; /* uninit */
+
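+ /*
+ * bb_counters[i] counts free extents of order i (2^i blocks); the
+ * largest possible order in a group is blocksize_bits + 1, so scan
+ * downward and record the first non-empty order.
+ */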
+ bits = sb->s_blocksize_bits + 1;
+ for (i = bits; i >= 0; i--) {
+ if (grp->bb_counters[i] > 0) {
+ grp->bb_largest_free_order = i;
+ break;
+ }
+ }
+}
+
static noinline_for_stack
void ext4_mb_generate_buddy(struct super_block *sb,
void *buddy, void *bitmap, ext4_group_t group)
*/
grp->bb_free = free;
}
+ mb_set_largest_free_order(sb, grp);
clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
* contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks.
* So it can have information regarding groups_per_page which
* is blocks_per_page/2
+ *
+ * Locking note: This routine takes the block group lock of all groups
+ * for this page; do not hold this lock when calling this routine!
*/
static int ext4_mb_init_cache(struct page *page, char *incore)
BUG_ON(incore == NULL);
mb_debug(1, "put buddy for group %u in page %lu/%x\n",
group, page->index, i * blocksize);
+ trace_ext4_mb_buddy_bitmap_load(sb, group);
grinfo = ext4_get_group_info(sb, group);
grinfo->bb_fragments = 0;
memset(grinfo->bb_counters, 0,
BUG_ON(incore != NULL);
mb_debug(1, "put bitmap for group %u in page %lu/%x\n",
group, page->index, i * blocksize);
+ trace_ext4_mb_bitmap_load(sb, group);
/* see comments in ext4_mb_put_pa() */
ext4_lock_group(sb, group);
return err;
}
+/*
+ * Locking note: This routine calls ext4_mb_init_cache(), which takes the
+ * block group lock of all groups for this page; do not hold the BG lock when
+ * calling this routine!
+ */
static noinline_for_stack
int ext4_mb_init_group(struct super_block *sb, ext4_group_t group)
{
return ret;
}
+/*
+ * Locking note: This routine calls ext4_mb_init_cache(), which takes the
+ * block group lock of all groups for this page; do not hold the BG lock when
+ * calling this routine!
+ */
static noinline_for_stack int
ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
struct ext4_buddy *e4b)
return ret;
}
-static void ext4_mb_release_desc(struct ext4_buddy *e4b)
+static void ext4_mb_unload_buddy(struct ext4_buddy *e4b)
{
if (e4b->bd_bitmap_page)
page_cache_release(e4b->bd_bitmap_page);
if (!mb_test_bit(block, EXT4_MB_BITMAP(e4b))) {
ext4_fsblk_t blocknr;
- blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
+
+ blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
blocknr += block;
- blocknr +=
- le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
ext4_grp_locked_error(sb, e4b->bd_group,
__func__, "double-free of inode"
" %lu's block %llu(bit %u in group %u)",
inode ? inode->i_ino : 0, blocknr, block,
e4b->bd_group);
buddy = buddy2;
} while (1);
}
+ mb_set_largest_free_order(sb, e4b->bd_info);
mb_check_buddy(e4b);
}
e4b->bd_info->bb_counters[ord]++;
e4b->bd_info->bb_counters[ord]++;
}
+ mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
mb_set_bits(EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
mb_check_buddy(e4b);
}
ext4_unlock_group(ac->ac_sb, group);
- ext4_mb_release_desc(e4b);
+ ext4_mb_unload_buddy(e4b);
return 0;
}
int max;
int err;
struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
- struct ext4_super_block *es = sbi->s_es;
struct ext4_free_extent ex;
if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
ext4_fsblk_t start;
- start = (e4b->bd_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) +
- ex.fe_start + le32_to_cpu(es->s_first_data_block);
+ start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) +
+ ex.fe_start;
/* use do_div to get remainder (would be 64-bit modulo) */
if (do_div(start, sbi->s_stripe) == 0) {
ac->ac_found++;
ext4_mb_use_best_found(ac, e4b);
}
ext4_unlock_group(ac->ac_sb, group);
- ext4_mb_release_desc(e4b);
+ ext4_mb_unload_buddy(e4b);
return 0;
}
BUG_ON(sbi->s_stripe == 0);
/* find first stripe-aligned block in group */
- first_group_block = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb)
- + le32_to_cpu(sbi->s_es->s_first_data_block);
+ first_group_block = ext4_group_first_block_no(sb, e4b->bd_group);
+
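+ /*
+ * Round first_group_block up to a multiple of s_stripe; i is then
+ * the offset of that stripe-aligned block within the group.
+ */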
a = first_group_block + sbi->s_stripe - 1;
do_div(a, sbi->s_stripe);
i = (a * sbi->s_stripe) - first_group_block;
}
}
+/* This is now called BEFORE we load the buddy bitmap. */
static int ext4_mb_good_group(struct ext4_allocation_context *ac,
ext4_group_t group, int cr)
{
unsigned free, fragments;
- unsigned i, bits;
int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb));
struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
BUG_ON(cr < 0 || cr >= 4);
- BUG_ON(EXT4_MB_GRP_NEED_INIT(grp));
+
+ /* We only do this if the grp has never been initialized */
+ if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
+ int ret = ext4_mb_init_group(ac->ac_sb, group);
+ if (ret)
+ return 0;
+ }
free = grp->bb_free;
fragments = grp->bb_fragments;
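+ /*
+ * cr loosens the criteria as it grows: 0 needs a buddy extent of
+ * the requested order, 1 needs the average fragment size to cover
+ * the request, and higher values accept progressively worse groups.
+ */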
case 0:
BUG_ON(ac->ac_2order == 0);
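+ /*
+ * The cached largest free order lets us reject this group without
+ * loading the buddy bitmap or scanning bb_counters.
+ */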
+ if (grp->bb_largest_free_order < ac->ac_2order)
+ return 0;
+
/* Avoid using the first bg of a flexgroup for data files */
if ((ac->ac_flags & EXT4_MB_HINT_DATA) &&
(flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) &&
((group % flex_size) == 0))
return 0;
- bits = ac->ac_sb->s_blocksize_bits + 1;
- for (i = ac->ac_2order; i <= bits; i++)
- if (grp->bb_counters[i] > 0)
- return 1;
- break;
+ return 1;
case 1:
if ((free / fragments) >= ac->ac_g_ex.fe_len)
return 1;
sbi = EXT4_SB(sb);
ngroups = ext4_get_groups_count(sb);
/* non-extent files are limited to low blocks/groups */
- if (!(EXT4_I(ac->ac_inode)->i_flags & EXT4_EXTENTS_FL))
+ if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)))
ngroups = sbi->s_blockfile_groups;
BUG_ON(ac->ac_status == AC_STATUS_FOUND);
group = ac->ac_g_ex.fe_group;
for (i = 0; i < ngroups; group++, i++) {
- struct ext4_group_info *grp;
- struct ext4_group_desc *desc;
-
if (group == ngroups)
group = 0;
- /* quick check to skip empty groups */
- grp = ext4_get_group_info(sb, group);
- if (grp->bb_free == 0)
+ /* This now checks without needing the buddy page */
+ if (!ext4_mb_good_group(ac, group, cr))
continue;
err = ext4_mb_load_buddy(sb, group, &e4b);
goto out;
ext4_lock_group(sb, group);
+
+ /*
+ * We need to check again after locking the
+ * block group
+ */
if (!ext4_mb_good_group(ac, group, cr)) {
- /* someone did allocation from this group */
ext4_unlock_group(sb, group);
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
continue;
}
ac->ac_groups_scanned++;
- desc = ext4_get_group_desc(sb, group, NULL);
if (cr == 0)
ext4_mb_simple_scan_group(ac, &e4b);
else if (cr == 1 &&
ext4_mb_complex_scan_group(ac, &e4b);
ext4_unlock_group(sb, group);
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
if (ac->ac_status != AC_STATUS_CONTINUE)
break;
ext4_lock_group(sb, group);
memcpy(&sg, ext4_get_group_info(sb, group), i);
ext4_unlock_group(sb, group);
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free,
sg.info.bb_fragments, sg.info.bb_first_free);
INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
init_rwsem(&meta_group_info[i]->alloc_sem);
- meta_group_info[i]->bb_free_root.rb_node = NULL;
+ meta_group_info[i]->bb_free_root = RB_ROOT;
+ meta_group_info[i]->bb_largest_free_order = -1; /* uninit */
#ifdef DOUBLE_CHECK
{
mb_debug(1, "gonna free %u blocks in group %u (0x%p):",
entry->count, entry->group, entry);
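+ /*
+ * Issue the discard while the freed extent is still pinned in the
+ * free list, before the blocks go back to the buddy cache and can
+ * be reused.
+ */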
+ if (test_opt(sb, DISCARD)) {
+ int ret;
+ ext4_fsblk_t discard_block;
+
+ discard_block = entry->start_blk +
+ ext4_group_first_block_no(sb, entry->group);
+ trace_ext4_discard_blocks(sb,
+ (unsigned long long)discard_block,
+ entry->count);
+ ret = sb_issue_discard(sb, discard_block, entry->count);
+ if (ret == EOPNOTSUPP) {
+ ext4_warning(sb,
+ "discard not supported, disabling");
+ clear_opt(EXT4_SB(sb)->s_mount_opt, DISCARD);
+ }
+ }
+
err = ext4_mb_load_buddy(sb, entry->group, &e4b);
/* we expect to find existing buddy because it's pinned */
BUG_ON(err != 0);
page_cache_release(e4b.bd_bitmap_page);
}
ext4_unlock_group(sb, entry->group);
- if (test_opt(sb, DISCARD)) {
- ext4_fsblk_t discard_block;
- struct ext4_super_block *es = EXT4_SB(sb)->s_es;
-
- discard_block = (ext4_fsblk_t)entry->group *
- EXT4_BLOCKS_PER_GROUP(sb)
- + entry->start_blk
- + le32_to_cpu(es->s_first_data_block);
- trace_ext4_discard_blocks(sb,
- (unsigned long long)discard_block,
- entry->count);
- sb_issue_discard(sb, discard_block, entry->count);
- }
kmem_cache_free(ext4_free_ext_cachep, entry);
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
}
mb_debug(1, "freed %u blocks in %u structures\n", count, count2);
void exit_ext4_mballoc(void)
{
- /*
+ /*
* Wait for completion of call_rcu()'s on ext4_pspace_cachep
* before destroying the slab cache.
*/
if (err)
goto out_err;
- block = ac->ac_b_ex.fe_group * EXT4_BLOCKS_PER_GROUP(sb)
- + ac->ac_b_ex.fe_start
- + le32_to_cpu(es->s_first_data_block);
+ block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
len = ac->ac_b_ex.fe_len;
if (!ext4_data_block_valid(sbi, block, len)) {
- ext4_error(sb, __func__,
- "Allocating blocks %llu-%llu which overlap "
+ ext4_error(sb, "Allocating blocks %llu-%llu which overlap "
"fs metadata\n", block, block+len);
/* File system mounted not to panic on error
* Fix the bitmap and repeat the block allocation
if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
/* release all the reserved blocks if non delalloc */
percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks);
- else {
- percpu_counter_sub(&sbi->s_dirtyblocks_counter,
- ac->ac_b_ex.fe_len);
- /* convert reserved quota blocks to real quota blocks */
- vfs_dq_claim_block(ac->ac_inode, ac->ac_b_ex.fe_len);
- }
if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi,
if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
atomic_inc(&sbi->s_bal_reqs);
atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
- if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
+ if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
atomic_inc(&sbi->s_bal_success);
atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
}
/*
+ * Called on failure; free up any blocks from the inode PA for this
+ * context. We don't need this for MB_GROUP_PA because pa_free is only
+ * changed in ext4_mb_release_context(), and by then, on failure,
+ * ac->ac_b_ex.fe_len has already been zeroed, so group_pa->pa_free is
+ * not changed.
+ */
+static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
+{
+ struct ext4_prealloc_space *pa = ac->ac_pa;
+ int len;
+
+ if (pa && pa->pa_type == MB_INODE_PA) {
+ len = ac->ac_b_ex.fe_len;
+ pa->pa_free += len;
+ }
+}
+
+/*
* use blocks preallocated to inode
*/
static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
continue;
/* non-extent files can't have physical blocks past 2^32 */
- if (!(EXT4_I(ac->ac_inode)->i_flags & EXT4_EXTENTS_FL) &&
+ if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) &&
pa->pa_pstart + pa->pa_len > EXT4_MAX_BLOCK_FILE_PHYS)
continue;
/* The max size of hash table is PREALLOC_TB_SIZE */
order = PREALLOC_TB_SIZE - 1;
- goal_block = ac->ac_g_ex.fe_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb) +
- ac->ac_g_ex.fe_start +
- le32_to_cpu(EXT4_SB(ac->ac_sb)->s_es->s_first_data_block);
+ goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex);
/*
* search for the prealloc space that is having
* minimal distance from the goal block.
spin_unlock(&pa->pa_lock);
grp_blk = pa->pa_pstart;
- /*
+ /*
* If doing group-based preallocation, pa_pstart may be in the
* next group when pa is used up
*/
if (bit >= end)
break;
next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
- start = group * EXT4_BLOCKS_PER_GROUP(sb) + bit +
- le32_to_cpu(sbi->s_es->s_first_data_block);
+ start = ext4_group_first_block_no(sb, group) + bit;
mb_debug(1, " free preallocated %u/%u in group %u\n",
(unsigned) start, (unsigned) next - bit,
(unsigned) group);
bitmap_bh = ext4_read_block_bitmap(sb, group);
if (bitmap_bh == NULL) {
- ext4_error(sb, __func__, "Error in reading block "
- "bitmap for %u", group);
+ ext4_error(sb, "Error reading block bitmap for %u", group);
return 0;
}
err = ext4_mb_load_buddy(sb, group, &e4b);
if (err) {
- ext4_error(sb, __func__, "Error in loading buddy "
- "information for %u", group);
+ ext4_error(sb, "Error loading buddy information for %u", group);
put_bh(bitmap_bh);
return 0;
}
ext4_unlock_group(sb, group);
if (ac)
kmem_cache_free(ext4_ac_cachep, ac);
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
put_bh(bitmap_bh);
return free;
}
err = ext4_mb_load_buddy(sb, group, &e4b);
if (err) {
- ext4_error(sb, __func__, "Error in loading buddy "
- "information for %u", group);
+ ext4_error(sb, "Error loading buddy information for %u",
+ group);
continue;
}
bitmap_bh = ext4_read_block_bitmap(sb, group);
if (bitmap_bh == NULL) {
- ext4_error(sb, __func__, "Error in reading block "
- "bitmap for %u", group);
- ext4_mb_release_desc(&e4b);
+ ext4_error(sb, "Error reading block bitmap for %u",
+ group);
+ ext4_mb_unload_buddy(&e4b);
continue;
}
ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
ext4_unlock_group(sb, group);
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
put_bh(bitmap_bh);
list_del(&pa->u.pa_tmp_list);
/* don't use group allocation for large files */
size = max(size, isize);
- if (size >= sbi->s_mb_stream_request) {
+ if (size > sbi->s_mb_stream_request) {
ac->ac_flags |= EXT4_MB_STREAM_ALLOC;
return;
}
* per cpu locality group is to reduce the contention between block
* request from multiple CPUs.
*/
- ac->ac_lg = per_cpu_ptr(sbi->s_locality_groups, raw_smp_processor_id());
+ ac->ac_lg = __this_cpu_ptr(sbi->s_locality_groups);
/* we're going to use group allocation */
ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
if (ext4_mb_load_buddy(sb, group, &e4b)) {
- ext4_error(sb, __func__, "Error in loading buddy "
- "information for %u", group);
+ ext4_error(sb, "Error loading buddy information for %u",
+ group);
continue;
}
ext4_lock_group(sb, group);
ext4_mb_release_group_pa(&e4b, pa, ac);
ext4_unlock_group(sb, group);
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
list_del(&pa->u.pa_tmp_list);
call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
}
return 0;
}
reserv_blks = ar->len;
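+ /*
+ * If quota can't cover the whole request, shrink it a block at a
+ * time and disable preallocation so no extra blocks are charged.
+ */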
- while (ar->len && vfs_dq_alloc_block(ar->inode, ar->len)) {
+ while (ar->len && dquot_alloc_block(ar->inode, ar->len)) {
ar->flags |= EXT4_MB_HINT_NOPREALLOC;
ar->len--;
}
ac->ac_status = AC_STATUS_CONTINUE;
goto repeat;
} else if (*errp) {
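+ /*
+ * On failure, hand any blocks already taken out of an inode PA
+ * back to pa_free before zeroing the best-found extent.
+ */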
+ ext4_discard_allocated_blocks(ac);
ac->ac_b_ex.fe_len = 0;
ar->len = 0;
ext4_mb_show_ac(ac);
kmem_cache_free(ext4_ac_cachep, ac);
out1:
if (inquota && ar->len < inquota)
- vfs_dq_free_block(ar->inode, inquota - ar->len);
+ dquot_free_block(ar->inode, inquota - ar->len);
out3:
if (!ar->len) {
if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag)
return 0;
}
-/*
- * Main entry point into mballoc to free blocks
+/**
+ * ext4_free_blocks() -- Free given blocks and update quota
+ * @handle: handle for this transaction
+ * @inode: inode
+ * @bh: optional buffer of the block(s) to be freed
+ * @block: start physical block to free
+ * @count: number of blocks to free
+ * @flags: EXT4_FREE_BLOCKS_* flags
*/
-void ext4_mb_free_blocks(handle_t *handle, struct inode *inode,
- ext4_fsblk_t block, unsigned long count,
- int metadata, unsigned long *freed)
+void ext4_free_blocks(handle_t *handle, struct inode *inode,
+ struct buffer_head *bh, ext4_fsblk_t block,
+ unsigned long count, int flags)
{
struct buffer_head *bitmap_bh = NULL;
struct super_block *sb = inode->i_sb;
struct ext4_allocation_context *ac = NULL;
struct ext4_group_desc *gdp;
struct ext4_super_block *es;
+ unsigned long freed = 0;
unsigned int overflow;
ext4_grpblk_t bit;
struct buffer_head *gd_bh;
int err = 0;
int ret;
- *freed = 0;
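+ /*
+ * A caller-supplied buffer_head must agree with (or supply) the
+ * starting block number.
+ */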
+ if (bh) {
+ if (block)
+ BUG_ON(block != bh->b_blocknr);
+ else
+ block = bh->b_blocknr;
+ }
sbi = EXT4_SB(sb);
es = EXT4_SB(sb)->s_es;
- if (block < le32_to_cpu(es->s_first_data_block) ||
- block + count < block ||
- block + count > ext4_blocks_count(es)) {
- ext4_error(sb, __func__,
- "Freeing blocks not in datazone - "
- "block = %llu, count = %lu", block, count);
+ if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) &&
+ !ext4_data_block_valid(sbi, block, count)) {
+ ext4_error(sb, "Freeing blocks not in datazone - "
+ "block = %llu, count = %lu", block, count);
goto error_return;
}
ext4_debug("freeing block %llu\n", block);
- trace_ext4_free_blocks(inode, block, count, metadata);
+ trace_ext4_free_blocks(inode, block, count, flags);
+
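+ /*
+ * For FORGET, have the journal forget (or revoke) each buffer
+ * mapped over the freed range; a single bh may be passed in
+ * directly, otherwise look the blocks' buffers up one at a time.
+ */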
+ if (flags & EXT4_FREE_BLOCKS_FORGET) {
+ struct buffer_head *tbh = bh;
+ int i;
+
+ BUG_ON(bh && (count > 1));
+
+ for (i = 0; i < count; i++) {
+ if (!bh)
+ tbh = sb_find_get_block(inode->i_sb,
+ block + i);
+ ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA,
+ inode, tbh, block + i);
+ }
+ }
+
+ /*
+ * We need to make sure we don't reuse the freed block until
+ * after the transaction is committed, which we can do by
+ * treating the block as metadata, below. We make an
+ * exception if the inode is to be written in writeback mode
+ * since writeback mode has weak data consistency guarantees.
+ */
+ if (!ext4_should_writeback_data(inode))
+ flags |= EXT4_FREE_BLOCKS_METADATA;
ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
if (ac) {
in_range(block + count - 1, ext4_inode_table(sb, gdp),
EXT4_SB(sb)->s_itb_per_group)) {
- ext4_error(sb, __func__,
- "Freeing blocks in system zone - "
+ ext4_error(sb, "Freeing blocks in system zone - "
"Block = %llu, count = %lu", block, count);
/* err = 0. ext4_std_error should be a no op */
goto error_return;
err = ext4_mb_load_buddy(sb, block_group, &e4b);
if (err)
goto error_return;
- if (metadata && ext4_handle_valid(handle)) {
+
+ if ((flags & EXT4_FREE_BLOCKS_METADATA) && ext4_handle_valid(handle)) {
struct ext4_free_data *new_entry;
/*
* blocks being freed are metadata. these blocks shouldn't
atomic_add(count, &sbi->s_flex_groups[flex_group].free_blocks);
}
- ext4_mb_release_desc(&e4b);
+ ext4_mb_unload_buddy(&e4b);
- *freed += count;
+ freed += count;
/* We dirtied the bitmap block */
BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
}
sb->s_dirt = 1;
error_return:
+ if (freed)
+ dquot_free_block(inode, freed);
brelse(bitmap_bh);
ext4_std_error(sb, err);
if (ac)