Ext4: Uninitialized Block Groups
[safe/jmp/linux-2.6] / fs / ext4 / balloc.c
1 /*
2  *  linux/fs/ext4/balloc.c
3  *
4  * Copyright (C) 1992, 1993, 1994, 1995
5  * Remy Card (card@masi.ibp.fr)
6  * Laboratoire MASI - Institut Blaise Pascal
7  * Universite Pierre et Marie Curie (Paris VI)
8  *
9  *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
10  *  Big-endian to little-endian byte-swapping/bitmaps by
11  *        David S. Miller (davem@caip.rutgers.edu), 1995
12  */
13
14 #include <linux/time.h>
15 #include <linux/capability.h>
16 #include <linux/fs.h>
17 #include <linux/jbd2.h>
18 #include <linux/ext4_fs.h>
19 #include <linux/ext4_jbd2.h>
20 #include <linux/quotaops.h>
21 #include <linux/buffer_head.h>
22
23 #include "group.h"
24 /*
25  * balloc.c contains the blocks allocation and deallocation routines
26  */
27
28 /*
29  * Calculate the block group number and offset, given a block number
30  */
31 void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
32                 unsigned long *blockgrpp, ext4_grpblk_t *offsetp)
33 {
34         struct ext4_super_block *es = EXT4_SB(sb)->s_es;
35         ext4_grpblk_t offset;
36
37         blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
38         offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb));
39         if (offsetp)
40                 *offsetp = offset;
41         if (blockgrpp)
42                 *blockgrpp = blocknr;
43
44 }
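
As a quick illustration of the arithmetic above: do_div() divides its 64-bit first argument in place, leaving the quotient behind and returning the remainder. A minimal user-space sketch of the same computation, assuming a hypothetical geometry (first data block 1, 32768 blocks per group):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t blocknr = 100000;          /* absolute filesystem block */
            uint64_t first_data_block = 1;      /* assumed superblock value  */
            uint32_t blocks_per_group = 32768;  /* assumed geometry          */

            blocknr -= first_data_block;
            uint32_t offset = blocknr % blocks_per_group; /* do_div() remainder */
            uint64_t group  = blocknr / blocks_per_group; /* quotient           */

            printf("group %llu, offset %u\n",
                   (unsigned long long)group, offset);
            return 0;
    }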
45
46 /* Initializes an uninitialized block bitmap if given, and returns the
47  * number of blocks free in the group. */
48 unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
49                                 int block_group, struct ext4_group_desc *gdp)
50 {
51         unsigned long start;
52         int bit, bit_max;
53         unsigned free_blocks, group_blocks;
54         struct ext4_sb_info *sbi = EXT4_SB(sb);
55
56         if (bh) {
57                 J_ASSERT_BH(bh, buffer_locked(bh));
58
59                 /* If checksum is bad mark all blocks used to prevent allocation
60                  * essentially implementing a per-group read-only flag. */
61                 if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) {
62                         ext4_error(sb, __FUNCTION__,
63                                    "Checksum bad for group %u\n", block_group);
64                         gdp->bg_free_blocks_count = 0;
65                         gdp->bg_free_inodes_count = 0;
66                         gdp->bg_itable_unused = 0;
67                         memset(bh->b_data, 0xff, sb->s_blocksize);
68                         return 0;
69                 }
70                 memset(bh->b_data, 0, sb->s_blocksize);
71         }
72
73         /* Check for superblock and gdt backups in this group */
74         bit_max = ext4_bg_has_super(sb, block_group);
75
76         if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
77             block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
78                           sbi->s_desc_per_block) {
79                 if (bit_max) {
80                         bit_max += ext4_bg_num_gdb(sb, block_group);
81                         bit_max +=
82                                 le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
83                 }
84         } else { /* For META_BG_BLOCK_GROUPS */
85                 int group_rel = (block_group -
86                                  le32_to_cpu(sbi->s_es->s_first_meta_bg)) %
87                                 EXT4_DESC_PER_BLOCK(sb);
88                 if (group_rel == 0 || group_rel == 1 ||
89                     (group_rel == EXT4_DESC_PER_BLOCK(sb) - 1))
90                         bit_max += 1;
91         }
92
93         if (block_group == sbi->s_groups_count - 1) {
94                 /*
95                  * Even though mke2fs always initializes the first and last
96                  * groups, if some other tool enabled EXT4_BG_BLOCK_UNINIT
97                  * we need to make sure we calculate the right free-block count
98                  */
99                 group_blocks = ext4_blocks_count(sbi->s_es) -
100                         le32_to_cpu(sbi->s_es->s_first_data_block) -
101                         (EXT4_BLOCKS_PER_GROUP(sb) * (sbi->s_groups_count -1));
102         } else {
103                 group_blocks = EXT4_BLOCKS_PER_GROUP(sb);
104         }
105
106         free_blocks = group_blocks - bit_max;
107
108         if (bh) {
109                 for (bit = 0; bit < bit_max; bit++)
110                         ext4_set_bit(bit, bh->b_data);
111
112                 start = block_group * EXT4_BLOCKS_PER_GROUP(sb) +
113                         le32_to_cpu(sbi->s_es->s_first_data_block);
114
115                 /* Set bits for block and inode bitmaps, and inode table */
116                 ext4_set_bit(ext4_block_bitmap(sb, gdp) - start, bh->b_data);
117                 ext4_set_bit(ext4_inode_bitmap(sb, gdp) - start, bh->b_data);
118                 for (bit = ext4_inode_table(sb, gdp) - start,
119                      bit_max = bit + sbi->s_itb_per_group; bit < bit_max; bit++)
120                         ext4_set_bit(bit, bh->b_data);
121
122                 /*
123                  * Also if the number of blocks within the group is
124                  * less than the blocksize * 8 ( which is the size
125                  * of bitmap ), set rest of the block bitmap to 1
126                  */
127                 mark_bitmap_end(group_blocks, sb->s_blocksize * 8, bh->b_data);
128         }
129
130         return free_blocks - sbi->s_itb_per_group - 2;
131 }
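
The return value above folds together all of the per-group metadata overhead. A back-of-the-envelope sketch of that arithmetic, with every parameter assumed rather than read from a real superblock:

    /* Sketch only: all parameters are assumed, not taken from disk. */
    unsigned init_free_blocks(unsigned group_blocks, int has_super,
                              unsigned gdb_blocks, unsigned reserved_gdt,
                              unsigned itb_per_group)
    {
            /* superblock backup plus (reserved) group descriptor blocks */
            unsigned bit_max = has_super ? 1 + gdb_blocks + reserved_gdt : 0;

            /* the "- 2" covers the block bitmap and the inode bitmap */
            return group_blocks - bit_max - itb_per_group - 2;
    }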
132
133
134 /*
135  * The free blocks are managed by bitmaps.  A file system contains several
136  * blocks groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
137  * block for inodes, N blocks for the inode table and data blocks.
138  *
139  * The file system contains group descriptors which are located after the
140  * super block.  Each descriptor contains the number of the bitmap block and
141  * the free blocks count in the block.  The descriptors are loaded in memory
142  * when a file system is mounted (see ext4_fill_super).
143  */
144
145
146 #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
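
Note that in_range() is a macro and evaluates its arguments more than once, so side-effecting arguments should be avoided; a single-evaluation equivalent, shown purely for clarity, would be:

    static inline int in_range_fn(ext4_fsblk_t b, ext4_fsblk_t first,
                                  unsigned long len)
    {
            return b >= first && b <= first + len - 1;
    }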
147
148 /**
149  * ext4_get_group_desc() -- load group descriptor from disk
150  * @sb:                 super block
151  * @block_group:        given block group
152  * @bh:                 pointer to the buffer head to store the block
153  *                      group descriptor
154  */
155 struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
156                                              unsigned int block_group,
157                                              struct buffer_head ** bh)
158 {
159         unsigned long group_desc;
160         unsigned long offset;
161         struct ext4_group_desc * desc;
162         struct ext4_sb_info *sbi = EXT4_SB(sb);
163
164         if (block_group >= sbi->s_groups_count) {
165                 ext4_error (sb, "ext4_get_group_desc",
166                             "block_group >= groups_count - "
167                             "block_group = %d, groups_count = %lu",
168                             block_group, sbi->s_groups_count);
169
170                 return NULL;
171         }
172         smp_rmb();
173
174         group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
175         offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
176         if (!sbi->s_group_desc[group_desc]) {
177                 ext4_error (sb, "ext4_get_group_desc",
178                             "Group descriptor not loaded - "
179                             "block_group = %d, group_desc = %lu, desc = %lu",
180                              block_group, group_desc, offset);
181                 return NULL;
182         }
183
184         desc = (struct ext4_group_desc *)(
185                 (__u8 *)sbi->s_group_desc[group_desc]->b_data +
186                 offset * EXT4_DESC_SIZE(sb));
187         if (bh)
188                 *bh = sbi->s_group_desc[group_desc];
189         return desc;
190 }
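
The two index computations in this lookup are a divide and a modulo done with a shift and a mask. With an assumed 4 KB block size and 32-byte descriptors, EXT4_DESC_PER_BLOCK is 128 and EXT4_DESC_PER_BLOCK_BITS is 7, so block group 300 lands in descriptor block 2 at slot 44:

    /* Assumed geometry: 128 descriptors per block (7 bits). */
    enum { DESC_PER_BLOCK_BITS = 7, DESC_PER_BLOCK = 1 << DESC_PER_BLOCK_BITS };

    unsigned long desc_block(unsigned int block_group)
    {
            return block_group >> DESC_PER_BLOCK_BITS;   /* 300 -> 2  */
    }

    unsigned long desc_offset(unsigned int block_group)
    {
            return block_group & (DESC_PER_BLOCK - 1);   /* 300 -> 44 */
    }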
191
192 static inline int
193 block_in_use(ext4_fsblk_t block, struct super_block *sb, unsigned char *map)
194 {
195         ext4_grpblk_t offset;
196
197         ext4_get_group_no_and_offset(sb, block, NULL, &offset);
198         return ext4_test_bit (offset, map);
199 }
200
201 /**
202  * read_block_bitmap()
203  * @sb:                 super block
204  * @block_group:        given block group
205  *
206  * Read the bitmap for a given block_group, reading into the specified
207  * Read the block bitmap for a given block_group and verify that the
208  * bitmap, inode bitmap, and inode table blocks are marked in use.
209  * Return buffer_head on success or NULL in case of failure.
210  */
211 struct buffer_head *
212 read_block_bitmap(struct super_block *sb, unsigned int block_group)
213 {
214         int i;
215         struct ext4_group_desc * desc;
216         struct buffer_head * bh = NULL;
217         ext4_fsblk_t bitmap_blk;
218
219         desc = ext4_get_group_desc(sb, block_group, NULL);
220         if (!desc)
221                 return NULL;
222         bitmap_blk = ext4_block_bitmap(sb, desc);
223         if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
224                 bh = sb_getblk(sb, bitmap_blk);
225                 if (bh && !buffer_uptodate(bh)) {
226                         lock_buffer(bh);
227                         if (!buffer_uptodate(bh)) {
228                                 ext4_init_block_bitmap(sb, bh, block_group,
229                                                        desc);
230                                 set_buffer_uptodate(bh);
231                         }
232                         unlock_buffer(bh);
233                 }
234         } else {
235                 bh = sb_bread(sb, bitmap_blk);
236         }
237         if (!bh) {
238                 ext4_error(sb, __FUNCTION__, "Cannot read block bitmap - "
239                             "block_group = %d, block_bitmap = %llu",
240                             block_group, bitmap_blk);
241                 return NULL;
242         }
243         /* check whether block bitmap block number is set */
244         if (!block_in_use(bitmap_blk, sb, bh->b_data)) {
245                 /* bad block bitmap */
246                 goto error_out;
247         }
248
249         /* check whether the inode bitmap block number is set */
250         bitmap_blk = ext4_inode_bitmap(sb, desc);
251         if (!block_in_use(bitmap_blk, sb, bh->b_data)) {
252                 /* bad block bitmap */
253                 goto error_out;
254         }
255         /* check whether the inode table block number is set */
256         bitmap_blk = ext4_inode_table(sb, desc);
257         for (i = 0; i < EXT4_SB(sb)->s_itb_per_group; i++, bitmap_blk++) {
258                 if (!block_in_use(bitmap_blk, sb, bh->b_data)) {
259                         /* bad block bitmap */
260                         goto error_out;
261                 }
262         }
263
264         return bh;
265
266 error_out:
267         brelse(bh);
268         ext4_error(sb, __FUNCTION__,
269                         "Invalid block bitmap - "
270                         "block_group = %d, block = %llu",
271                         block_group, bitmap_blk);
272         return NULL;
273
274 }
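
The EXT4_BG_BLOCK_UNINIT branch above is an instance of check-lock-recheck lazy initialization: test buffer_uptodate() without the lock, take the buffer lock, test again, and only then build the bitmap in memory. The kernel's uptodate flag is manipulated with atomic bit operations, which is what makes the unlocked first check safe. The same shape in portable user-space C (a sketch with a plain flag, which would strictly need to be atomic for the unlocked read):

    #include <pthread.h>
    #include <stdbool.h>

    struct thing {
            pthread_mutex_t lock;
            bool ready;
            /* ... payload ... */
    };

    void lazy_init(struct thing *t, void (*build)(struct thing *))
    {
            if (!t->ready) {                /* cheap unlocked check   */
                    pthread_mutex_lock(&t->lock);
                    if (!t->ready) {        /* recheck under the lock */
                            build(t);       /* we won the init race   */
                            t->ready = true;
                    }
                    pthread_mutex_unlock(&t->lock);
            }
    }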
275 /*
276  * The reservation window structure operations
277  * --------------------------------------------
278  * Operations include:
279  * dump, find, add, remove, is_empty, find_next_reservable_window, etc.
280  *
281  * We use a red-black tree to represent per-filesystem reservation
282  * windows.
283  *
284  */
285
286 /**
287  * __rsv_window_dump() -- Dump the filesystem block allocation reservation map
288  * @rb_root:            root of per-filesystem reservation rb tree
289  * @verbose:            verbose mode
290  * @fn:                 function which wishes to dump the reservation map
291  *
292  * If verbose is turned on, it will print the whole block reservation
293  * windows(start, end). Otherwise, it will only print out the "bad" windows,
294  * those windows that overlap with their immediate neighbors.
295  */
296 #if 1
297 static void __rsv_window_dump(struct rb_root *root, int verbose,
298                               const char *fn)
299 {
300         struct rb_node *n;
301         struct ext4_reserve_window_node *rsv, *prev;
302         int bad;
303
304 restart:
305         n = rb_first(root);
306         bad = 0;
307         prev = NULL;
308
309         printk("Block Allocation Reservation Windows Map (%s):\n", fn);
310         while (n) {
311                 rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
312                 if (verbose)
313                         printk("reservation window 0x%p "
314                                "start:  %llu, end:  %llu\n",
315                                rsv, rsv->rsv_start, rsv->rsv_end);
316                 if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
317                         printk("Bad reservation %p (start >= end)\n",
318                                rsv);
319                         bad = 1;
320                 }
321                 if (prev && prev->rsv_end >= rsv->rsv_start) {
322                         printk("Bad reservation %p (prev->end >= start)\n",
323                                rsv);
324                         bad = 1;
325                 }
326                 if (bad) {
327                         if (!verbose) {
328                                 printk("Restarting reservation walk in verbose mode\n");
329                                 verbose = 1;
330                                 goto restart;
331                         }
332                 }
333                 n = rb_next(n);
334                 prev = rsv;
335         }
336         printk("Window map complete.\n");
337         if (bad)
338                 BUG();
339 }
340 #define rsv_window_dump(root, verbose) \
341         __rsv_window_dump((root), (verbose), __FUNCTION__)
342 #else
343 #define rsv_window_dump(root, verbose) do {} while (0)
344 #endif
345
346 /**
347  * goal_in_my_reservation()
348  * @rsv:                inode's reservation window
349  * @grp_goal:           given goal block relative to the allocation block group
350  * @group:              the current allocation block group
351  * @sb:                 filesystem super block
352  *
353  * Test if the given goal block (group relative) is within the file's
354  * own block reservation window range.
355  *
356  * If the reservation window is outside the goal allocation group, return 0;
357  * grp_goal (given goal block) could be -1, which means no specific
358  * goal block. In this case, always return 1.
359  * If the goal block is within the reservation window, return 1;
360  * otherwise, return 0;
361  */
362 static int
363 goal_in_my_reservation(struct ext4_reserve_window *rsv, ext4_grpblk_t grp_goal,
364                         unsigned int group, struct super_block * sb)
365 {
366         ext4_fsblk_t group_first_block, group_last_block;
367
368         group_first_block = ext4_group_first_block_no(sb, group);
369         group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);
370
371         if ((rsv->_rsv_start > group_last_block) ||
372             (rsv->_rsv_end < group_first_block))
373                 return 0;
374         if ((grp_goal >= 0) && ((grp_goal + group_first_block < rsv->_rsv_start)
375                 || (grp_goal + group_first_block > rsv->_rsv_end)))
376                 return 0;
377         return 1;
378 }
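
Restated, the two tests are: does the window intersect the group at all, and, if a concrete goal exists, does the goal fall inside the window. A standalone sketch of the same predicate, with all arguments as absolute block numbers:

    int window_covers(unsigned long long w_start, unsigned long long w_end,
                      unsigned long long grp_first, unsigned long long grp_last,
                      long long goal /* absolute, or -1 for "no goal" */)
    {
            if (w_start > grp_last || w_end < grp_first)
                    return 0;       /* window entirely outside this group */
            if (goal >= 0 && ((unsigned long long)goal < w_start ||
                              (unsigned long long)goal > w_end))
                    return 0;       /* specific goal outside the window   */
            return 1;
    }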
379
380 /**
381  * search_reserve_window()
382  * @rb_root:            root of reservation tree
383  * @goal:               target allocation block
384  *
385  * Find the reserved window which includes the goal, or the previous one
386  * if the goal is not in any window.
387  * Returns NULL if there are no windows or if all windows start after the goal.
388  */
389 static struct ext4_reserve_window_node *
390 search_reserve_window(struct rb_root *root, ext4_fsblk_t goal)
391 {
392         struct rb_node *n = root->rb_node;
393         struct ext4_reserve_window_node *rsv;
394
395         if (!n)
396                 return NULL;
397
398         do {
399                 rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
400
401                 if (goal < rsv->rsv_start)
402                         n = n->rb_left;
403                 else if (goal > rsv->rsv_end)
404                         n = n->rb_right;
405                 else
406                         return rsv;
407         } while (n);
408         /*
409          * We've fallen off the end of the tree: the goal wasn't inside
410          * any particular node.  OK, the previous node must be to one
411          * side of the interval containing the goal.  If it's the RHS,
412          * we need to back up one.
413          */
414         if (rsv->rsv_start > goal) {
415                 n = rb_prev(&rsv->rsv_node);
416                 rsv = rb_entry(n, struct ext4_reserve_window_node, rsv_node);
417         }
418         return rsv;
419 }
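
The search contract is easier to see on a sorted array of disjoint [start, end] windows standing in for the rb tree: return the window that contains the goal, else the closest window before it, else NULL. A linear sketch (the tree gives the same answer in O(log n)):

    struct win { unsigned long long start, end; };  /* sorted, disjoint */

    struct win *search_sketch(struct win *w, int n, unsigned long long goal)
    {
            struct win *prev = NULL;

            for (int i = 0; i < n; i++) {
                    if (goal < w[i].start)
                            break;          /* all later windows start after goal */
                    if (goal <= w[i].end)
                            return &w[i];   /* goal inside this window */
                    prev = &w[i];           /* window ends before goal */
            }
            return prev;    /* NULL if every window starts after the goal */
    }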
420
421 /**
422  * ext4_rsv_window_add() -- Insert a window to the block reservation rb tree.
423  * @sb:                 super block
424  * @rsv:                reservation window to add
425  *
426  * Must be called with rsv_lock held.
427  */
428 void ext4_rsv_window_add(struct super_block *sb,
429                     struct ext4_reserve_window_node *rsv)
430 {
431         struct rb_root *root = &EXT4_SB(sb)->s_rsv_window_root;
432         struct rb_node *node = &rsv->rsv_node;
433         ext4_fsblk_t start = rsv->rsv_start;
434
435         struct rb_node ** p = &root->rb_node;
436         struct rb_node * parent = NULL;
437         struct ext4_reserve_window_node *this;
438
439         while (*p)
440         {
441                 parent = *p;
442                 this = rb_entry(parent, struct ext4_reserve_window_node, rsv_node);
443
444                 if (start < this->rsv_start)
445                         p = &(*p)->rb_left;
446                 else if (start > this->rsv_end)
447                         p = &(*p)->rb_right;
448                 else {
449                         rsv_window_dump(root, 1);
450                         BUG();
451                 }
452         }
453
454         rb_link_node(node, parent, p);
455         rb_insert_color(node, root);
456 }
457
458 /**
459  * rsv_window_remove() -- unlink a window from the reservation rb tree
460  * @sb:                 super block
461  * @rsv:                reservation window to remove
462  *
463  * Mark the block reservation window as not allocated, and unlink it
464  * from the filesystem reservation window rb tree. Must be called with
465  * rsv_lock held.
466  */
467 static void rsv_window_remove(struct super_block *sb,
468                               struct ext4_reserve_window_node *rsv)
469 {
470         rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
471         rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
472         rsv->rsv_alloc_hit = 0;
473         rb_erase(&rsv->rsv_node, &EXT4_SB(sb)->s_rsv_window_root);
474 }
475
476 /*
477  * rsv_is_empty() -- Check if the reservation window is unallocated.
478  * @rsv:                given reservation window to check
479  *
480  * returns 1 if the end block is EXT4_RESERVE_WINDOW_NOT_ALLOCATED.
481  */
482 static inline int rsv_is_empty(struct ext4_reserve_window *rsv)
483 {
484         /* a valid reservation end block can never be 0 */
485         return rsv->_rsv_end == EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
486 }
487
488 /**
489  * ext4_init_block_alloc_info()
490  * @inode:              file inode structure
491  *
492  * Allocate and initialize the reservation window structure, and
493  * finally link the window to the ext4 inode structure.
494  *
495  * The reservation window structure is only dynamically allocated
496  * and linked to the ext4 inode the first time the open file
497  * needs a new block. So, before every ext4_new_block(s) call, for
498  * regular files, we should check whether the reservation window
499  * structure exists; if it does not, this function is called.
500  * Failure to do so will result in block reservation being turned off
501  * for that open file.
502  *
503  * This function is called from ext4_get_blocks_handle(); it is also
504  * called when setting the reservation window size through ioctl before
505  * the file is opened for write (needs block allocation).
506  *
507  * The caller must hold truncate_mutex when calling this function.
508  */
509 void ext4_init_block_alloc_info(struct inode *inode)
510 {
511         struct ext4_inode_info *ei = EXT4_I(inode);
512         struct ext4_block_alloc_info *block_i;
513         struct super_block *sb = inode->i_sb;
514
515         block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
516         if (block_i) {
517                 struct ext4_reserve_window_node *rsv = &block_i->rsv_window_node;
518
519                 rsv->rsv_start = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
520                 rsv->rsv_end = EXT4_RESERVE_WINDOW_NOT_ALLOCATED;
521
522                 /*
523                  * if filesystem is mounted with NORESERVATION, the goal
524                  * reservation window size is set to zero to indicate
525                  * block reservation is off
526                  */
527                 if (!test_opt(sb, RESERVATION))
528                         rsv->rsv_goal_size = 0;
529                 else
530                         rsv->rsv_goal_size = EXT4_DEFAULT_RESERVE_BLOCKS;
531                 rsv->rsv_alloc_hit = 0;
532                 block_i->last_alloc_logical_block = 0;
533                 block_i->last_alloc_physical_block = 0;
534         }
535         ei->i_block_alloc_info = block_i;
536 }
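
The caller-side pattern the comment above describes is simply a NULL check under truncate_mutex before allocating, e.g.:

    /* Sketch of the expected call site (truncate_mutex held). */
    if (!EXT4_I(inode)->i_block_alloc_info)
            ext4_init_block_alloc_info(inode);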
537
538 /**
539  * ext4_discard_reservation()
540  * @inode:              inode
541  *
542  * Discard (free) the block reservation window on last file close,
543  * on truncate, or at the final iput().
544  *
545  * It is called in three cases:
546  *      ext4_release_file(): the last writer closes the file
547  *      ext4_clear_inode(): the final iput(), when nothing links to this file.
548  *      ext4_truncate(): when the block indirect map is about to change.
549  *
550  */
551 void ext4_discard_reservation(struct inode *inode)
552 {
553         struct ext4_inode_info *ei = EXT4_I(inode);
554         struct ext4_block_alloc_info *block_i = ei->i_block_alloc_info;
555         struct ext4_reserve_window_node *rsv;
556         spinlock_t *rsv_lock = &EXT4_SB(inode->i_sb)->s_rsv_window_lock;
557
558         if (!block_i)
559                 return;
560
561         rsv = &block_i->rsv_window_node;
562         if (!rsv_is_empty(&rsv->rsv_window)) {
563                 spin_lock(rsv_lock);
564                 if (!rsv_is_empty(&rsv->rsv_window))
565                         rsv_window_remove(inode->i_sb, rsv);
566                 spin_unlock(rsv_lock);
567         }
568 }
569
570 /**
571  * ext4_free_blocks_sb() -- Free given blocks and update quota
572  * @handle:                     handle to this transaction
573  * @sb:                         super block
574  * @block:                      start physical block to free
575  * @count:                      number of blocks to free
576  * @pdquot_freed_blocks:        pointer to quota
577  */
578 void ext4_free_blocks_sb(handle_t *handle, struct super_block *sb,
579                          ext4_fsblk_t block, unsigned long count,
580                          unsigned long *pdquot_freed_blocks)
581 {
582         struct buffer_head *bitmap_bh = NULL;
583         struct buffer_head *gd_bh;
584         unsigned long block_group;
585         ext4_grpblk_t bit;
586         unsigned long i;
587         unsigned long overflow;
588         struct ext4_group_desc * desc;
589         struct ext4_super_block * es;
590         struct ext4_sb_info *sbi;
591         int err = 0, ret;
592         ext4_grpblk_t group_freed;
593
594         *pdquot_freed_blocks = 0;
595         sbi = EXT4_SB(sb);
596         es = sbi->s_es;
597         if (block < le32_to_cpu(es->s_first_data_block) ||
598             block + count < block ||
599             block + count > ext4_blocks_count(es)) {
600                 ext4_error (sb, "ext4_free_blocks",
601                             "Freeing blocks not in datazone - "
602                             "block = %llu, count = %lu", block, count);
603                 goto error_return;
604         }
605
606         ext4_debug ("freeing block(s) %llu-%llu\n", block, block + count - 1);
607
608 do_more:
609         overflow = 0;
610         ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
611         /*
612          * Check to see if we are freeing blocks across a group
613          * boundary.
614          */
615         if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
616                 overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
617                 count -= overflow;
618         }
619         brelse(bitmap_bh);
620         bitmap_bh = read_block_bitmap(sb, block_group);
621         if (!bitmap_bh)
622                 goto error_return;
623         desc = ext4_get_group_desc (sb, block_group, &gd_bh);
624         if (!desc)
625                 goto error_return;
626
627         if (in_range(ext4_block_bitmap(sb, desc), block, count) ||
628             in_range(ext4_inode_bitmap(sb, desc), block, count) ||
629             in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) ||
630             in_range(block + count - 1, ext4_inode_table(sb, desc),
631                      sbi->s_itb_per_group))
632                 ext4_error (sb, "ext4_free_blocks",
633                             "Freeing blocks in system zones - "
634                             "Block = %llu, count = %lu",
635                             block, count);
636
637         /*
638          * We are about to start releasing blocks in the bitmap,
639          * so we need undo access.
640          */
641         /* @@@ check errors */
642         BUFFER_TRACE(bitmap_bh, "getting undo access");
643         err = ext4_journal_get_undo_access(handle, bitmap_bh);
644         if (err)
645                 goto error_return;
646
647         /*
648          * We are about to modify some metadata.  Call the journal APIs
649          * to unshare ->b_data if a currently-committing transaction is
650          * using it
651          */
652         BUFFER_TRACE(gd_bh, "get_write_access");
653         err = ext4_journal_get_write_access(handle, gd_bh);
654         if (err)
655                 goto error_return;
656
657         jbd_lock_bh_state(bitmap_bh);
658
659         for (i = 0, group_freed = 0; i < count; i++) {
660                 /*
661                  * An HJ special.  This is expensive...
662                  */
663 #ifdef CONFIG_JBD2_DEBUG
664                 jbd_unlock_bh_state(bitmap_bh);
665                 {
666                         struct buffer_head *debug_bh;
667                         debug_bh = sb_find_get_block(sb, block + i);
668                         if (debug_bh) {
669                                 BUFFER_TRACE(debug_bh, "Deleted!");
670                                 if (!bh2jh(bitmap_bh)->b_committed_data)
671                                         BUFFER_TRACE(debug_bh,
672                                                 "No committed data in bitmap");
673                                 BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
674                                 __brelse(debug_bh);
675                         }
676                 }
677                 jbd_lock_bh_state(bitmap_bh);
678 #endif
679                 if (need_resched()) {
680                         jbd_unlock_bh_state(bitmap_bh);
681                         cond_resched();
682                         jbd_lock_bh_state(bitmap_bh);
683                 }
684                 /* @@@ This prevents newly-allocated data from being
685                  * freed and then reallocated within the same
686                  * transaction.
687                  *
688                  * Ideally we would want to allow that to happen, but to
689                  * do so requires making jbd2_journal_forget() capable of
690                  * revoking the queued write of a data block, which
691                  * implies blocking on the journal lock.  *forget()
692                  * cannot block due to truncate races.
693                  *
694                  * Eventually we can fix this by making jbd2_journal_forget()
695                  * return a status indicating whether or not it was able
696                  * to revoke the buffer.  On successful revoke, it is
697                  * safe not to set the allocation bit in the committed
698                  * bitmap, because we know that there is no outstanding
699                  * activity on the buffer any more and so it is safe to
700                  * reallocate it.
701                  */
702                 BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
703                 J_ASSERT_BH(bitmap_bh,
704                                 bh2jh(bitmap_bh)->b_committed_data != NULL);
705                 ext4_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
706                                 bh2jh(bitmap_bh)->b_committed_data);
707
708                 /*
709                  * We clear the bit in the bitmap after setting the committed
710                  * data bit, because this is the reverse order to that which
711                  * the allocator uses.
712                  */
713                 BUFFER_TRACE(bitmap_bh, "clear bit");
714                 if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
715                                                 bit + i, bitmap_bh->b_data)) {
716                         jbd_unlock_bh_state(bitmap_bh);
717                         ext4_error(sb, __FUNCTION__,
718                                    "bit already cleared for block %llu",
719                                    (ext4_fsblk_t)(block + i));
720                         jbd_lock_bh_state(bitmap_bh);
721                         BUFFER_TRACE(bitmap_bh, "bit already cleared");
722                 } else {
723                         group_freed++;
724                 }
725         }
726         jbd_unlock_bh_state(bitmap_bh);
727
728         spin_lock(sb_bgl_lock(sbi, block_group));
729         desc->bg_free_blocks_count =
730                 cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) +
731                         group_freed);
732         desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc);
733         spin_unlock(sb_bgl_lock(sbi, block_group));
734         percpu_counter_add(&sbi->s_freeblocks_counter, count);
735
736         /* We dirtied the bitmap block */
737         BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
738         err = ext4_journal_dirty_metadata(handle, bitmap_bh);
739
740         /* And the group descriptor block */
741         BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
742         ret = ext4_journal_dirty_metadata(handle, gd_bh);
743         if (!err) err = ret;
744         *pdquot_freed_blocks += group_freed;
745
746         if (overflow && !err) {
747                 block += count;
748                 count = overflow;
749                 goto do_more;
750         }
751         sb->s_dirt = 1;
752 error_return:
753         brelse(bitmap_bh);
754         ext4_std_error(sb, err);
755         return;
756 }
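
The do_more/overflow dance above is easier to see in isolation: split an arbitrary (block, count) range into chunks that each stay inside a single block group. A sketch, where free_in_group() is a hypothetical stand-in for the real bitmap and descriptor updates:

    /* Hypothetical sink for the per-group work done by the code above. */
    void free_in_group(unsigned long group, unsigned long bit,
                       unsigned long count);

    void free_range(unsigned long long block, unsigned long count,
                    unsigned long long first_data_block,
                    unsigned long blocks_per_group)
    {
            while (count) {
                    unsigned long long rel = block - first_data_block;
                    unsigned long group = rel / blocks_per_group;
                    unsigned long bit   = rel % blocks_per_group;
                    unsigned long chunk = count;

                    if (bit + chunk > blocks_per_group)
                            chunk = blocks_per_group - bit; /* clamp to group end */

                    free_in_group(group, bit, chunk);

                    block += chunk;
                    count -= chunk;
            }
    }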
757
758 /**
759  * ext4_free_blocks() -- Free given blocks and update quota
760  * @handle:             handle for this transaction
761  * @inode:              inode
762  * @block:              start physical block to free
763  * @count:              number of blocks to free
764  */
765 void ext4_free_blocks(handle_t *handle, struct inode *inode,
766                         ext4_fsblk_t block, unsigned long count)
767 {
768         struct super_block * sb;
769         unsigned long dquot_freed_blocks;
770
771         sb = inode->i_sb;
772         if (!sb) {
773                 printk ("ext4_free_blocks: nonexistent device");
774                 return;
775         }
776         ext4_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
777         if (dquot_freed_blocks)
778                 DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
779         return;
780 }
781
782 /**
783  * ext4_test_allocatable()
784  * @nr:                 block number (group relative) to test
785  * @bh:                 bufferhead containing the bitmap of the block group
786  *
787  * For ext4 allocations, we must not reuse any blocks which are
788  * allocated in the bitmap buffer's "last committed data" copy.  This
789  * prevents deletes from freeing up the page for reuse until we have
790  * committed the delete transaction.
791  *
792  * If we didn't do this, then deleting something and reallocating it as
793  * data would allow the old block to be overwritten before the
794  * transaction committed (because we force data to disk before commit).
795  * This would lead to corruption if we crashed between overwriting the
796  * data and committing the delete.
797  *
798  * @@@ We may want to make this allocation behaviour conditional on
799  * data-writes at some point, and disable it for metadata allocations or
800  * sync-data inodes.
801  */
802 static int ext4_test_allocatable(ext4_grpblk_t nr, struct buffer_head *bh)
803 {
804         int ret;
805         struct journal_head *jh = bh2jh(bh);
806
807         if (ext4_test_bit(nr, bh->b_data))
808                 return 0;
809
810         jbd_lock_bh_state(bh);
811         if (!jh->b_committed_data)
812                 ret = 1;
813         else
814                 ret = !ext4_test_bit(nr, jh->b_committed_data);
815         jbd_unlock_bh_state(bh);
816         return ret;
817 }
818
819 /**
820  * bitmap_search_next_usable_block()
821  * @start:              the starting block (group relative) of the search
822  * @bh:                 bufferhead contains the block group bitmap
823  * @maxblocks:          the ending block (group relative) of the reservation
824  *
825  * The bitmap search --- search forward alternately through the actual
826  * bitmap on disk and the last-committed copy in journal, until we find a
827  * bit free in both bitmaps.
828  */
829 static ext4_grpblk_t
830 bitmap_search_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
831                                         ext4_grpblk_t maxblocks)
832 {
833         ext4_grpblk_t next;
834         struct journal_head *jh = bh2jh(bh);
835
836         while (start < maxblocks) {
837                 next = ext4_find_next_zero_bit(bh->b_data, maxblocks, start);
838                 if (next >= maxblocks)
839                         return -1;
840                 if (ext4_test_allocatable(next, bh))
841                         return next;
842                 jbd_lock_bh_state(bh);
843                 if (jh->b_committed_data)
844                         start = ext4_find_next_zero_bit(jh->b_committed_data,
845                                                         maxblocks, next);
846                 jbd_unlock_bh_state(bh);
847         }
848         return -1;
849 }
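
In user-space terms, with two plain bit arrays standing in for the on-disk bitmap and its last-committed copy, the alternating search looks like the sketch below (the helpers are local stand-ins for the kernel's bit primitives):

    static int test_bit_in(const unsigned char *map, long nr)
    {
            return map[nr >> 3] & (1 << (nr & 7));
    }

    static long find_next_zero(const unsigned char *map, long max, long start)
    {
            for (long i = start; i < max; i++)
                    if (!test_bit_in(map, i))
                            return i;
            return max;
    }

    long next_usable(const unsigned char *cur, const unsigned char *committed,
                     long start, long max)
    {
            while (start < max) {
                    long next = find_next_zero(cur, max, start);
                    if (next >= max)
                            return -1;      /* nothing free on disk      */
                    if (!committed || !test_bit_in(committed, next))
                            return next;    /* free in both copies: done */
                    /* busy in the committed copy: skip past its set bits */
                    start = find_next_zero(committed, max, next);
            }
            return -1;
    }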
850
851 /**
852  * find_next_usable_block()
853  * @start:              the starting block (group relative) to find next
854  *                      allocatable block in bitmap.
855  * @bh:                 bufferhead contains the block group bitmap
856  * @maxblocks:          the ending block (group relative) for the search
857  *
858  * Find an allocatable block in a bitmap.  We honor both the bitmap and
859  * its last-committed copy (if that exists), and perform the "most
860  * appropriate allocation" algorithm of looking for a free block near
861  * the initial goal; then for a free byte somewhere in the bitmap; then
862  * for any free bit in the bitmap.
863  */
864 static ext4_grpblk_t
865 find_next_usable_block(ext4_grpblk_t start, struct buffer_head *bh,
866                         ext4_grpblk_t maxblocks)
867 {
868         ext4_grpblk_t here, next;
869         char *p, *r;
870
871         if (start > 0) {
872                 /*
873                  * The goal was occupied; search forward for a free
874                  * block within the next XX blocks.
875                  *
876                  * end_goal is more or less random, but it has to be
877                  * less than EXT4_BLOCKS_PER_GROUP. Aligning up to the
878                  * next 64-bit boundary is simple.
879                  */
880                 ext4_grpblk_t end_goal = (start + 63) & ~63;
881                 if (end_goal > maxblocks)
882                         end_goal = maxblocks;
883                 here = ext4_find_next_zero_bit(bh->b_data, end_goal, start);
884                 if (here < end_goal && ext4_test_allocatable(here, bh))
885                         return here;
886                 ext4_debug("Bit not found near goal\n");
887         }
888
889         here = start;
890         if (here < 0)
891                 here = 0;
892
893         p = ((char *)bh->b_data) + (here >> 3);
894         r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
895         next = (r - ((char *)bh->b_data)) << 3;
896
897         if (next < maxblocks && next >= start && ext4_test_allocatable(next, bh))
898                 return next;
899
900         /*
901          * The bitmap search --- search forward alternately through the actual
902          * bitmap and the last-committed copy until we find a bit free in
903          * both
904          */
905         here = bitmap_search_next_usable_block(here, bh, maxblocks);
906         return here;
907 }
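
The memscan() step in the middle is the "free byte" heuristic: an all-zero byte in the bitmap marks eight adjacent free blocks. The kernel's memscan() returns a pointer to the first byte equal to the target, or the end of the area if there is none; a self-contained sketch of that step:

    #include <stddef.h>

    /* Local stand-in with the kernel memscan() contract. */
    static void *memscan_sketch(void *addr, int c, size_t size)
    {
            unsigned char *p = addr;

            while (size-- && *p != (unsigned char)c)
                    p++;
            return p;
    }

    /* May return a bit number >= max_bits when no all-zero byte exists;
     * the caller must re-check the result, as the code above does. */
    long first_free_byte_bit(char *bitmap, long start_bit, long max_bits)
    {
            char *p = bitmap + (start_bit >> 3);
            size_t n = ((max_bits + 7) >> 3) - (start_bit >> 3);
            char *r = memscan_sketch(p, 0, n);

            return (r - bitmap) << 3;
    }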
908
909 /**
910  * claim_block()
911  * @block:              the free block (group relative) to allocate
912  * @bh:                 the bufferhead contains the block group bitmap
913  *
914  * We think we can allocate this block in this bitmap.  Try to set the bit.
915  * If that succeeds then check that nobody has allocated and then freed the
916  * block since we saw that it was not marked in b_committed_data.  If it _was_
917  * allocated and freed then clear the bit in the bitmap again and return
918  * zero (failure).
919  */
920 static inline int
921 claim_block(spinlock_t *lock, ext4_grpblk_t block, struct buffer_head *bh)
922 {
923         struct journal_head *jh = bh2jh(bh);
924         int ret;
925
926         if (ext4_set_bit_atomic(lock, block, bh->b_data))
927                 return 0;
928         jbd_lock_bh_state(bh);
929         if (jh->b_committed_data && ext4_test_bit(block, jh->b_committed_data)) {
930                 ext4_clear_bit_atomic(lock, block, bh->b_data);
931                 ret = 0;
932         } else {
933                 ret = 1;
934         }
935         jbd_unlock_bh_state(bh);
936         return ret;
937 }
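
A user-space rendition of the claim protocol, with C11 atomics in place of ext4_set_bit_atomic()/ext4_clear_bit_atomic() and the journal-head locking omitted, to show just the set-then-verify-then-undo shape:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define BITS_PER_WORD (8 * sizeof(unsigned long))

    bool claim_bit(_Atomic unsigned long *map, const unsigned long *committed,
                   unsigned long nr)
    {
            unsigned long mask = 1UL << (nr % BITS_PER_WORD);
            _Atomic unsigned long *word = &map[nr / BITS_PER_WORD];

            if (atomic_fetch_or(word, mask) & mask)
                    return false;   /* bit was already set: we lost the race */

            if (committed && (committed[nr / BITS_PER_WORD] & mask)) {
                    /* freed by a still-committing transaction: give it back */
                    atomic_fetch_and(word, ~mask);
                    return false;
            }
            return true;
    }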
938
939 /**
940  * ext4_try_to_allocate()
941  * @sb:                 superblock
942  * @handle:             handle to this transaction
943  * @group:              given allocation block group
944  * @bitmap_bh:          bufferhead holds the block bitmap
945  * @grp_goal:           given target block within the group
946  * @count:              target number of blocks to allocate
947  * @my_rsv:             reservation window
948  *
949  * Attempt to allocate blocks within a given range. Set the range of the
950  * allocation first, then find the first free bit(s) in the bitmap (within
951  * the range), and finally allocate the blocks by claiming the free bits.
952  *
953  * To set the range of this allocation:
954  *      if there is a reservation window, only try to allocate block(s) from
955  *      the file's own reservation window;
956  *      otherwise, the allocation range starts at the given goal block and
957  *      ends at the block group's last block.
958  *
959  * If we failed to allocate the desired block then we may end up crossing to a
960  * new bitmap.  In that case we must release write access to the old one via
961  * ext4_journal_release_buffer(), else we'll run out of credits.
962  */
963 static ext4_grpblk_t
964 ext4_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
965                         struct buffer_head *bitmap_bh, ext4_grpblk_t grp_goal,
966                         unsigned long *count, struct ext4_reserve_window *my_rsv)
967 {
968         ext4_fsblk_t group_first_block;
969         ext4_grpblk_t start, end;
970         unsigned long num = 0;
971
972         /* we do allocation within the reservation window if we have a window */
973         if (my_rsv) {
974                 group_first_block = ext4_group_first_block_no(sb, group);
975                 if (my_rsv->_rsv_start >= group_first_block)
976                         start = my_rsv->_rsv_start - group_first_block;
977                 else
978                         /* reservation window crosses group boundary */
979                         start = 0;
980                 end = my_rsv->_rsv_end - group_first_block + 1;
981                 if (end > EXT4_BLOCKS_PER_GROUP(sb))
982                         /* reservation window crosses group boundary */
983                         end = EXT4_BLOCKS_PER_GROUP(sb);
984                 if ((start <= grp_goal) && (grp_goal < end))
985                         start = grp_goal;
986                 else
987                         grp_goal = -1;
988         } else {
989                 if (grp_goal > 0)
990                         start = grp_goal;
991                 else
992                         start = 0;
993                 end = EXT4_BLOCKS_PER_GROUP(sb);
994         }
995
996         BUG_ON(start > EXT4_BLOCKS_PER_GROUP(sb));
997
998 repeat:
999         if (grp_goal < 0 || !ext4_test_allocatable(grp_goal, bitmap_bh)) {
1000                 grp_goal = find_next_usable_block(start, bitmap_bh, end);
1001                 if (grp_goal < 0)
1002                         goto fail_access;
1003                 if (!my_rsv) {
1004                         int i;
1005
1006                         for (i = 0; i < 7 && grp_goal > start &&
1007                                         ext4_test_allocatable(grp_goal - 1,
1008                                                                 bitmap_bh);
1009                                         i++, grp_goal--)
1010                                 ;
1011                 }
1012         }
1013         start = grp_goal;
1014
1015         if (!claim_block(sb_bgl_lock(EXT4_SB(sb), group),
1016                 grp_goal, bitmap_bh)) {
1017                 /*
1018                  * The block was allocated by another thread, or it was
1019                  * allocated and then freed by another thread
1020                  */
1021                 start++;
1022                 grp_goal++;
1023                 if (start >= end)
1024                         goto fail_access;
1025                 goto repeat;
1026         }
1027         num++;
1028         grp_goal++;
1029         while (num < *count && grp_goal < end
1030                 && ext4_test_allocatable(grp_goal, bitmap_bh)
1031                 && claim_block(sb_bgl_lock(EXT4_SB(sb), group),
1032                                 grp_goal, bitmap_bh)) {
1033                 num++;
1034                 grp_goal++;
1035         }
1036         *count = num;
1037         return grp_goal - num;
1038 fail_access:
1039         *count = num;
1040         return -1;
1041 }
1042
1043 /**
1044  *      find_next_reservable_window():
1045  *              find a reservable space within the given range.
1046  *              It does not allocate the reservation window for now:
1047  *              alloc_new_reservation() will do the work later.
1048  *
1049  *      @search_head: the head of the searching list;
1050  *              This is not necessarily the list head of the whole filesystem
1051  *
1052  *              We have both head and start_block to assist the search
1053  *              for the reservable space. The list starts from head,
1054  *              but we will shift to the place where start_block is,
1055  *              then start from there, when looking for a reservable space.
1056  *
1057  *      @size: the target new reservation window size
1058  *
1059  *      @group_first_block: the first block we consider to start
1060  *                      the real search from
1061  *
1062  *      @last_block:
1063  *              the maximum block number that our goal reservable space
1064  *              could start from. This is normally the last block in this
1065  *              group. The search ends when the start of the next possible
1066  *              reservable space is beyond this boundary.
1067  *              This handles a reservation window request that crosses the
1068  *              group boundary.
1069  *
1070  *      Basically, we search the given range (start_block, last_block),
1071  *      rather than the whole reservation red-black tree, to find a
1072  *      free region that is of the requested size and has not
1073  *      been reserved.
1074  *
1075  */
1076 static int find_next_reservable_window(
1077                                 struct ext4_reserve_window_node *search_head,
1078                                 struct ext4_reserve_window_node *my_rsv,
1079                                 struct super_block * sb,
1080                                 ext4_fsblk_t start_block,
1081                                 ext4_fsblk_t last_block)
1082 {
1083         struct rb_node *next;
1084         struct ext4_reserve_window_node *rsv, *prev;
1085         ext4_fsblk_t cur;
1086         int size = my_rsv->rsv_goal_size;
1087
1088         /* TODO: make the start of the reservation window byte-aligned */
1089         /* cur = *start_block & ~7;*/
1090         cur = start_block;
1091         rsv = search_head;
1092         if (!rsv)
1093                 return -1;
1094
1095         while (1) {
1096                 if (cur <= rsv->rsv_end)
1097                         cur = rsv->rsv_end + 1;
1098
1099                 /* TODO?
1100                  * in the case we could not find a reservable space of the
1101                  * expected size, we could, during the re-search, remember
1102                  * the largest reservable space we could have had and
1103                  * return that one.
1104                  *
1105                  * For now it will fail if we could not find a reservable
1106                  * space of the expected size (or more)...
1107                  */
1108                 if (cur > last_block)
1109                         return -1;              /* fail */
1110
1111                 prev = rsv;
1112                 next = rb_next(&rsv->rsv_node);
1113                 rsv = rb_entry(next, struct ext4_reserve_window_node, rsv_node);
1114
1115                 /*
1116                  * Reached the last reservation, we can just append to the
1117                  * previous one.
1118                  */
1119                 if (!next)
1120                         break;
1121
1122                 if (cur + size <= rsv->rsv_start) {
1123                         /*
1124                          * Found a reservable space big enough.  We could
1125                          * have a reservation across the group boundary here
1126                          */
1127                         break;
1128                 }
1129         }
1130         /*
1131          * We come here either:
1132          * when we reach the end of the whole list and there is empty
1133          * reservable space after the last entry in the list, in which
1134          * case we append the new window to the end of the list;
1135          *
1136          * or when we found reservable space in the middle of the list,
1137          * in which case we return the reservation window that we could
1138          * append to. Success.
1139          */
1140
1141         if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->rsv_window)))
1142                 rsv_window_remove(sb, my_rsv);
1143
1144         /*
1145          * Let's book the whole available window for now.  We will check the
1146          * disk bitmap later and then, if there are free blocks, we adjust
1147          * the window size if it's larger than requested.
1148          * Otherwise, we will remove this node from the tree the next time
1149          * find_next_reservable_window() is called.
1150          */
1151         my_rsv->rsv_start = cur;
1152         my_rsv->rsv_end = cur + size - 1;
1153         my_rsv->rsv_alloc_hit = 0;
1154
1155         if (prev != my_rsv)
1156                 ext4_rsv_window_add(sb, my_rsv);
1157
1158         return 0;
1159 }
1160
1161 /**
1162  *      alloc_new_reservation()--allocate a new reservation window
1163  *
1164  *              To make a new reservation, we search part of the filesystem
1165  *              reservation list (the list that inside the group). We try to
1166  *              allocate a new reservation window near the allocation goal,
1167  *              or the beginning of the group, if there is no goal.
1168  *
1169  *              We first find a reservable space after the goal, then from
1170  *              there, we check the bitmap for the first free block after
1171  *              it. If there is no free block until the end of group, then the
1172  *              whole group is full, we failed. Otherwise, check if the free
1173  *              block is inside the expected reservable space, if so, we
1174  *              succeed.
1175  *              If the first free block is outside the reservable space, then
1176  *              start from the first free block, we search for next available
1177  *              space, and go on.
1178  *
1179  *      On success, a new reservation is found and inserted into the tree;
1180  *      it contains at least one free block, and it does not overlap with
1181  *      other reservation windows.
1182  *
1183  *      On failure, no reservation window was found in this group.
1184  *
1185  *      @rsv: the reservation
1186  *
1187  *      @grp_goal: The goal (group-relative).  It is where the search for a
1188  *              free reservable space should start from.
1189  *              if we have a grp_goal (grp_goal >= 0), then start from there;
1190  *              with no grp_goal (grp_goal = -1), we start from the first block
1191  *              of the group.
1192  *
1193  *      @sb: the super block
1194  *      @group: the group we are trying to allocate in
1195  *      @bitmap_bh: the block group block bitmap
1196  *
1197  */
1198 static int alloc_new_reservation(struct ext4_reserve_window_node *my_rsv,
1199                 ext4_grpblk_t grp_goal, struct super_block *sb,
1200                 unsigned int group, struct buffer_head *bitmap_bh)
1201 {
1202         struct ext4_reserve_window_node *search_head;
1203         ext4_fsblk_t group_first_block, group_end_block, start_block;
1204         ext4_grpblk_t first_free_block;
1205         struct rb_root *fs_rsv_root = &EXT4_SB(sb)->s_rsv_window_root;
1206         unsigned long size;
1207         int ret;
1208         spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock;
1209
1210         group_first_block = ext4_group_first_block_no(sb, group);
1211         group_end_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);
1212
1213         if (grp_goal < 0)
1214                 start_block = group_first_block;
1215         else
1216                 start_block = grp_goal + group_first_block;
1217
1218         size = my_rsv->rsv_goal_size;
1219
1220         if (!rsv_is_empty(&my_rsv->rsv_window)) {
1221                 /*
1222                  * if the old reservation crosses the group boundary
1223                  * and the goal is inside the old reservation window,
1224                  * we come here when we just failed to allocate from
1225                  * the first part of the window. We still have another part
1226                  * that belongs to the next group. In this case, there is no
1227                  * point in discarding our window and trying to allocate a
1228                  * new one in this group (which will fail); we should
1229                  * keep the reservation window and simply move on.
1230                  *
1231                  * Maybe we could shift the start block of the reservation
1232                  * window to the first block of next group.
1233                  */
1234
1235                 if ((my_rsv->rsv_start <= group_end_block) &&
1236                                 (my_rsv->rsv_end > group_end_block) &&
1237                                 (start_block >= my_rsv->rsv_start))
1238                         return -1;
1239
1240                 if ((my_rsv->rsv_alloc_hit >
1241                      (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
1242                         /*
1243                          * if the previous allocation hit ratio is
1244                          * greater than 1/2, then we double the size of
1245                          * the reservation window the next time,
1246                          * otherwise we keep the same size window
1247                          */
1248                         size = size * 2;
1249                         if (size > EXT4_MAX_RESERVE_BLOCKS)
1250                                 size = EXT4_MAX_RESERVE_BLOCKS;
1251                         my_rsv->rsv_goal_size = size;
1252                 }
1253         }
1254
1255         spin_lock(rsv_lock);
1256         /*
1257          * shift the search start to the window near the goal block
1258          */
1259         search_head = search_reserve_window(fs_rsv_root, start_block);
1260
1261         /*
1262          * find_next_reservable_window() simply finds a reservable window
1263          * inside the given range(start_block, group_end_block).
1264          *
1265          * To make sure the reservation window has a free bit inside it, we
1266          * need to check the bitmap after we found a reservable window.
1267          */
1268 retry:
1269         ret = find_next_reservable_window(search_head, my_rsv, sb,
1270                                                 start_block, group_end_block);
1271
1272         if (ret == -1) {
1273                 if (!rsv_is_empty(&my_rsv->rsv_window))
1274                         rsv_window_remove(sb, my_rsv);
1275                 spin_unlock(rsv_lock);
1276                 return -1;
1277         }
1278
1279         /*
1280          * On success, find_next_reservable_window() returns the
1281          * reservation window where there is a reservable space after it.
1282          * Before we reserve this reservable space, we need
1283          * to make sure there is at least a free block inside this region.
1284          *
1285          * We search for the first free bit in the block bitmap and the copy
1286          * of the last committed bitmap alternately, until we find an
1287          * allocatable block. The search starts from the start block of the
1288          * reservable space we just found.
1289          */
1290         spin_unlock(rsv_lock);
1291         first_free_block = bitmap_search_next_usable_block(
1292                         my_rsv->rsv_start - group_first_block,
1293                         bitmap_bh, group_end_block - group_first_block + 1);
1294
1295         if (first_free_block < 0) {
1296                 /*
1297                  * no free block left in the bitmap, so there is no point
1298                  * in reserving the space. Return failure.
1299                  */
1300                 spin_lock(rsv_lock);
1301                 if (!rsv_is_empty(&my_rsv->rsv_window))
1302                         rsv_window_remove(sb, my_rsv);
1303                 spin_unlock(rsv_lock);
1304                 return -1;              /* failed */
1305         }
1306
1307         start_block = first_free_block + group_first_block;
1308         /*
1309          * check if the first free block is within the
1310          * free space we just reserved
1311          */
1312         if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end)
1313                 return 0;               /* success */
1314         /*
1315          * if the first free bit we found is outside the reservable space,
1316          * continue the search for the next reservable space,
1317          * starting from where the free block is;
1318          * we also shift the search head to where we stopped last time
1319          */
1320         search_head = my_rsv;
1321         spin_lock(rsv_lock);
1322         goto retry;
1323 }
1324
1325 /**
1326  * try_to_extend_reservation()
1327  * @my_rsv:             given reservation window
1328  * @sb:                 super block
1329  * @size:               the delta to extend
1330  *
1331  * Attempt to expand the reservation window so that it is large enough
1332  * to hold the required number of free blocks.
1333  *
1334  * Since ext4_try_to_allocate() will always allocate blocks within
1335  * the reservation window range, if the window size is too small,
1336  * a multiple-block allocation has to stop at the end of the reservation
1337  * window. To make this more efficient, given the total number of
1338  * blocks needed and the current size of the window, we try to
1339  * expand the reservation window size if necessary on a best-effort
1340  * basis before ext4_new_blocks() tries to allocate blocks.
1341  */
1342 static void try_to_extend_reservation(struct ext4_reserve_window_node *my_rsv,
1343                         struct super_block *sb, int size)
1344 {
1345         struct ext4_reserve_window_node *next_rsv;
1346         struct rb_node *next;
1347         spinlock_t *rsv_lock = &EXT4_SB(sb)->s_rsv_window_lock;
1348
1349         if (!spin_trylock(rsv_lock))
1350                 return;
1351
1352         next = rb_next(&my_rsv->rsv_node);
1353
1354         if (!next)
1355                 my_rsv->rsv_end += size;
1356         else {
1357                 next_rsv = rb_entry(next, struct ext4_reserve_window_node, rsv_node);
1358
1359                 if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
1360                         my_rsv->rsv_end += size;
1361                 else
1362                         my_rsv->rsv_end = next_rsv->rsv_start - 1;
1363         }
1364         spin_unlock(rsv_lock);
1365 }
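
For reference, the rule above reduces to simple interval arithmetic. A hypothetical userspace sketch (the struct and names are invented for illustration, not ext4 types):

/* Grow `me` by `size` blocks on a best-effort basis, never overlapping
 * the next window; the three branches mirror the three cases above. */
struct win { long start, end; };

void try_extend(struct win *me, const struct win *next, long size)
{
        if (!next)                              /* rightmost window */
                me->end += size;
        else if (next->start - me->end - 1 >= size)
                me->end += size;                /* the gap is large enough */
        else
                me->end = next->start - 1;      /* stop just short of next */
}

Note that the kernel version only does this under spin_trylock(): extending a window is an optimization, so it is simply skipped when the tree lock is contended.
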
1366
1367 /**
1368  * ext4_try_to_allocate_with_rsv()
1369  * @sb:                 superblock
1370  * @handle:             handle to this transaction
1371  * @group:              given allocation block group
1372  * @bitmap_bh:          bufferhead holds the block bitmap
1373  * @grp_goal:           given target block within the group
1374  * @count:              target number of blocks to allocate
1375  * @my_rsv:             reservation window
1376  * @errp:               pointer to store the error code
1377  *
1378  * This is the main function used to allocate a new block and its reservation
1379  * window.
1380  *
1381  * Each time a new block allocation is needed, we first try to allocate from
1382  * the inode's own reservation.  If it has no reservation window, then rather
1383  * than looking for a free bit in the bitmap first and then checking the
1384  * reservation list to see whether it falls inside somebody else's window,
1385  * we try to allocate a reservation window for it, starting from the goal.
1386  * Then we do the block allocation within the reservation window.
1387  *
1388  * This avoids searching the reservation list again and
1389  * again when somebody is looking for a free block (without a
1390  * reservation) and there are lots of free blocks, but they are all
1391  * being reserved.
1392  *
1393  * We use a red-black tree for the per-filesystem reservation list.
1394  *
1395  */
1396 static ext4_grpblk_t
1397 ext4_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
1398                         unsigned int group, struct buffer_head *bitmap_bh,
1399                         ext4_grpblk_t grp_goal,
1400                         struct ext4_reserve_window_node * my_rsv,
1401                         unsigned long *count, int *errp)
1402 {
1403         ext4_fsblk_t group_first_block, group_last_block;
1404         ext4_grpblk_t ret = 0;
1405         int fatal;
1406         unsigned long num = *count;
1407
1408         *errp = 0;
1409
1410         /*
1411          * Make sure we use undo access for the bitmap, because it is critical
1412          * that we do the frozen_data COW on bitmap buffers in all cases even
1413          * if the buffer is in BJ_Forget state in the committing transaction.
1414          */
1415         BUFFER_TRACE(bitmap_bh, "get undo access for new block");
1416         fatal = ext4_journal_get_undo_access(handle, bitmap_bh);
1417         if (fatal) {
1418                 *errp = fatal;
1419                 return -1;
1420         }
1421
1422         /*
1423          * We don't deal with reservations when
1424          * the filesystem is mounted without reservations,
1425          * or the file is not a regular file,
1426          * or the last attempt to allocate a block with reservations turned on failed.
1427          */
1428         if (my_rsv == NULL) {
1429                 ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh,
1430                                                 grp_goal, count, NULL);
1431                 goto out;
1432         }
1433         /*
1434          * grp_goal is a group-relative block number (if there is a goal):
1435          * 0 <= grp_goal < EXT4_BLOCKS_PER_GROUP(sb).
1436          * group_first_block is a filesystem-wide block number:
1437          * the block number of the first block in this group.
1438          */
1439         group_first_block = ext4_group_first_block_no(sb, group);
1440         group_last_block = group_first_block + (EXT4_BLOCKS_PER_GROUP(sb) - 1);
1441
1442         /*
1443          * Basically we will allocate a new block from inode's reservation
1444          * window.
1445          *
1446          * We need to allocate a new reservation window, if:
1447          * a) inode does not have a reservation window; or
1448          * b) last attempt to allocate a block from existing reservation
1449          *    failed; or
1450          * c) we come here with a goal and with a reservation window
1451          *
1452          * We do not need to allocate a new reservation window if we come here
1453          * at the beginning with a goal and the goal is inside the window, or
1454          * we don't have a goal but already have a reservation window;
1455          * in that case we can allocate from the reservation window directly.
1456          */
1457         while (1) {
1458                 if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
1459                         !goal_in_my_reservation(&my_rsv->rsv_window,
1460                                                 grp_goal, group, sb)) {
1461                         if (my_rsv->rsv_goal_size < *count)
1462                                 my_rsv->rsv_goal_size = *count;
1463                         ret = alloc_new_reservation(my_rsv, grp_goal, sb,
1464                                                         group, bitmap_bh);
1465                         if (ret < 0)
1466                                 break;                  /* failed */
1467
1468                         if (!goal_in_my_reservation(&my_rsv->rsv_window,
1469                                                         grp_goal, group, sb))
1470                                 grp_goal = -1;
1471                 } else if (grp_goal >= 0) {
1472                         int curr = my_rsv->rsv_end -
1473                                         (grp_goal + group_first_block) + 1;
1474
1475                         if (curr < *count)
1476                                 try_to_extend_reservation(my_rsv, sb,
1477                                                         *count - curr);
1478                 }
1479
1480                 if ((my_rsv->rsv_start > group_last_block) ||
1481                                 (my_rsv->rsv_end < group_first_block)) {
1482                         rsv_window_dump(&EXT4_SB(sb)->s_rsv_window_root, 1);
1483                         BUG();
1484                 }
1485                 ret = ext4_try_to_allocate(sb, handle, group, bitmap_bh,
1486                                            grp_goal, &num, &my_rsv->rsv_window);
1487                 if (ret >= 0) {
1488                         my_rsv->rsv_alloc_hit += num;
1489                         *count = num;
1490                         break;                          /* succeed */
1491                 }
1492                 num = *count;
1493         }
1494 out:
1495         if (ret >= 0) {
1496                 BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for "
1497                                         "bitmap block");
1498                 fatal = ext4_journal_dirty_metadata(handle, bitmap_bh);
1499                 if (fatal) {
1500                         *errp = fatal;
1501                         return -1;
1502                 }
1503                 return ret;
1504         }
1505
1506         BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
1507         ext4_journal_release_buffer(handle, bitmap_bh);
1508         return ret;
1509 }
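
The decision made at the top of the while loop can be summarized as a three-way choice. This is a simplified, hypothetical sketch of the policy (types and helper names invented; the ret < 0 "last attempt failed" case is folded into a flag):

#include <stdbool.h>

struct rsv { long start, end; };        /* empty when start > end */

enum action { NEW_WINDOW, EXTEND_WINDOW, JUST_ALLOCATE };

static enum action pick_action(const struct rsv *w, long goal, long want,
                               bool last_try_failed)
{
        bool empty = w->start > w->end;
        bool goal_inside = goal >= 0 && goal >= w->start && goal <= w->end;

        /* no window yet, last attempt failed, or goal outside the window:
         * grab a brand-new reservation */
        if (empty || last_try_failed || (goal >= 0 && !goal_inside))
                return NEW_WINDOW;
        /* goal inside, but the tail of the window is too small for the
         * whole request: try to extend it first */
        if (goal >= 0 && w->end - goal + 1 < want)
                return EXTEND_WINDOW;
        return JUST_ALLOCATE;
}

int main(void)
{
        struct rsv w = { 10, 19 };

        /* goal 15 with 8 blocks wanted: only 5 blocks remain after the
         * goal, so the window should be extended first */
        return pick_action(&w, 15, 8, false) == EXTEND_WINDOW ? 0 : 1;
}
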
1510
1511 /**
1512  * ext4_has_free_blocks()
1513  * @sbi:                in-core super block structure.
1514  *
1515  * Check if filesystem has at least 1 free block available for allocation.
1516  */
1517 static int ext4_has_free_blocks(struct ext4_sb_info *sbi)
1518 {
1519         ext4_fsblk_t free_blocks, root_blocks;
1520
1521         free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
1522         root_blocks = ext4_r_blocks_count(sbi->s_es);
1523         if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
1524                 sbi->s_resuid != current->fsuid &&
1525                 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
1526                 return 0;
1527         }
1528         return 1;
1529 }
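
The check above boils down to: ordinary users may not consume the root-reserved blocks. A small sketch under the assumption that the capability/uid/gid tests collapse into one privileged flag:

#include <stdbool.h>

bool has_free_blocks(unsigned long long free_blocks,
                     unsigned long long root_blocks, bool privileged)
{
        /* privileged covers the CAP_SYS_RESOURCE, reserved-uid (s_resuid)
         * and reserved-gid (s_resgid) cases above */
        if (free_blocks < root_blocks + 1 && !privileged)
                return false;
        return true;
}
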
1530
1531 /**
1532  * ext4_should_retry_alloc()
1533  * @sb:                 super block
1534  * @retries:            number of attempts that have been made
1535  *
1536  * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
1537  * it is profitable to retry the operation, this function will wait
1538  * for the current or committing transaction to complete, and then
1539  * return TRUE.
1540  *
1541  * If the total number of retries exceeds three, return FALSE.
1542  */
1543 int ext4_should_retry_alloc(struct super_block *sb, int *retries)
1544 {
1545         if (!ext4_has_free_blocks(EXT4_SB(sb)) || (*retries)++ > 3)
1546                 return 0;
1547
1548         jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
1549
1550         return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
1551 }
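
A self-contained sketch of that retry discipline; the stand-in below skips the forced journal commit and just counts attempts (names invented):

#include <stdbool.h>
#include <stdio.h>

static bool should_retry_alloc(bool fs_has_free_blocks, int *retries)
{
        if (!fs_has_free_blocks || (*retries)++ > 3)
                return false;
        return true;    /* the kernel also forces a journal commit here */
}

int main(void)
{
        int retries = 0;

        while (should_retry_alloc(true, &retries))
                printf("retrying after ENOSPC (attempt %d)\n", retries);
        return 0;
}

As in the kernel helper, a few retries are worthwhile because blocks freed in the committing transaction only become reusable once that commit completes.
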
1552
1553 /**
1554  * ext4_new_blocks() -- core block(s) allocation function
1555  * @handle:             handle to this transaction
1556  * @inode:              file inode
1557  * @goal:               given target block(filesystem wide)
1558  * @count:              target number of blocks to allocate
1559  * @errp:               error code
1560  *
1561  * ext4_new_blocks uses a goal block to assist allocation.  It tries to
1562  * allocate block(s) from the block group that contains the goal block first. If that
1563  * fails, it will try to allocate block(s) from other block groups without
1564  * any specific goal block.
1565  *
1566  */
1567 ext4_fsblk_t ext4_new_blocks(handle_t *handle, struct inode *inode,
1568                         ext4_fsblk_t goal, unsigned long *count, int *errp)
1569 {
1570         struct buffer_head *bitmap_bh = NULL;
1571         struct buffer_head *gdp_bh;
1572         unsigned long group_no;
1573         int goal_group;
1574         ext4_grpblk_t grp_target_blk;   /* blockgroup-relative goal block */
1575         ext4_grpblk_t grp_alloc_blk;    /* blockgroup-relative allocated block */
1576         ext4_fsblk_t ret_block;         /* filesystem-wide allocated block */
1577         int bgi;                        /* blockgroup iteration index */
1578         int fatal = 0, err;
1579         int performed_allocation = 0;
1580         ext4_grpblk_t free_blocks;      /* number of free blocks in a group */
1581         struct super_block *sb;
1582         struct ext4_group_desc *gdp;
1583         struct ext4_super_block *es;
1584         struct ext4_sb_info *sbi;
1585         struct ext4_reserve_window_node *my_rsv = NULL;
1586         struct ext4_block_alloc_info *block_i;
1587         unsigned short windowsz = 0;
1588 #ifdef EXT4FS_DEBUG
1589         static int goal_hits, goal_attempts;
1590 #endif
1591         unsigned long ngroups;
1592         unsigned long num = *count;
1593
1594         *errp = -ENOSPC;
1595         sb = inode->i_sb;
1596         if (!sb) {
1597                 printk("ext4_new_blocks: nonexistent device\n");
1598                 return 0;
1599         }
1600
1601         /*
1602          * Check quota for allocation of this block.
1603          */
1604         if (DQUOT_ALLOC_BLOCK(inode, num)) {
1605                 *errp = -EDQUOT;
1606                 return 0;
1607         }
1608
1609         sbi = EXT4_SB(sb);
1610         es = EXT4_SB(sb)->s_es;
1611         ext4_debug("goal=%lu.\n", goal);
1612         /*
1613          * Allocate a block from the reservation only when
1614          * the filesystem is mounted with reservations (the default, -o reservation),
1615          * it's a regular file, and
1616          * the desired window size is greater than 0 (one could use the ioctl
1617          * command EXT4_IOC_SETRSVSZ to set the window size to 0 to turn off
1618          * reservation on that particular file).
1619          */
1620         block_i = EXT4_I(inode)->i_block_alloc_info;
1621         if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
1622                 my_rsv = &block_i->rsv_window_node;
1623
1624         if (!ext4_has_free_blocks(sbi)) {
1625                 *errp = -ENOSPC;
1626                 goto out;
1627         }
1628
1629         /*
1630          * First, test whether the goal block is free.
1631          */
1632         if (goal < le32_to_cpu(es->s_first_data_block) ||
1633             goal >= ext4_blocks_count(es))
1634                 goal = le32_to_cpu(es->s_first_data_block);
1635         ext4_get_group_no_and_offset(sb, goal, &group_no, &grp_target_blk);
1636         goal_group = group_no;
1637 retry_alloc:
1638         gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
1639         if (!gdp)
1640                 goto io_error;
1641
1642         free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
1643         /*
1644          * if there are not enough free blocks to make a new reservation,
1645          * turn off reservations for this allocation
1646          */
1647         if (my_rsv && (free_blocks < windowsz)
1648                 && (rsv_is_empty(&my_rsv->rsv_window)))
1649                 my_rsv = NULL;
1650
1651         if (free_blocks > 0) {
1652                 bitmap_bh = read_block_bitmap(sb, group_no);
1653                 if (!bitmap_bh)
1654                         goto io_error;
1655                 grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle,
1656                                         group_no, bitmap_bh, grp_target_blk,
1657                                         my_rsv, &num, &fatal);
1658                 if (fatal)
1659                         goto out;
1660                 if (grp_alloc_blk >= 0)
1661                         goto allocated;
1662         }
1663
1664         ngroups = EXT4_SB(sb)->s_groups_count;
1665         smp_rmb();
1666
1667         /*
1668          * Now search the rest of the groups.  We assume that
1669          * group_no and gdp correctly point to the last group visited.
1670          */
1671         for (bgi = 0; bgi < ngroups; bgi++) {
1672                 group_no++;
1673                 if (group_no >= ngroups)
1674                         group_no = 0;
1675                 gdp = ext4_get_group_desc(sb, group_no, &gdp_bh);
1676                 if (!gdp)
1677                         goto io_error;
1678                 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
1679                 /*
1680                  * skip this group if the number of
1681                  * free blocks is less than half of the reservation
1682                  * window size.
1683                  */
1684                 if (free_blocks <= (windowsz/2))
1685                         continue;
1686
1687                 brelse(bitmap_bh);
1688                 bitmap_bh = read_block_bitmap(sb, group_no);
1689                 if (!bitmap_bh)
1690                         goto io_error;
1691                 /*
1692                  * try to allocate block(s) from this group, without a goal(-1).
1693                  */
1694                 grp_alloc_blk = ext4_try_to_allocate_with_rsv(sb, handle,
1695                                         group_no, bitmap_bh, -1, my_rsv,
1696                                         &num, &fatal);
1697                 if (fatal)
1698                         goto out;
1699                 if (grp_alloc_blk >= 0)
1700                         goto allocated;
1701         }
1702         /*
1703          * We may end up with a bogus earlier ENOSPC error because the
1704          * filesystem is "full" of reservations, while there
1705          * may indeed be free blocks available on disk.
1706          * In this case, we just forget about the reservations and
1707          * do the block allocation without reservations.
1708          */
1709         if (my_rsv) {
1710                 my_rsv = NULL;
1711                 windowsz = 0;
1712                 group_no = goal_group;
1713                 goto retry_alloc;
1714         }
1715         /* No space left on the device */
1716         *errp = -ENOSPC;
1717         goto out;
1718
1719 allocated:
1720
1721         ext4_debug("using block group %lu (%u free)\n",
1722                         group_no, le16_to_cpu(gdp->bg_free_blocks_count));
1723
1724         BUFFER_TRACE(gdp_bh, "get_write_access");
1725         fatal = ext4_journal_get_write_access(handle, gdp_bh);
1726         if (fatal)
1727                 goto out;
1728
1729         ret_block = grp_alloc_blk + ext4_group_first_block_no(sb, group_no);
1730
1731         if (in_range(ext4_block_bitmap(sb, gdp), ret_block, num) ||
1732             in_range(ext4_inode_bitmap(sb, gdp), ret_block, num) ||
1733             in_range(ret_block, ext4_inode_table(sb, gdp),
1734                      EXT4_SB(sb)->s_itb_per_group) ||
1735             in_range(ret_block + num - 1, ext4_inode_table(sb, gdp),
1736                      EXT4_SB(sb)->s_itb_per_group))
1737                 ext4_error(sb, "ext4_new_block",
1738                             "Allocating block in system zone - "
1739                             "blocks from %llu, length %lu",
1740                              ret_block, num);
1741
1742         performed_allocation = 1;
1743
1744 #ifdef CONFIG_JBD2_DEBUG
1745         {
1746                 struct buffer_head *debug_bh;
1747
1748                 /* Record bitmap buffer state in the newly allocated block */
1749                 debug_bh = sb_find_get_block(sb, ret_block);
1750                 if (debug_bh) {
1751                         BUFFER_TRACE(debug_bh, "state when allocated");
1752                         BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
1753                         brelse(debug_bh);
1754                 }
1755         }
1756         jbd_lock_bh_state(bitmap_bh);
1757         spin_lock(sb_bgl_lock(sbi, group_no));
1758         if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
1759                 int i;
1760
1761                 for (i = 0; i < num; i++) {
1762                         if (ext4_test_bit(grp_alloc_blk+i,
1763                                         bh2jh(bitmap_bh)->b_committed_data)) {
1764                                 printk("%s: block was unexpectedly set in "
1765                                         "b_committed_data\n", __FUNCTION__);
1766                         }
1767                 }
1768         }
1769         ext4_debug("found bit %d\n", grp_alloc_blk);
1770         spin_unlock(sb_bgl_lock(sbi, group_no));
1771         jbd_unlock_bh_state(bitmap_bh);
1772 #endif
1773
1774         if (ret_block + num - 1 >= ext4_blocks_count(es)) {
1775                 ext4_error(sb, "ext4_new_block",
1776                             "block(%llu) >= blocks count(%llu) - "
1777                             "block_group = %lu, es == %p ", ret_block,
1778                         ext4_blocks_count(es), group_no, es);
1779                 goto out;
1780         }
1781
1782         /*
1783          * It is up to the caller to add the new buffer to a journal
1784          * list of some description.  We don't know in advance whether
1785          * the caller wants to use it as metadata or data.
1786          */
1787         ext4_debug("allocating block %llu. Goal hits %d of %d.\n",
1788                         ret_block, goal_hits, goal_attempts);
1789
1790         spin_lock(sb_bgl_lock(sbi, group_no));
1791         if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
1792                 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
1793         gdp->bg_free_blocks_count =
1794                         cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count)-num);
1795         gdp->bg_checksum = ext4_group_desc_csum(sbi, group_no, gdp);
1796         spin_unlock(sb_bgl_lock(sbi, group_no));
1797         percpu_counter_sub(&sbi->s_freeblocks_counter, num);
1798
1799         BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
1800         err = ext4_journal_dirty_metadata(handle, gdp_bh);
1801         if (!fatal)
1802                 fatal = err;
1803
1804         sb->s_dirt = 1;
1805         if (fatal)
1806                 goto out;
1807
1808         *errp = 0;
1809         brelse(bitmap_bh);
1810         DQUOT_FREE_BLOCK(inode, *count-num);
1811         *count = num;
1812         return ret_block;
1813
1814 io_error:
1815         *errp = -EIO;
1816 out:
1817         if (fatal) {
1818                 *errp = fatal;
1819                 ext4_std_error(sb, fatal);
1820         }
1821         /*
1822          * Undo the block allocation
1823          */
1824         if (!performed_allocation)
1825                 DQUOT_FREE_BLOCK(inode, *count);
1826         brelse(bitmap_bh);
1827         return 0;
1828 }
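
The search order used above, goal group first and then every group in circular order, is easy to demonstrate standalone (toy values; note that the bgi loop ends by revisiting the goal group, just as the kernel loop does):

#include <stdio.h>

int main(void)
{
        unsigned long ngroups = 8, goal_group = 5;
        unsigned long group_no = goal_group, bgi;

        printf("try group %lu (goal)\n", group_no);
        for (bgi = 0; bgi < ngroups; bgi++) {
                group_no++;
                if (group_no >= ngroups)
                        group_no = 0;
                printf("try group %lu\n", group_no);
        }
        return 0;
}

With these values the order is 5, 6, 7, 0, 1, 2, 3, 4, 5: the goal group gets a second look as the final iteration of the loop.
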
1829
1830 ext4_fsblk_t ext4_new_block(handle_t *handle, struct inode *inode,
1831                         ext4_fsblk_t goal, int *errp)
1832 {
1833         unsigned long count = 1;
1834
1835         return ext4_new_blocks(handle, inode, goal, &count, errp);
1836 }
1837
1838 /**
1839  * ext4_count_free_blocks() -- count filesystem free blocks
1840  * @sb:         superblock
1841  *
1842  * Adds up the number of free blocks from each block group.
1843  */
1844 ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
1845 {
1846         ext4_fsblk_t desc_count;
1847         struct ext4_group_desc *gdp;
1848         int i;
1849         unsigned long ngroups = EXT4_SB(sb)->s_groups_count;
1850 #ifdef EXT4FS_DEBUG
1851         struct ext4_super_block *es;
1852         ext4_fsblk_t bitmap_count;
1853         unsigned long x;
1854         struct buffer_head *bitmap_bh = NULL;
1855
1856         es = EXT4_SB(sb)->s_es;
1857         desc_count = 0;
1858         bitmap_count = 0;
1859         gdp = NULL;
1860
1861         smp_rmb();
1862         for (i = 0; i < ngroups; i++) {
1863                 gdp = ext4_get_group_desc(sb, i, NULL);
1864                 if (!gdp)
1865                         continue;
1866                 desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
1867                 brelse(bitmap_bh);
1868                 bitmap_bh = read_block_bitmap(sb, i);
1869                 if (bitmap_bh == NULL)
1870                         continue;
1871
1872                 x = ext4_count_free(bitmap_bh, sb->s_blocksize);
1873                 printk("group %d: stored = %d, counted = %lu\n",
1874                         i, le16_to_cpu(gdp->bg_free_blocks_count), x);
1875                 bitmap_count += x;
1876         }
1877         brelse(bitmap_bh);
1878         printk("ext4_count_free_blocks: stored = %llu"
1879                 ", computed = %llu, %llu\n",
1880                EXT4_FREE_BLOCKS_COUNT(es),
1881                 desc_count, bitmap_count);
1882         return bitmap_count;
1883 #else
1884         desc_count = 0;
1885         smp_rmb();
1886         for (i = 0; i < ngroups; i++) {
1887                 gdp = ext4_get_group_desc(sb, i, NULL);
1888                 if (!gdp)
1889                         continue;
1890                 desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
1891         }
1892
1893         return desc_count;
1894 #endif
1895 }
1896
1897 static inline int test_root(int a, int b)
1898 {
1899         int num = b;
1900
1901         while (a > num)
1902                 num *= b;
1903         return num == a;
1904 }
1905
1906 static int ext4_group_sparse(int group)
1907 {
1908         if (group <= 1)
1909                 return 1;
1910         if (!(group & 1))
1911                 return 0;
1912         return (test_root(group, 7) || test_root(group, 5) ||
1913                 test_root(group, 3));
1914 }
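
test_root(a, b) returns true when a is a power of b (b, b^2, b^3, ...), so ext4_group_sparse() accepts groups 0 and 1 plus group numbers that are powers of 3, 5 or 7. A standalone check that copies the two helpers verbatim:

#include <stdio.h>

static int test_root(int a, int b)
{
        int num = b;

        while (a > num)
                num *= b;
        return num == a;
}

static int group_sparse(int group)
{
        if (group <= 1)
                return 1;
        if (!(group & 1))
                return 0;
        return (test_root(group, 7) || test_root(group, 5) ||
                test_root(group, 3));
}

int main(void)
{
        int g;

        /* prints: 0 1 3 5 7 9 25 27 49 81 */
        for (g = 0; g < 100; g++)
                if (group_sparse(g))
                        printf("%d ", g);
        printf("\n");
        return 0;
}
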
1915
1916 /**
1917  *      ext4_bg_has_super - number of blocks used by the superblock in group
1918  *      @sb: superblock for filesystem
1919  *      @group: group number to check
1920  *
1921  *      Return the number of blocks used by the superblock (primary or backup)
1922  *      in this group.  Currently this will be only 0 or 1.
1923  */
1924 int ext4_bg_has_super(struct super_block *sb, int group)
1925 {
1926         if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
1927                                 EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
1928                         !ext4_group_sparse(group))
1929                 return 0;
1930         return 1;
1931 }
1932
1933 static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb, int group)
1934 {
1935         unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
1936         unsigned long first = metagroup * EXT4_DESC_PER_BLOCK(sb);
1937         unsigned long last = first + EXT4_DESC_PER_BLOCK(sb) - 1;
1938
1939         if (group == first || group == first + 1 || group == last)
1940                 return 1;
1941         return 0;
1942 }
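
Under META_BG each metagroup spans EXT4_DESC_PER_BLOCK(sb) groups and keeps its descriptor block in the first group of the metagroup, with backups in the second and the last. A standalone illustration (128 descriptors per block is an assumption matching 4K blocks and 32-byte descriptors):

#include <stdio.h>

#define DESC_PER_BLOCK 128      /* assumed: 4K block / 32-byte descriptor */

static unsigned long num_gdb_meta(unsigned long group)
{
        unsigned long metagroup = group / DESC_PER_BLOCK;
        unsigned long first = metagroup * DESC_PER_BLOCK;
        unsigned long last = first + DESC_PER_BLOCK - 1;

        return (group == first || group == first + 1 || group == last) ? 1 : 0;
}

int main(void)
{
        unsigned long g;

        /* prints groups 128, 129 and 255: the second metagroup's copies */
        for (g = 128; g < 256; g++)
                if (num_gdb_meta(g))
                        printf("group %lu holds a descriptor block\n", g);
        return 0;
}
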
1943
1944 static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb, int group)
1945 {
1946         if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
1947                                 EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
1948                         !ext4_group_sparse(group))
1949                 return 0;
1950         return EXT4_SB(sb)->s_gdb_count;
1951 }
1952
1953 /**
1954  *      ext4_bg_num_gdb - number of blocks used by the group table in group
1955  *      @sb: superblock for filesystem
1956  *      @group: group number to check
1957  *
1958  *      Return the number of blocks used by the group descriptor table
1959  *      (primary or backup) in this group.  In the future there may be a
1960  *      different number of descriptor blocks in each group.
1961  */
1962 unsigned long ext4_bg_num_gdb(struct super_block *sb, int group)
1963 {
1964         unsigned long first_meta_bg =
1965                         le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
1966         unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
1967
1968         if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
1969                         metagroup < first_meta_bg)
1970                 return ext4_bg_num_gdb_nometa(sb, group);
1971
1972         return ext4_bg_num_gdb_meta(sb, group);
1973
1974 }