[PATCH] ext3: turn on reservation dump on block allocation errors

/*
 *  linux/fs/ext3/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/ext3_fs.h>
#include <linux/ext3_jbd.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>

/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the group.  The descriptors are loaded in memory
 * when a file system is mounted (see ext3_read_super).
 */
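
/*
 * Worked example (illustrative, not from the original source): on a
 * filesystem with 1K blocks, where s_first_data_block is 1 and there are
 * 8192 blocks per group, filesystem block 10000 lives at
 *
 *      group:  (10000 - 1) / 8192 = 1
 *      offset: (10000 - 1) % 8192 = 1807
 *
 * which is the same arithmetic ext3_free_blocks_sb() and ext3_new_blocks()
 * perform below with EXT3_BLOCKS_PER_GROUP(sb).
 */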

#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)

struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
                                             unsigned int block_group,
                                             struct buffer_head ** bh)
{
        unsigned long group_desc;
        unsigned long offset;
        struct ext3_group_desc * desc;
        struct ext3_sb_info *sbi = EXT3_SB(sb);

        if (block_group >= sbi->s_groups_count) {
                ext3_error (sb, "ext3_get_group_desc",
                            "block_group >= groups_count - "
                            "block_group = %d, groups_count = %lu",
                            block_group, sbi->s_groups_count);

                return NULL;
        }
        smp_rmb();

        group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
        offset = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
        if (!sbi->s_group_desc[group_desc]) {
                ext3_error (sb, "ext3_get_group_desc",
                            "Group descriptor not loaded - "
                            "block_group = %d, group_desc = %lu, desc = %lu",
                             block_group, group_desc, offset);
                return NULL;
        }

        desc = (struct ext3_group_desc *) sbi->s_group_desc[group_desc]->b_data;
        if (bh)
                *bh = sbi->s_group_desc[group_desc];
        return desc + offset;
}
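
/*
 * Illustrative note (assumed parameters, not from the original source):
 * with 4K blocks and 32-byte group descriptors, EXT3_DESC_PER_BLOCK(sb)
 * is 128, so the lookup above for block_group 300 resolves to
 *
 *      group_desc = 300 >> 7  = 2      (third descriptor block)
 *      offset     = 300 & 127 = 44     (45th descriptor in that block)
 */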

/*
 * Read the bitmap for a given block_group, reading into the specified
 * slot in the superblock's bitmap cache.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
static struct buffer_head *
read_block_bitmap(struct super_block *sb, unsigned int block_group)
{
        struct ext3_group_desc * desc;
        struct buffer_head * bh = NULL;

        desc = ext3_get_group_desc (sb, block_group, NULL);
        if (!desc)
                goto error_out;
        bh = sb_bread(sb, le32_to_cpu(desc->bg_block_bitmap));
        if (!bh)
                ext3_error (sb, "read_block_bitmap",
                            "Cannot read block bitmap - "
                            "block_group = %d, block_bitmap = %u",
                            block_group, le32_to_cpu(desc->bg_block_bitmap));
error_out:
        return bh;
}

/*
 * The reservation window structure operations
 * --------------------------------------------
 * Operations include:
 * dump, find, add, remove, is_empty, find_next_reservable_window, etc.
 *
 * We keep the per-filesystem reservation windows in a red-black tree,
 * sorted by start block.
 *
 * These small operations are kept behind helper functions, so that if we
 * later need a better search structure than the current tree, we can
 * switch to it without changing too much code.
 */
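
/*
 * Sketch of the tree invariant assumed throughout this file (illustrative
 * numbers, not from the original source): windows never overlap and are
 * totally ordered by block range, e.g.
 *
 *      [100, 131]   [200, 231]   [300, 363]
 *
 * so search_reserve_window() below can binary-search on rsv_start/rsv_end,
 * and ext3_rsv_window_add() BUGs if an insertion would overlap.
 */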
#if 1
static void __rsv_window_dump(struct rb_root *root, int verbose,
                              const char *fn)
{
        struct rb_node *n;
        struct ext3_reserve_window_node *rsv, *prev;
        int bad;

restart:
        n = rb_first(root);
        bad = 0;
        prev = NULL;

        printk("Block Allocation Reservation Windows Map (%s):\n", fn);
        while (n) {
                rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
                if (verbose)
                        printk("reservation window 0x%p "
                               "start:  %lu, end:  %lu\n",
                               rsv, rsv->rsv_start, rsv->rsv_end);
                if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
                        printk("Bad reservation %p (start >= end)\n",
                               rsv);
                        bad = 1;
                }
                if (prev && prev->rsv_end >= rsv->rsv_start) {
                        printk("Bad reservation %p (prev->end >= start)\n",
                               rsv);
                        bad = 1;
                }
                if (bad) {
                        if (!verbose) {
                                printk("Restarting reservation walk in verbose mode\n");
                                verbose = 1;
                                goto restart;
                        }
                }
                n = rb_next(n);
                prev = rsv;
        }
        printk("Window map complete.\n");
        if (bad)
                BUG();
}
#define rsv_window_dump(root, verbose) \
        __rsv_window_dump((root), (verbose), __FUNCTION__)
#else
#define rsv_window_dump(root, verbose) do {} while (0)
#endif

static int
goal_in_my_reservation(struct ext3_reserve_window *rsv, ext3_grpblk_t grp_goal,
                        unsigned int group, struct super_block * sb)
{
        ext3_fsblk_t group_first_block, group_last_block;

        group_first_block = ext3_group_first_block_no(sb, group);
        group_last_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1;

        if ((rsv->_rsv_start > group_last_block) ||
            (rsv->_rsv_end < group_first_block))
                return 0;
        if ((grp_goal >= 0) && ((grp_goal + group_first_block < rsv->_rsv_start)
                || (grp_goal + group_first_block > rsv->_rsv_end)))
                return 0;
        return 1;
}
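
/*
 * Example (illustrative values, not from the original source): for a group
 * whose first block is 8193 and a window of [8200, 8231],
 * goal_in_my_reservation() returns 1 for grp_goal 10 (absolute block 8203)
 * and 0 for grp_goal 100 (absolute block 8293), since only the former falls
 * inside the window.  A negative grp_goal only checks the group overlap.
 */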

/*
 * Find the reserved window which includes the goal, or the previous one
 * if the goal is not in any window.
 * Returns NULL if there are no windows or if all windows start after the goal.
 */
static struct ext3_reserve_window_node *
search_reserve_window(struct rb_root *root, ext3_fsblk_t goal)
{
        struct rb_node *n = root->rb_node;
        struct ext3_reserve_window_node *rsv;

        if (!n)
                return NULL;

        do {
                rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);

                if (goal < rsv->rsv_start)
                        n = n->rb_left;
                else if (goal > rsv->rsv_end)
                        n = n->rb_right;
                else
                        return rsv;
        } while (n);
        /*
         * We've fallen off the end of the tree: the goal wasn't inside
         * any particular node.  OK, the previous node must be to one
         * side of the interval containing the goal.  If it's the RHS,
         * we need to back up one.
         */
        if (rsv->rsv_start > goal) {
                n = rb_prev(&rsv->rsv_node);
                rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
        }
        return rsv;
}
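
/*
 * Usage sketch (hypothetical windows, not from the original source): with
 * windows [10, 20] and [30, 40] in the tree, a goal of 15 returns the
 * [10, 20] node, a goal of 25 falls between windows and also returns
 * [10, 20] (the predecessor), and a goal of 5 returns NULL because every
 * window starts after it.
 */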

void ext3_rsv_window_add(struct super_block *sb,
                    struct ext3_reserve_window_node *rsv)
{
        struct rb_root *root = &EXT3_SB(sb)->s_rsv_window_root;
        struct rb_node *node = &rsv->rsv_node;
        ext3_fsblk_t start = rsv->rsv_start;

        struct rb_node ** p = &root->rb_node;
        struct rb_node * parent = NULL;
        struct ext3_reserve_window_node *this;

        while (*p) {
                parent = *p;
                this = rb_entry(parent, struct ext3_reserve_window_node,
                                rsv_node);

                if (start < this->rsv_start)
                        p = &(*p)->rb_left;
                else if (start > this->rsv_end)
                        p = &(*p)->rb_right;
                else {
                        rsv_window_dump(root, 1);
                        BUG();
                }
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
}

static void rsv_window_remove(struct super_block *sb,
                              struct ext3_reserve_window_node *rsv)
{
        rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
        rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
        rsv->rsv_alloc_hit = 0;
        rb_erase(&rsv->rsv_node, &EXT3_SB(sb)->s_rsv_window_root);
}

static inline int rsv_is_empty(struct ext3_reserve_window *rsv)
{
        /* a valid reservation end block could not be 0 */
        return (rsv->_rsv_end == EXT3_RESERVE_WINDOW_NOT_ALLOCATED);
}

void ext3_init_block_alloc_info(struct inode *inode)
{
        struct ext3_inode_info *ei = EXT3_I(inode);
        struct ext3_block_alloc_info *block_i;
        struct super_block *sb = inode->i_sb;

        block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
        if (block_i) {
                struct ext3_reserve_window_node *rsv = &block_i->rsv_window_node;

                rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
                rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;

                /*
                 * if filesystem is mounted with NORESERVATION, the goal
                 * reservation window size is set to zero to indicate
                 * block reservation is off
                 */
                if (!test_opt(sb, RESERVATION))
                        rsv->rsv_goal_size = 0;
                else
                        rsv->rsv_goal_size = EXT3_DEFAULT_RESERVE_BLOCKS;
                rsv->rsv_alloc_hit = 0;
                block_i->last_alloc_logical_block = 0;
                block_i->last_alloc_physical_block = 0;
        }
        ei->i_block_alloc_info = block_i;
}

void ext3_discard_reservation(struct inode *inode)
{
        struct ext3_inode_info *ei = EXT3_I(inode);
        struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info;
        struct ext3_reserve_window_node *rsv;
        spinlock_t *rsv_lock = &EXT3_SB(inode->i_sb)->s_rsv_window_lock;

        if (!block_i)
                return;

        rsv = &block_i->rsv_window_node;
        if (!rsv_is_empty(&rsv->rsv_window)) {
                spin_lock(rsv_lock);
                if (!rsv_is_empty(&rsv->rsv_window))
                        rsv_window_remove(inode->i_sb, rsv);
                spin_unlock(rsv_lock);
        }
}

/* Free given blocks, update quota and i_blocks field */
void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb,
                         ext3_fsblk_t block, unsigned long count,
                         unsigned long *pdquot_freed_blocks)
{
        struct buffer_head *bitmap_bh = NULL;
        struct buffer_head *gd_bh;
        unsigned long block_group;
        ext3_grpblk_t bit;
        unsigned long i;
        unsigned long overflow;
        struct ext3_group_desc * desc;
        struct ext3_super_block * es;
        struct ext3_sb_info *sbi;
        int err = 0, ret;
        ext3_grpblk_t group_freed;

        *pdquot_freed_blocks = 0;
        sbi = EXT3_SB(sb);
        es = sbi->s_es;
        if (block < le32_to_cpu(es->s_first_data_block) ||
            block + count < block ||
            block + count > le32_to_cpu(es->s_blocks_count)) {
                ext3_error (sb, "ext3_free_blocks",
                            "Freeing blocks not in datazone - "
                            "block = "E3FSBLK", count = %lu", block, count);
                goto error_return;
        }

        ext3_debug ("freeing block(s) %lu-%lu\n", block, block + count - 1);

do_more:
        overflow = 0;
        block_group = (block - le32_to_cpu(es->s_first_data_block)) /
                      EXT3_BLOCKS_PER_GROUP(sb);
        bit = (block - le32_to_cpu(es->s_first_data_block)) %
                      EXT3_BLOCKS_PER_GROUP(sb);
        /*
         * Check to see if we are freeing blocks across a group
         * boundary.
         */
        if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) {
                overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
                count -= overflow;
        }
        brelse(bitmap_bh);
        bitmap_bh = read_block_bitmap(sb, block_group);
        if (!bitmap_bh)
                goto error_return;
        desc = ext3_get_group_desc (sb, block_group, &gd_bh);
        if (!desc)
                goto error_return;

        if (in_range (le32_to_cpu(desc->bg_block_bitmap), block, count) ||
            in_range (le32_to_cpu(desc->bg_inode_bitmap), block, count) ||
            in_range (block, le32_to_cpu(desc->bg_inode_table),
                      sbi->s_itb_per_group) ||
            in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table),
                      sbi->s_itb_per_group))
                ext3_error (sb, "ext3_free_blocks",
                            "Freeing blocks in system zones - "
                            "Block = "E3FSBLK", count = %lu",
                            block, count);

        /*
         * We are about to start releasing blocks in the bitmap,
         * so we need undo access.
         */
        /* @@@ check errors */
        BUFFER_TRACE(bitmap_bh, "getting undo access");
        err = ext3_journal_get_undo_access(handle, bitmap_bh);
        if (err)
                goto error_return;

        /*
         * We are about to modify some metadata.  Call the journal APIs
         * to unshare ->b_data if a currently-committing transaction is
         * using it
         */
        BUFFER_TRACE(gd_bh, "get_write_access");
        err = ext3_journal_get_write_access(handle, gd_bh);
        if (err)
                goto error_return;

        jbd_lock_bh_state(bitmap_bh);

        for (i = 0, group_freed = 0; i < count; i++) {
                /*
                 * An HJ special.  This is expensive...
                 */
#ifdef CONFIG_JBD_DEBUG
                jbd_unlock_bh_state(bitmap_bh);
                {
                        struct buffer_head *debug_bh;
                        debug_bh = sb_find_get_block(sb, block + i);
                        if (debug_bh) {
                                BUFFER_TRACE(debug_bh, "Deleted!");
                                if (!bh2jh(bitmap_bh)->b_committed_data)
                                        BUFFER_TRACE(debug_bh,
                                                "No committed data in bitmap");
                                BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
                                __brelse(debug_bh);
                        }
                }
                jbd_lock_bh_state(bitmap_bh);
#endif
                if (need_resched()) {
                        jbd_unlock_bh_state(bitmap_bh);
                        cond_resched();
                        jbd_lock_bh_state(bitmap_bh);
                }
                /* @@@ This prevents newly-allocated data from being
                 * freed and then reallocated within the same
                 * transaction.
                 *
                 * Ideally we would want to allow that to happen, but to
                 * do so requires making journal_forget() capable of
                 * revoking the queued write of a data block, which
                 * implies blocking on the journal lock.  *forget()
                 * cannot block due to truncate races.
                 *
                 * Eventually we can fix this by making journal_forget()
                 * return a status indicating whether or not it was able
                 * to revoke the buffer.  On successful revoke, it is
                 * safe not to set the allocation bit in the committed
                 * bitmap, because we know that there is no outstanding
                 * activity on the buffer any more and so it is safe to
                 * reallocate it.
                 */
                BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
                J_ASSERT_BH(bitmap_bh,
                                bh2jh(bitmap_bh)->b_committed_data != NULL);
                ext3_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
                                bh2jh(bitmap_bh)->b_committed_data);

                /*
                 * We clear the bit in the bitmap after setting the committed
                 * data bit, because this is the reverse order to that which
                 * the allocator uses.
                 */
                BUFFER_TRACE(bitmap_bh, "clear bit");
                if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
                                                bit + i, bitmap_bh->b_data)) {
                        jbd_unlock_bh_state(bitmap_bh);
                        ext3_error(sb, __FUNCTION__,
                                "bit already cleared for block "E3FSBLK,
                                 block + i);
                        jbd_lock_bh_state(bitmap_bh);
                        BUFFER_TRACE(bitmap_bh, "bit already cleared");
                } else {
                        group_freed++;
                }
        }
        jbd_unlock_bh_state(bitmap_bh);

        spin_lock(sb_bgl_lock(sbi, block_group));
        desc->bg_free_blocks_count =
                cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) +
                        group_freed);
        spin_unlock(sb_bgl_lock(sbi, block_group));
        percpu_counter_mod(&sbi->s_freeblocks_counter, count);

        /* We dirtied the bitmap block */
        BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
        err = ext3_journal_dirty_metadata(handle, bitmap_bh);

        /* And the group descriptor block */
        BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
        ret = ext3_journal_dirty_metadata(handle, gd_bh);
        if (!err)
                err = ret;
        *pdquot_freed_blocks += group_freed;

        if (overflow && !err) {
                block += count;
                count = overflow;
                goto do_more;
        }
        sb->s_dirt = 1;
error_return:
        brelse(bitmap_bh);
        ext3_std_error(sb, err);
        return;
}

/* Free given blocks, update quota and i_blocks field */
void ext3_free_blocks(handle_t *handle, struct inode *inode,
                        ext3_fsblk_t block, unsigned long count)
{
        struct super_block * sb;
        unsigned long dquot_freed_blocks;

        sb = inode->i_sb;
        if (!sb) {
                printk ("ext3_free_blocks: nonexistent device");
                return;
        }
        ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
        if (dquot_freed_blocks)
                DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
        return;
}

/*
 * For ext3 allocations, we must not reuse any blocks which are
 * allocated in the bitmap buffer's "last committed data" copy.  This
 * prevents deletes from freeing up the page for reuse until we have
 * committed the delete transaction.
 *
 * If we didn't do this, then deleting something and reallocating it as
 * data would allow the old block to be overwritten before the
 * transaction committed (because we force data to disk before commit).
 * This would lead to corruption if we crashed between overwriting the
 * data and committing the delete.
 *
 * @@@ We may want to make this allocation behaviour conditional on
 * data-writes at some point, and disable it for metadata allocations or
 * sync-data inodes.
 */
static int ext3_test_allocatable(ext3_grpblk_t nr, struct buffer_head *bh)
{
        int ret;
        struct journal_head *jh = bh2jh(bh);

        if (ext3_test_bit(nr, bh->b_data))
                return 0;

        jbd_lock_bh_state(bh);
        if (!jh->b_committed_data)
                ret = 1;
        else
                ret = !ext3_test_bit(nr, jh->b_committed_data);
        jbd_unlock_bh_state(bh);
        return ret;
}
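
/*
 * Summary of the rule above (restated for clarity, not from the original
 * source): a bit is allocatable only when it is clear in both copies.
 *
 *      b_data  b_committed_data        allocatable?
 *      set     -                       no (in use now)
 *      clear   set                     no (freed by an uncommitted delete)
 *      clear   clear / absent          yes
 */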

static ext3_grpblk_t
bitmap_search_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
                                        ext3_grpblk_t maxblocks)
{
        ext3_grpblk_t next;
        struct journal_head *jh = bh2jh(bh);

        /*
         * The bitmap search --- search forward alternately through the actual
         * bitmap and the last-committed copy until we find a bit free in
         * both
         */
        while (start < maxblocks) {
                next = ext3_find_next_zero_bit(bh->b_data, maxblocks, start);
                if (next >= maxblocks)
                        return -1;
                if (ext3_test_allocatable(next, bh))
                        return next;
                jbd_lock_bh_state(bh);
                if (jh->b_committed_data)
                        start = ext3_find_next_zero_bit(jh->b_committed_data,
                                                        maxblocks, next);
                jbd_unlock_bh_state(bh);
        }
        return -1;
}

/*
 * Find an allocatable block in a bitmap.  We honour both the bitmap and
 * its last-committed copy (if that exists), and perform the "most
 * appropriate allocation" algorithm of looking for a free block near
 * the initial goal; then for a free byte somewhere in the bitmap; then
 * for any free bit in the bitmap.
 */
static ext3_grpblk_t
find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
                        ext3_grpblk_t maxblocks)
{
        ext3_grpblk_t here, next;
        char *p, *r;

        if (start > 0) {
                /*
                 * The goal was occupied; search forward for a free
                 * block within the next XX blocks.
                 *
                 * end_goal is more or less random, but it has to be
                 * less than EXT3_BLOCKS_PER_GROUP. Aligning up to the
                 * next 64-bit boundary is simple..
                 */
                ext3_grpblk_t end_goal = (start + 63) & ~63;
                if (end_goal > maxblocks)
                        end_goal = maxblocks;
                here = ext3_find_next_zero_bit(bh->b_data, end_goal, start);
                if (here < end_goal && ext3_test_allocatable(here, bh))
                        return here;
                ext3_debug("Bit not found near goal\n");
        }

        here = start;
        if (here < 0)
                here = 0;

        p = ((char *)bh->b_data) + (here >> 3);
        r = memscan(p, 0, (maxblocks - here + 7) >> 3);
        next = (r - ((char *)bh->b_data)) << 3;

        if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh))
                return next;

        /*
         * The bitmap search --- search forward alternately through the actual
         * bitmap and the last-committed copy until we find a bit free in
         * both
         */
        here = bitmap_search_next_usable_block(here, bh, maxblocks);
        return here;
}
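
/*
 * Walkthrough of the three phases above (illustrative, not from the
 * original source), for a goal of 100 in a mostly-full group:
 *
 *   1. scan bits [100, 128) - up to the next 64-bit boundary - for a zero
 *      bit near the goal;
 *   2. failing that, memscan() for an all-zero byte, i.e. 8 contiguous
 *      free blocks, anywhere from the goal onwards;
 *   3. failing that, fall back to the bit-by-bit alternating search in
 *      bitmap_search_next_usable_block().
 */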

/*
 * We think we can allocate this block in this bitmap.  Try to set the bit.
 * If that succeeds then check that nobody has allocated and then freed the
 * block since we saw that it was not marked in b_committed_data.  If it _was_
 * allocated and freed then clear the bit in the bitmap again and return
 * zero (failure).
 */
static inline int
claim_block(spinlock_t *lock, ext3_grpblk_t block, struct buffer_head *bh)
{
        struct journal_head *jh = bh2jh(bh);
        int ret;

        if (ext3_set_bit_atomic(lock, block, bh->b_data))
                return 0;
        jbd_lock_bh_state(bh);
        if (jh->b_committed_data && ext3_test_bit(block, jh->b_committed_data)) {
                ext3_clear_bit_atomic(lock, block, bh->b_data);
                ret = 0;
        } else {
                ret = 1;
        }
        jbd_unlock_bh_state(bh);
        return ret;
}
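
/*
 * Race sketch (illustrative, not from the original source): CPU A tests
 * block N allocatable; CPU B then allocates N and frees it again in the
 * same transaction, which sets N in the bitmap's committed-data copy and
 * clears it in b_data.  CPU A's set_bit still succeeds, but the recheck
 * under jbd_lock_bh_state() sees N set in b_committed_data, so CPU A backs
 * out its bit and reports failure rather than reusing a block whose delete
 * has not yet committed.
 */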

/*
 * If we failed to allocate the desired block then we may end up crossing to a
 * new bitmap.  In that case we must release write access to the old one via
 * ext3_journal_release_buffer(), else we'll run out of credits.
 */
static ext3_grpblk_t
ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
                        struct buffer_head *bitmap_bh, ext3_grpblk_t grp_goal,
                        unsigned long *count, struct ext3_reserve_window *my_rsv)
{
        ext3_fsblk_t group_first_block;
        ext3_grpblk_t start, end;
        unsigned long num = 0;

        /* we do allocation within the reservation window if we have a window */
        if (my_rsv) {
                group_first_block = ext3_group_first_block_no(sb, group);
                if (my_rsv->_rsv_start >= group_first_block)
                        start = my_rsv->_rsv_start - group_first_block;
                else
                        /* reservation window crosses group boundary */
                        start = 0;
                end = my_rsv->_rsv_end - group_first_block + 1;
                if (end > EXT3_BLOCKS_PER_GROUP(sb))
                        /* reservation window crosses group boundary */
                        end = EXT3_BLOCKS_PER_GROUP(sb);
                if ((start <= grp_goal) && (grp_goal < end))
                        start = grp_goal;
                else
                        grp_goal = -1;
        } else {
                if (grp_goal > 0)
                        start = grp_goal;
                else
                        start = 0;
                end = EXT3_BLOCKS_PER_GROUP(sb);
        }

        BUG_ON(start > EXT3_BLOCKS_PER_GROUP(sb));

repeat:
        if (grp_goal < 0 || !ext3_test_allocatable(grp_goal, bitmap_bh)) {
                grp_goal = find_next_usable_block(start, bitmap_bh, end);
                if (grp_goal < 0)
                        goto fail_access;
                if (!my_rsv) {
                        int i;

                        for (i = 0; i < 7 && grp_goal > start &&
                                        ext3_test_allocatable(grp_goal - 1,
                                                                bitmap_bh);
                                        i++, grp_goal--)
                                ;
                }
        }
        start = grp_goal;

        if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group), grp_goal, bitmap_bh)) {
                /*
                 * The block was allocated by another thread, or it was
                 * allocated and then freed by another thread
                 */
                start++;
                grp_goal++;
                if (start >= end)
                        goto fail_access;
                goto repeat;
        }
        num++;
        grp_goal++;
        while (num < *count && grp_goal < end
                && ext3_test_allocatable(grp_goal, bitmap_bh)
                && claim_block(sb_bgl_lock(EXT3_SB(sb), group), grp_goal,
                                bitmap_bh)) {
                num++;
                grp_goal++;
        }
        *count = num;
        return grp_goal - num;
fail_access:
        *count = num;
        return -1;
}

/**
 *      find_next_reservable_window():
 *              find a reservable space within the given range.
 *              It does not allocate the reservation window for now:
 *              alloc_new_reservation() will do the work later.
 *
 *      @search_head: the head of the searching list;
 *              This is not necessarily the list head of the whole filesystem.
 *
 *              We have both head and start_block to assist the search
 *              for the reservable space. The list starts from head,
 *              but we will shift to the place where start_block is,
 *              then start from there, when looking for a reservable space.
 *
 *      @my_rsv: the window in which to place the new reservation;
 *              its rsv_goal_size gives the target window size
 *
 *      @sb: the super block
 *
 *      @start_block: the first block we consider to start
 *                      the real search from
 *
 *      @last_block:
 *              the maximum block number that our goal reservable space
 *              could start from. This is normally the last block in this
 *              group. The search ends as soon as the start of the next
 *              possible reservable space is beyond this boundary.
 *              This handles the case of a reservation window crossing
 *              the group boundary.
 *
 *      Basically we search the given range (start_block, last_block),
 *      rather than the whole reservation tree, for a free region that is
 *      of the requested size and has not been reserved.
 */
static int find_next_reservable_window(
                                struct ext3_reserve_window_node *search_head,
                                struct ext3_reserve_window_node *my_rsv,
                                struct super_block * sb,
                                ext3_fsblk_t start_block,
                                ext3_fsblk_t last_block)
{
        struct rb_node *next;
        struct ext3_reserve_window_node *rsv, *prev;
        ext3_fsblk_t cur;
        int size = my_rsv->rsv_goal_size;

        /* TODO: make the start of the reservation window byte-aligned */
        /* cur = *start_block & ~7;*/
        cur = start_block;
        rsv = search_head;
        if (!rsv)
                return -1;

        while (1) {
                if (cur <= rsv->rsv_end)
                        cur = rsv->rsv_end + 1;

                /* TODO?
                 * in the case we could not find a reservable space
                 * of the expected size, during the re-search, we could
                 * remember the largest reservable space we found
                 * and return that one.
                 *
                 * For now it will fail if we could not find the reservable
                 * space with expected-size (or more)...
                 */
                if (cur > last_block)
                        return -1;              /* fail */

                prev = rsv;
                next = rb_next(&rsv->rsv_node);
                rsv = rb_entry(next, struct ext3_reserve_window_node, rsv_node);

                /*
                 * Reached the last reservation, we can just append to the
                 * previous one.
                 */
                if (!next)
                        break;

                if (cur + size <= rsv->rsv_start) {
                        /*
                         * Found a reservable space big enough.  We could
                         * have a reservation across the group boundary here
                         */
                        break;
                }
        }
        /*
         * We come here either when we reach the end of the whole list and
         * there is empty reservable space after the last entry (append it
         * to the end of the list), or when we find a reservable space in
         * the middle of the list (return the reservation window that we
         * could append to).  Either way we succeed.
         */

        if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->rsv_window)))
                rsv_window_remove(sb, my_rsv);

        /*
         * Let's book the whole available window for now.  We will check the
         * disk bitmap later and then, if there are free blocks then we adjust
         * the window size if it's larger than requested.
         * Otherwise, we will remove this node from the tree the next time
         * find_next_reservable_window() is called.
         */
        my_rsv->rsv_start = cur;
        my_rsv->rsv_end = cur + size - 1;
        my_rsv->rsv_alloc_hit = 0;

        if (prev != my_rsv)
                ext3_rsv_window_add(sb, my_rsv);

        return 0;
}
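
/*
 * Walkthrough (hypothetical windows, not from the original source): with
 * existing windows [100, 131] and [140, 171], a start_block of 105 and a
 * goal size of 8, the loop skips to cur = 132, sees 132 + 8 <= 140, and
 * books [132, 139] between the two windows.  With a goal size of 16 it
 * would instead skip past [140, 171] and book [172, 187], assuming that
 * start still falls at or below last_block.
 */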

/**
 *      alloc_new_reservation()--allocate a new reservation window
 *
 *              To make a new reservation, we search part of the filesystem
 *              reservation list (the list that is inside the group). We try
 *              to allocate a new reservation window near the allocation
 *              goal, or the beginning of the group, if there is no goal.
 *
 *              We first find a reservable space after the goal, then from
 *              there, we check the bitmap for the first free block after
 *              it. If there is no free block until the end of group, then the
 *              whole group is full, we failed. Otherwise, check if the free
 *              block is inside the expected reservable space, if so, we
 *              succeed.
 *              If the first free block is outside the reservable space, then
 *              starting from the first free block, we search for the next
 *              reservable space, and go on.
 *
 *      On success, a new reservation will be found and inserted into the
 *      list.  It contains at least one free block, and it does not overlap
 *      with other reservation windows.
 *
 *      On failure, no reservation window could be found in this group.
 *
 *      @my_rsv: the reservation
 *
 *      @grp_goal: The goal (group-relative).  It is where the search for a
 *              free reservable space should start from.
 *              If we have a grp_goal (grp_goal >= 0), then start from there;
 *              with no grp_goal (grp_goal = -1), we start from the first
 *              block of the group.
 *
 *      @sb: the super block
 *      @group: the group we are trying to allocate in
 *      @bitmap_bh: the block group block bitmap
 *
 */
static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv,
                ext3_grpblk_t grp_goal, struct super_block *sb,
                unsigned int group, struct buffer_head *bitmap_bh)
{
        struct ext3_reserve_window_node *search_head;
        ext3_fsblk_t group_first_block, group_end_block, start_block;
        ext3_grpblk_t first_free_block;
        struct rb_root *fs_rsv_root = &EXT3_SB(sb)->s_rsv_window_root;
        unsigned long size;
        int ret;
        spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;

        group_first_block = ext3_group_first_block_no(sb, group);
        group_end_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1;

        if (grp_goal < 0)
                start_block = group_first_block;
        else
                start_block = grp_goal + group_first_block;

        size = my_rsv->rsv_goal_size;

        if (!rsv_is_empty(&my_rsv->rsv_window)) {
                /*
                 * If the old reservation crosses the group boundary
                 * and the goal is inside the old reservation window,
                 * we will come here when we just failed to allocate from
                 * the first part of the window. We still have another part
                 * that belongs to the next group. In this case, there is no
                 * point in discarding our window and trying to allocate a
                 * new one in this group (which will fail): we should keep
                 * the reservation window and simply move on.
                 *
                 * Maybe we could shift the start block of the reservation
                 * window to the first block of the next group.
                 */

                if ((my_rsv->rsv_start <= group_end_block) &&
                                (my_rsv->rsv_end > group_end_block) &&
                                (start_block >= my_rsv->rsv_start))
                        return -1;

                if ((my_rsv->rsv_alloc_hit >
                     (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
                        /*
                         * If the previous allocation hit ratio is
                         * greater than 1/2, then we double the size of
                         * the reservation window the next time;
                         * otherwise we keep it the same.
                         */
                        size = size * 2;
                        if (size > EXT3_MAX_RESERVE_BLOCKS)
                                size = EXT3_MAX_RESERVE_BLOCKS;
                        my_rsv->rsv_goal_size = size;
                }
        }

        spin_lock(rsv_lock);
        /*
         * shift the search start to the window near the goal block
         */
        search_head = search_reserve_window(fs_rsv_root, start_block);

        /*
         * find_next_reservable_window() simply finds a reservable window
         * inside the given range (start_block, group_end_block).
         *
         * To make sure the reservation window has a free bit inside it, we
         * need to check the bitmap after we found a reservable window.
         */
retry:
        ret = find_next_reservable_window(search_head, my_rsv, sb,
                                                start_block, group_end_block);

        if (ret == -1) {
                if (!rsv_is_empty(&my_rsv->rsv_window))
                        rsv_window_remove(sb, my_rsv);
                spin_unlock(rsv_lock);
                return -1;
        }

        /*
         * On success, find_next_reservable_window() returns the
         * reservation window where there is a reservable space after it.
         * Before we reserve this reservable space, we need
         * to make sure there is at least a free block inside this region.
         *
         * Search for the first free bit on the block bitmap and on the
         * copy of the last committed bitmap alternately, until we find an
         * allocatable block.  The search starts from the start block of
         * the reservable space we just found.
         */
        spin_unlock(rsv_lock);
        first_free_block = bitmap_search_next_usable_block(
                        my_rsv->rsv_start - group_first_block,
                        bitmap_bh, group_end_block - group_first_block + 1);

        if (first_free_block < 0) {
                /*
                 * no free block left on the bitmap, no point
                 * in reserving the space. return failed.
                 */
                spin_lock(rsv_lock);
                if (!rsv_is_empty(&my_rsv->rsv_window))
                        rsv_window_remove(sb, my_rsv);
                spin_unlock(rsv_lock);
                return -1;              /* failed */
        }

        start_block = first_free_block + group_first_block;
        /*
         * check if the first free block is within the
         * free space we just reserved
         */
        if (start_block >= my_rsv->rsv_start && start_block < my_rsv->rsv_end)
                return 0;               /* success */
        /*
         * if the first free bit we found is out of the reservable space,
         * continue the search for the next reservable space,
         * starting from where the free block is;
         * we also shift the list head to where we stopped last time
         */
        search_head = my_rsv;
        spin_lock(rsv_lock);
        goto retry;
}
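
/*
 * Worked example of the window-size doubling above (illustrative numbers,
 * not from the original source): a file that made 20 allocations from a
 * 32-block window (hit ratio > 1/2) gets its rsv_goal_size doubled from
 * 32 to 64 on the next reservation, capped at EXT3_MAX_RESERVE_BLOCKS;
 * a file that only hit 10 of 32 keeps a 32-block goal.
 */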

static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
                        struct super_block *sb, int size)
{
        struct ext3_reserve_window_node *next_rsv;
        struct rb_node *next;
        spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;

        if (!spin_trylock(rsv_lock))
                return;

        next = rb_next(&my_rsv->rsv_node);

        if (!next)
                my_rsv->rsv_end += size;
        else {
                next_rsv = rb_entry(next, struct ext3_reserve_window_node,
                                    rsv_node);

                if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
                        my_rsv->rsv_end += size;
                else
                        my_rsv->rsv_end = next_rsv->rsv_start - 1;
        }
        spin_unlock(rsv_lock);
}

/*
 * This is the main function used to allocate a new block and its reservation
 * window.
 *
 * Each time a new block allocation is needed, we first try to allocate from
 * the inode's own reservation.  If it does not have a reservation window,
 * then instead of first looking for a free bit in the bitmap and then
 * checking the reservation list to see whether that bit sits inside
 * somebody else's window, we try to allocate a reservation window for the
 * inode, starting from the goal, and then do the block allocation within
 * the reservation window.
 *
 * This avoids repeatedly searching the reservation list when somebody is
 * looking for a free block (without a reservation) and there are lots of
 * free blocks, but they are all reserved.
 *
 * The insert, remove and find-a-free-space (non-reserved) operations on
 * the sorted per-filesystem reservation tree should all be fast.
 */
static ext3_grpblk_t
ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
                        unsigned int group, struct buffer_head *bitmap_bh,
                        ext3_grpblk_t grp_goal,
                        struct ext3_reserve_window_node * my_rsv,
                        unsigned long *count, int *errp)
{
        ext3_fsblk_t group_first_block;
        ext3_grpblk_t ret = 0;
        int fatal;
        unsigned long num = *count;

        *errp = 0;

        /*
         * Make sure we use undo access for the bitmap, because it is critical
         * that we do the frozen_data COW on bitmap buffers in all cases even
         * if the buffer is in BJ_Forget state in the committing transaction.
         */
        BUFFER_TRACE(bitmap_bh, "get undo access for new block");
        fatal = ext3_journal_get_undo_access(handle, bitmap_bh);
        if (fatal) {
                *errp = fatal;
                return -1;
        }

        /*
         * We don't deal with reservations when
         * the filesystem is mounted without reservations,
         * or the file is not a regular file,
         * or the last attempt to allocate a block with reservations
         * turned on failed.
         */
        if (my_rsv == NULL) {
                ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
                                                grp_goal, count, NULL);
                goto out;
        }
        /*
         * grp_goal is a group-relative block number (if there is a goal):
         * 0 <= grp_goal < EXT3_BLOCKS_PER_GROUP(sb).
         * group_first_block is a filesystem-wide block number: the number
         * of the first block in this group.
         */
        group_first_block = ext3_group_first_block_no(sb, group);

        /*
         * Basically we will allocate a new block from inode's reservation
         * window.
         *
         * We need to allocate a new reservation window, if:
         * a) inode does not have a reservation window; or
         * b) last attempt to allocate a block from existing reservation
         *    failed; or
         * c) we come here with a goal which is not inside the window.
         *
         * We do not need to allocate a new reservation window if we come here
         * at the beginning with a goal and the goal is inside the window, or
         * we don't have a goal but already have a reservation window.
         * In that case we can allocate from the reservation window directly.
         */
        while (1) {
                if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
                        !goal_in_my_reservation(&my_rsv->rsv_window,
                                                grp_goal, group, sb)) {
                        if (my_rsv->rsv_goal_size < *count)
                                my_rsv->rsv_goal_size = *count;
                        ret = alloc_new_reservation(my_rsv, grp_goal, sb,
                                                        group, bitmap_bh);
                        if (ret < 0)
                                break;                  /* failed */

                        if (!goal_in_my_reservation(&my_rsv->rsv_window,
                                                    grp_goal, group, sb))
                                grp_goal = -1;
                } else if (grp_goal >= 0) {
                        /*
                         * rsv_end is a filesystem-wide block number, so
                         * convert the group-relative goal before comparing
                         */
                        int curr = my_rsv->rsv_end -
                                   (grp_goal + group_first_block) + 1;

                        if (curr < *count)
                                try_to_extend_reservation(my_rsv, sb,
                                                          *count - curr);
                }

                if ((my_rsv->rsv_start >=
                     group_first_block + EXT3_BLOCKS_PER_GROUP(sb))
                    || (my_rsv->rsv_end < group_first_block)) {
                        rsv_window_dump(&EXT3_SB(sb)->s_rsv_window_root, 1);
                        BUG();
                }
                ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
                                           grp_goal, &num,
                                           &my_rsv->rsv_window);
                if (ret >= 0) {
                        my_rsv->rsv_alloc_hit += num;
                        *count = num;
                        break;                          /* succeed */
                }
                num = *count;
        }
out:
        if (ret >= 0) {
                BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for "
                                        "bitmap block");
                fatal = ext3_journal_dirty_metadata(handle, bitmap_bh);
                if (fatal) {
                        *errp = fatal;
                        return -1;
                }
                return ret;
        }

        BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
        ext3_journal_release_buffer(handle, bitmap_bh);
        return ret;
}

static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
{
        ext3_fsblk_t free_blocks, root_blocks;

        free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
        root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
        if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
                sbi->s_resuid != current->fsuid &&
                (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
                return 0;
        }
        return 1;
}
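
/*
 * Policy sketch (illustrative numbers, not from the original source): with
 * 100 reserved root blocks, an ordinary user sees ENOSPC once the free
 * count drops to 100, while root, a CAP_SYS_RESOURCE holder, or the
 * configured s_resuid/s_resgid may keep allocating into the reserve.
 */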

/*
 * ext3_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 */
int ext3_should_retry_alloc(struct super_block *sb, int *retries)
{
        if (!ext3_has_free_blocks(EXT3_SB(sb)) || (*retries)++ > 3)
                return 0;

        jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

        return journal_force_commit_nested(EXT3_SB(sb)->s_journal);
}
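
/*
 * Typical caller pattern (a sketch of how the write paths use this, not
 * code from this file; some_allocating_operation() is hypothetical):
 * retry while a transaction commit may free blocks, giving up after a
 * few attempts.
 *
 *      int retries = 0;
 * retry:
 *      err = some_allocating_operation(...);
 *      if (err == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
 *              goto retry;
 */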

/*
 * ext3_new_blocks() uses a goal block to assist allocation.  If the goal is
 * free, or there is a free block within 32 blocks of the goal, that block
 * is allocated.  Otherwise a forward search is made for a free block; within
 * each block group the search first looks for an entire free byte in the block
 * bitmap, and then for any free bit if that fails.
 * This function also updates quota and i_blocks field.
 */
ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
                        ext3_fsblk_t goal, unsigned long *count, int *errp)
{
        struct buffer_head *bitmap_bh = NULL;
        struct buffer_head *gdp_bh;
        int group_no;
        int goal_group;
        ext3_grpblk_t grp_target_blk;   /* blockgroup relative goal block */
        ext3_grpblk_t grp_alloc_blk;    /* blockgroup-relative allocated block*/
        ext3_fsblk_t ret_block;         /* filesystem-wide allocated block */
        int bgi;                        /* blockgroup iteration index */
        int fatal = 0, err;
        int performed_allocation = 0;
        ext3_grpblk_t free_blocks;      /* number of free blocks in a group */
        struct super_block *sb;
        struct ext3_group_desc *gdp;
        struct ext3_super_block *es;
        struct ext3_sb_info *sbi;
        struct ext3_reserve_window_node *my_rsv = NULL;
        struct ext3_block_alloc_info *block_i;
        unsigned short windowsz = 0;
#ifdef EXT3FS_DEBUG
        static int goal_hits, goal_attempts;
#endif
        unsigned long ngroups;
        unsigned long num = *count;

        *errp = -ENOSPC;
        sb = inode->i_sb;
        if (!sb) {
                printk("ext3_new_block: nonexistent device");
                return 0;
        }

        /*
         * Check quota for allocation of this block.
         */
        if (DQUOT_ALLOC_BLOCK(inode, num)) {
                *errp = -EDQUOT;
                return 0;
        }

        sbi = EXT3_SB(sb);
        es = EXT3_SB(sb)->s_es;
        ext3_debug("goal=%lu.\n", goal);
        /*
         * Allocate a block from reservation only when
         * the filesystem is mounted with reservation (the default,
         * -o reservation), and
         * it's a regular file, and
         * the desired window size is greater than 0 (one could use the
         * ioctl command EXT3_IOC_SETRSVSZ to set the window size to 0 to
         * turn off reservation on that particular file)
         */
        block_i = EXT3_I(inode)->i_block_alloc_info;
        if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
                my_rsv = &block_i->rsv_window_node;

        if (!ext3_has_free_blocks(sbi)) {
                *errp = -ENOSPC;
                goto out;
        }

        /*
         * First, test whether the goal block is free.
         */
        if (goal < le32_to_cpu(es->s_first_data_block) ||
            goal >= le32_to_cpu(es->s_blocks_count))
                goal = le32_to_cpu(es->s_first_data_block);
        group_no = (goal - le32_to_cpu(es->s_first_data_block)) /
                        EXT3_BLOCKS_PER_GROUP(sb);
        goal_group = group_no;
retry_alloc:
        gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
        if (!gdp)
                goto io_error;

        free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
        /*
         * if there are not enough free blocks to make a new reservation,
         * turn off reservation for this allocation
         */
        if (my_rsv && (free_blocks < windowsz)
                && (rsv_is_empty(&my_rsv->rsv_window)))
                my_rsv = NULL;

        if (free_blocks > 0) {
                grp_target_blk = ((goal - le32_to_cpu(es->s_first_data_block)) %
                                EXT3_BLOCKS_PER_GROUP(sb));
                bitmap_bh = read_block_bitmap(sb, group_no);
                if (!bitmap_bh)
                        goto io_error;
                grp_alloc_blk = ext3_try_to_allocate_with_rsv(sb, handle,
                                        group_no, bitmap_bh, grp_target_blk,
                                        my_rsv, &num, &fatal);
                if (fatal)
                        goto out;
                if (grp_alloc_blk >= 0)
                        goto allocated;
        }
1305
1306         ngroups = EXT3_SB(sb)->s_groups_count;
1307         smp_rmb();
1308
1309         /*
1310          * Now search the rest of the groups.  We assume that
1311          * i and gdp correctly point to the last group visited.
1312          */
1313         for (bgi = 0; bgi < ngroups; bgi++) {
1314                 group_no++;
1315                 if (group_no >= ngroups)
1316                         group_no = 0;
1317                 gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
1318                 if (!gdp) {
1319                         *errp = -EIO;
1320                         goto out;
1321                 }
1322                 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
1323                 /*
1324                  * skip this group if the number of free blocks
1325                  * is no more than half of the reservation
1326                  * window size.
1327                  */
1328                 if (free_blocks <= (windowsz/2))
1329                         continue;
1330
1331                 brelse(bitmap_bh);
1332                 bitmap_bh = read_block_bitmap(sb, group_no);
1333                 if (!bitmap_bh)
1334                         goto io_error;
1335                 /*
1336                  * try to allocate block(s) from this group, without a goal (-1).
1337                  */
1338                 grp_alloc_blk = ext3_try_to_allocate_with_rsv(sb, handle,
1339                                         group_no, bitmap_bh, -1, my_rsv,
1340                                         &num, &fatal);
1341                 if (fatal)
1342                         goto out;
1343                 if (grp_alloc_blk >= 0)
1344                         goto allocated;
1345         }
1346         /*
1347          * We may have ended up with a bogus ENOSPC error above because
1348          * the filesystem is "full" of reservations even though there
1349          * are in fact free blocks available on disk.  In that case,
1350          * forget about the reservations and retry the allocation
1351          * without them.
1352          */
1353         if (my_rsv) {
1354                 my_rsv = NULL;
1355                 group_no = goal_group;
1356                 goto retry_alloc;
1357         }
1358         /* No space left on the device */
1359         *errp = -ENOSPC;
1360         goto out;
1361
1362 allocated:
1363
1364         ext3_debug("using block group %d(%d)\n",
1365                         group_no, le16_to_cpu(gdp->bg_free_blocks_count));
1366
1367         BUFFER_TRACE(gdp_bh, "get_write_access");
1368         fatal = ext3_journal_get_write_access(handle, gdp_bh);
1369         if (fatal)
1370                 goto out;
1371
1372         ret_block = grp_alloc_blk + ext3_group_first_block_no(sb, group_no);
1373
1374         if (in_range(le32_to_cpu(gdp->bg_block_bitmap), ret_block, num) ||
1375             in_range(le32_to_cpu(gdp->bg_inode_bitmap), ret_block, num) ||
1376             in_range(ret_block, le32_to_cpu(gdp->bg_inode_table),
1377                       EXT3_SB(sb)->s_itb_per_group) ||
1378             in_range(ret_block + num - 1, le32_to_cpu(gdp->bg_inode_table),
1379                       EXT3_SB(sb)->s_itb_per_group))
1380                 ext3_error(sb, "ext3_new_block",
1381                             "Allocating block in system zone - "
1382                             "blocks from "E3FSBLK", length %lu",
1383                              ret_block, num);
1384
1385         performed_allocation = 1;
1386
1387 #ifdef CONFIG_JBD_DEBUG
1388         {
1389                 struct buffer_head *debug_bh;
1390
1391                 /* Record bitmap buffer state in the newly allocated block */
1392                 debug_bh = sb_find_get_block(sb, ret_block);
1393                 if (debug_bh) {
1394                         BUFFER_TRACE(debug_bh, "state when allocated");
1395                         BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
1396                         brelse(debug_bh);
1397                 }
1398         }
1399         jbd_lock_bh_state(bitmap_bh);
1400         spin_lock(sb_bgl_lock(sbi, group_no));
1401         if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
1402                 int i;
1403
1404                 for (i = 0; i < num; i++) {
1405                         if (ext3_test_bit(grp_alloc_blk+i,
1406                                         bh2jh(bitmap_bh)->b_committed_data)) {
1407                                 printk(KERN_ERR "%s: block was unexpectedly "
1408                                         "set in b_committed_data\n", __FUNCTION__);
1409                         }
1410                 }
1411         }
1412         ext3_debug("found bit %d\n", grp_alloc_blk);
1413         spin_unlock(sb_bgl_lock(sbi, group_no));
1414         jbd_unlock_bh_state(bitmap_bh);
1415 #endif
1416
1417         if (ret_block + num - 1 >= le32_to_cpu(es->s_blocks_count)) {
1418                 ext3_error(sb, "ext3_new_block",
1419                             "block("E3FSBLK") >= blocks count(%d) - "
1420                             "block_group = %d, es == %p ", ret_block,
1421                         le32_to_cpu(es->s_blocks_count), group_no, es);
1422                 goto out;
1423         }
1424
1425         /*
1426          * It is up to the caller to add the new buffer to a journal
1427          * list of some description.  We don't know in advance whether
1428          * the caller wants to use it as metadata or data.
1429          */
1430         ext3_debug("allocating block %lu. Goal hits %d of %d.\n",
1431                         ret_block, goal_hits, goal_attempts);
1432
1433         spin_lock(sb_bgl_lock(sbi, group_no));
1434         gdp->bg_free_blocks_count =
1435                         cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - num);
1436         spin_unlock(sb_bgl_lock(sbi, group_no));
1437         percpu_counter_mod(&sbi->s_freeblocks_counter, -num);
1438
1439         BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
1440         err = ext3_journal_dirty_metadata(handle, gdp_bh);
1441         if (!fatal)
1442                 fatal = err;
1443
1444         sb->s_dirt = 1;
1445         if (fatal)
1446                 goto out;
1447
1448         *errp = 0;
1449         brelse(bitmap_bh);
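        /* refund the quota charged up front for blocks we did not use */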
1450         DQUOT_FREE_BLOCK(inode, *count-num);
1451         *count = num;
1452         return ret_block;
1453
1454 io_error:
1455         *errp = -EIO;
1456 out:
1457         if (fatal) {
1458                 *errp = fatal;
1459                 ext3_std_error(sb, fatal);
1460         }
1461         /*
1462          * Undo the quota charge if no block was actually allocated
1463          */
1464         if (!performed_allocation)
1465                 DQUOT_FREE_BLOCK(inode, *count);
1466         brelse(bitmap_bh);
1467         return 0;
1468 }
1469
1470 ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode,
1471                         ext3_fsblk_t goal, int *errp)
1472 {
1473         unsigned long count = 1;
1474
1475         return ext3_new_blocks(handle, inode, goal, &count, errp);
1476 }
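/*
 * Sketch of a typical call (surrounding context assumed): allocate a
 * single block near "goal" for "inode" inside a running transaction.
 * A return value of 0 means failure and *errp holds the error code.
 *
 *	int err;
 *	ext3_fsblk_t blk = ext3_new_block(handle, inode, goal, &err);
 *	if (!blk)
 *		return err;
 */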
1477
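/*
 * ext3_count_free_blocks() returns the number of free blocks recorded
 * in the group descriptors; when built with EXT3FS_DEBUG it recounts
 * the on-disk bitmaps as well and logs any disagreement.
 */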
1478 ext3_fsblk_t ext3_count_free_blocks(struct super_block *sb)
1479 {
1480         ext3_fsblk_t desc_count;
1481         struct ext3_group_desc *gdp;
1482         int i;
1483         unsigned long ngroups = EXT3_SB(sb)->s_groups_count;
1484 #ifdef EXT3FS_DEBUG
1485         struct ext3_super_block *es;
1486         ext3_fsblk_t bitmap_count;
1487         unsigned long x;
1488         struct buffer_head *bitmap_bh = NULL;
1489
1490         es = EXT3_SB(sb)->s_es;
1491         desc_count = 0;
1492         bitmap_count = 0;
1493         gdp = NULL;
1494
1495         smp_rmb();
1496         for (i = 0; i < ngroups; i++) {
1497                 gdp = ext3_get_group_desc(sb, i, NULL);
1498                 if (!gdp)
1499                         continue;
1500                 desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
1501                 brelse(bitmap_bh);
1502                 bitmap_bh = read_block_bitmap(sb, i);
1503                 if (bitmap_bh == NULL)
1504                         continue;
1505
1506                 x = ext3_count_free(bitmap_bh, sb->s_blocksize);
1507                 printk("group %d: stored = %d, counted = %lu\n",
1508                         i, le16_to_cpu(gdp->bg_free_blocks_count), x);
1509                 bitmap_count += x;
1510         }
1511         brelse(bitmap_bh);
1512         printk("ext3_count_free_blocks: stored = %u"
1513                 ", computed = "E3FSBLK", "E3FSBLK"\n",
1514                 le32_to_cpu(es->s_free_blocks_count),
1515                 desc_count, bitmap_count);
1516         return bitmap_count;
1517 #else
1518         desc_count = 0;
1519         smp_rmb();
1520         for (i = 0; i < ngroups; i++) {
1521                 gdp = ext3_get_group_desc(sb, i, NULL);
1522                 if (!gdp)
1523                         continue;
1524                 desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
1525         }
1526
1527         return desc_count;
1528 #endif
1529 }
1530
1531 static inline int
1532 block_in_use(ext3_fsblk_t block, struct super_block *sb, unsigned char *map)
1533 {
1534         return ext3_test_bit ((block -
1535                 le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block)) %
1536                          EXT3_BLOCKS_PER_GROUP(sb), map);
1537 }
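/*
 * Example (assumed geometry): with 8192 blocks per group and
 * s_first_data_block == 1, block 20000 maps to bit
 * (20000 - 1) % 8192 == 3615 of its group's bitmap.
 */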
1538
1539 static inline int test_root(int a, int b)
1540 {
1541         int num = b;
1542
1543         while (a > num)
1544                 num *= b;
1545         return num == a;
1546 }
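/*
 * test_root(a, b) is true when a is a positive power of b, e.g.
 * test_root(49, 7) == 1 while test_root(35, 7) == 0.
 */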
1547
1548 static int ext3_group_sparse(int group)
1549 {
1550         if (group <= 1)
1551                 return 1;
1552         if (!(group & 1))
1553                 return 0;
1554         return (test_root(group, 7) || test_root(group, 5) ||
1555                 test_root(group, 3));
1556 }
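/*
 * So groups 0 and 1 plus the powers of 3, 5 and 7 are "sparse":
 * 0, 1, 3, 5, 7, 9, 25, 27, 49, 81, 125, ...
 */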
1557
1558 /**
1559  *      ext3_bg_has_super - number of blocks used by the superblock in group
1560  *      @sb: superblock for filesystem
1561  *      @group: group number to check
1562  *
1563  *      Return the number of blocks used by the superblock (primary or backup)
1564  *      in this group.  Currently this will be only 0 or 1.
1565  */
1566 int ext3_bg_has_super(struct super_block *sb, int group)
1567 {
1568         if (EXT3_HAS_RO_COMPAT_FEATURE(sb,
1569                                 EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
1570                         !ext3_group_sparse(group))
1571                 return 0;
1572         return 1;
1573 }
1574
1575 static unsigned long ext3_bg_num_gdb_meta(struct super_block *sb, int group)
1576 {
1577         unsigned long metagroup = group / EXT3_DESC_PER_BLOCK(sb);
1578         unsigned long first = metagroup * EXT3_DESC_PER_BLOCK(sb);
1579         unsigned long last = first + EXT3_DESC_PER_BLOCK(sb) - 1;
1580
1581         if (group == first || group == first + 1 || group == last)
1582                 return 1;
1583         return 0;
1584 }
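/*
 * Example: with 4KB blocks a descriptor block holds 128 descriptors,
 * so for the metagroup spanning groups 0-127 only groups 0, 1 and 127
 * carry a copy of that metagroup's descriptor block.
 */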
1585
1586 static unsigned long ext3_bg_num_gdb_nometa(struct super_block *sb, int group)
1587 {
1588         if (EXT3_HAS_RO_COMPAT_FEATURE(sb,
1589                                 EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
1590                         !ext3_group_sparse(group))
1591                 return 0;
1592         return EXT3_SB(sb)->s_gdb_count;
1593 }
1594
1595 /**
1596  *      ext3_bg_num_gdb - number of blocks used by the group table in group
1597  *      @sb: superblock for filesystem
1598  *      @group: group number to check
1599  *
1600  *      Return the number of blocks used by the group descriptor table
1601  *      (primary or backup) in this group.  In the future there may be a
1602  *      different number of descriptor blocks in each group.
1603  */
1604 unsigned long ext3_bg_num_gdb(struct super_block *sb, int group)
1605 {
1606         unsigned long first_meta_bg =
1607                         le32_to_cpu(EXT3_SB(sb)->s_es->s_first_meta_bg);
1608         unsigned long metagroup = group / EXT3_DESC_PER_BLOCK(sb);
1609
1610         if (!EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_META_BG) ||
1611                         metagroup < first_meta_bg)
1612                 return ext3_bg_num_gdb_nometa(sb, group);
1613
1614         return ext3_bg_num_gdb_meta(sb, group);
1615
1616 }