ext4: Widen type of ext4_sb_info.s_mb_maxs[]
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */


/*
 * mballoc.c contains the multiblock allocation routines
 */

#include "mballoc.h"
/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in a few groups
 *
 * TODO v4:
 *   - normalization should take into account whether file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */

/*
 * The allocation request involves a request for multiple blocks near to the
 * goal (block) value specified.
 *
 * During the initialization phase of the allocator we decide to use group
 * preallocation or inode preallocation depending on the size of the file.
 * The size of the file could be the resulting file size we would have after
 * allocation, or the current file size, whichever is larger. If the size is
 * less than sbi->s_mb_stream_request we select group preallocation. The
 * default value of s_mb_stream_request is 16 blocks. This can also be tuned
 * via /proc/fs/ext4/<partition>/stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that we keep small files closer together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
 * spaces for this particular inode. An inode prealloc space is represented
 * as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length of this prealloc space
 * pa_free   -> free space available in this prealloc space
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. Only if the logical file block falls within the range of the
 * prealloc space do we consume that particular prealloc space. This makes
 * sure that we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to note about inode prealloc space is that we don't
 * modify the values associated with the inode prealloc space except
 * pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and we have
 * the group allocation flag set then we look at the locality group prealloc
 * space. These are per-CPU prealloc lists represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-CPU locality group is to reduce the contention
 * between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc and/or locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (a struct inode) whose file offsets get
 * mapped to the buddy and bitmap information regarding the different
 * groups. The buddy information is attached to the buddy cache inode so
 * that we can access it through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy. It involves the block bitmap
 * and the buddy information, stored in the inode as:
 *
 *  {                        page                        }
 *  [ group 0 buddy][ group 0 bitmap] [group 1][ group 1]...
 *
 *
 * one block each for the bitmap and the buddy information. So for each
 * group we take up 2 blocks. A page can contain blocks_per_page
 * (PAGE_CACHE_SIZE / blocksize) blocks, so it can hold information
 * regarding groups_per_page groups, which is blocks_per_page/2.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we need. The extra
 * blocks that we get after allocation are added to the respective prealloc
 * list. In case of inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
 * 512 blocks. This can be tuned via
 * /proc/fs/ext4/<partition>/group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with the
 * -o stripe=<value> option the group prealloc request is normalized to the
 * stripe value (sbi->s_stripe).
 *
 * The regular allocator (using the buddy cache) supports a few tunables:
 *
 * /proc/fs/ext4/<partition>/min_to_scan
 * /proc/fs/ext4/<partition>/max_to_scan
 * /proc/fs/ext4/<partition>/order2_req
 *
 * The regular allocator uses the buddy scan only if the request length is a
 * power of 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs.
 * The value of s_mb_order2_reqs can be tuned via
 * /proc/fs/ext4/<partition>/order2_req. If the request length is equal to
 * the stripe size (sbi->s_stripe), we try to search for contiguous blocks
 * in stripe-size units. This should result in better allocation on RAID
 * setups. If not, we search in the specific group using the bitmap for the
 * best extents. The tunables min_to_scan and max_to_scan control the
 * behaviour here. min_to_scan indicates how long mballoc __must__ look for
 * a best extent and max_to_scan indicates how long mballoc __can__ look for
 * a best extent among the found extents. Searching for blocks starts with
 * the group specified as the goal value in the allocation context via
 * ac_g_ex. Each group is first checked against the criteria for whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups
 * are checked.
 *
 * Both of the prealloc spaces are populated as described above. So for the
 * first request we will hit the buddy cache, which will result in this
 * prealloc space getting filled. The prealloc space is then later used for
 * subsequent requests.
 */

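/*
 * Editor's illustration (hypothetical helper, not used by the allocator):
 * a minimal sketch of the buddy-cache geometry described above. Every
 * group stores one bitmap block followed by one buddy block in the cache
 * inode, so the page index and in-page offset of a group's bitmap block
 * follow directly; ext4_mb_load_buddy() below does this same arithmetic.
 */
static inline void mb_example_bitmap_location(ext4_group_t group,
					      int blocks_per_page,
					      int *pnum, int *poff)
{
	int block = group * 2;			/* bitmap block of this group */

	*pnum = block / blocks_per_page;	/* page index in buddy cache */
	*poff = block % blocks_per_page;	/* block offset inside page */
}
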
/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to specific inode and can be used for this inode only.
 *    it describes part of inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks number of blocks left
 *    unused. so, before taking some block from descriptor, one must
 *    make sure corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to specific locality group which does not translate to
 *    permanent set of inodes: inode can join and leave group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "at any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count number of
 * blocks: how many blocks marked used/free in on-disk bitmap, buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:                       buddy = on-disk + PAs
 *  - new PA:                           buddy += N; PA = N
 *  - use inode PA:                     on-disk += N; PA -= N
 *  - discard inode PA:                 buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:            on-disk += N; PA -= N
 *  - discard locality group PA:        buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that on-disk bitmap
 *        is used in real operation because we can't know actual used
 *        bits from PA, only from on-disk bitmap
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores,
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     bit set and PA claims same block, it's OK. IOW, one can set bit in
 *     on-disk bitmap if buddy has same bit set and/or PA covers corresponding
 *     block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for PA are allocated in the buddy, buddy must be referenced
 *      until PA is linked to allocation group to avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either on-disk bitmap or PA has uptodate data
 *      given (3) we care that PA-=N operation doesn't interfere with init
 *    - discard inode PA
 *      the simplest way would be to have buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *  - use inode PA
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *
 * now we're ready to draw a few conclusions:
 *  - while a PA is referenced, no discard of it is possible
 *  - a PA is referenced until its blocks are marked in the on-disk bitmap
 *  - PA changes only after the on-disk bitmap does
 *  - discard must not compete with init. either init is done before
 *    any discard, or they're serialized somehow
 *  - buddy init as sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA to emptiness. no need to modify
 * the buddy in this case, but we should take care of concurrent init
 *
 */

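/*
 * Editor's sketch of the accounting identities above, using toy counters
 * rather than the kernel's structures (all names hypothetical). Each
 * helper preserves "in-core buddy = on-disk bitmap + PAs" in terms of
 * used-block counts.
 */
struct mb_example_counters {
	int ondisk;	/* blocks marked used in the on-disk bitmap */
	int pa;		/* blocks still held by preallocation descriptors */
	int buddy;	/* blocks marked used in the in-core buddy */
};

/* new PA: buddy += N; PA = N */
static inline void mb_example_new_pa(struct mb_example_counters *c, int n)
{
	c->buddy += n;	/* blocks reserved in the buddy ... */
	c->pa += n;	/* ... are parked in a preallocation */
}

/* use inode PA: on-disk += N; PA -= N (the buddy already counts them) */
static inline void mb_example_use_pa(struct mb_example_counters *c, int n)
{
	c->ondisk += n;
	c->pa -= n;
}
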
/*
 * Logic in a few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group        (group)
 *  - object (inode/locality)   (object)
 *  - per-pa lock               (pa)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *        pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *        pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *        pa
 *    group
 *        object
 *
 */

static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
	return addr;
}

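/*
 * Editor's worked example for mb_correct_addr_and_bit(), assuming
 * BITS_PER_LONG == 64: for addr = base + 5 (base 8-byte aligned) and
 * bit = 2, the low three address bits are folded into the bit index,
 * giving bit = 2 + (5 << 3) = 42 and addr = base -- the same bit of
 * memory, now addressed from a long-aligned pointer.
 */
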
static inline int mb_test_bit(int bit, void *addr)
{
	/*
	 * ext4_test_bit on architectures like powerpc
	 * needs an unsigned long aligned address
	 */
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit(bit, addr);
}

static inline void mb_set_bit_atomic(spinlock_t *lock, int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit_atomic(lock, bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit(bit, addr);
}

static inline void mb_clear_bit_atomic(spinlock_t *lock, int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit_atomic(lock, bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
	char *bb;

	BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
	BUG_ON(max == NULL);

	if (order > e4b->bd_blkbits + 1) {
		*max = 0;
		return NULL;
	}

	/* at order 0 we see each particular block */
	*max = 1 << (e4b->bd_blkbits + 3);
	if (order == 0)
		return EXT4_MB_BITMAP(e4b);

	bb = EXT4_MB_BUDDY(e4b) + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

	return bb;
}

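/*
 * Editor's worked example for mb_find_buddy(), assuming 4KB blocks
 * (bd_blkbits == 12): order 0 uses the block bitmap itself, with
 * *max = 1 << (12 + 3) = 32768 bits, one per block. Each higher order
 * tracks pairs of the order below, so s_mb_maxs[order] halves at each
 * step (16384, 8192, ...) while s_mb_offsets[order] gives the byte
 * offset of that order's bitmap inside the buddy block.
 */
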
#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
			   int first, int count)
{
	int i;
	struct super_block *sb = e4b->bd_sb;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	BUG_ON(!ext4_is_group_locked(sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
			ext4_fsblk_t blocknr;
			blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
			blocknr += first + i;
			blocknr +=
			    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);

			ext4_error(sb, __func__, "double-free of inode"
				   " %lu's block %llu(bit %u in group %lu)\n",
				   inode ? inode->i_ino : 0, blocknr,
				   first + i, e4b->bd_group);
		}
		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
	int i;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	BUG_ON(!ext4_is_group_locked(e4b->bd_sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
		unsigned char *b1, *b2;
		int i;
		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
		b2 = (unsigned char *) bitmap;
		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
			if (b1[i] != b2[i]) {
				printk(KERN_ERR "corruption in group %lu "
				       "at byte %u(%u): %x in copy != %x "
				       "on disk/prealloc\n",
				       e4b->bd_group, i, i * 8, b1[i], b2[i]);
				BUG();
			}
		}
	}
}

#else
static inline void mb_free_blocks_double(struct inode *inode,
				struct ext4_buddy *e4b, int first, int count)
{
	return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
						int first, int count)
{
	return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	return;
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)						\
do {									\
	if (!(assert)) {						\
		printk(KERN_EMERG					\
			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
			function, file, line, # assert);		\
		BUG();							\
	}								\
} while (0)

static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
				const char *function, int line)
{
	struct super_block *sb = e4b->bd_sb;
	int order = e4b->bd_blkbits + 1;
	int max;
	int max2;
	int i;
	int j;
	int k;
	int count;
	struct ext4_group_info *grp;
	int fragments = 0;
	int fstart;
	struct list_head *cur;
	void *buddy;
	void *buddy2;

	{
		static int mb_check_counter;
		if (mb_check_counter++ % 100 != 0)
			return 0;
	}

	while (order > 1) {
		buddy = mb_find_buddy(e4b, order, &max);
		MB_CHECK_ASSERT(buddy);
		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
		MB_CHECK_ASSERT(buddy2);
		MB_CHECK_ASSERT(buddy != buddy2);
		MB_CHECK_ASSERT(max * 2 == max2);

		count = 0;
		for (i = 0; i < max; i++) {

			if (mb_test_bit(i, buddy)) {
				/* only single bit in buddy2 may be 1 */
				if (!mb_test_bit(i << 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit((i<<1)+1, buddy2));
				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit(i << 1, buddy2));
				}
				continue;
			}

			/* both bits in buddy2 must be 0 */
			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

			for (j = 0; j < (1 << order); j++) {
				k = (i * (1 << order)) + j;
				MB_CHECK_ASSERT(
					!mb_test_bit(k, EXT4_MB_BITMAP(e4b)));
			}
			count++;
		}
		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
		order--;
	}

	fstart = -1;
	buddy = mb_find_buddy(e4b, 0, &max);
	for (i = 0; i < max; i++) {
		if (!mb_test_bit(i, buddy)) {
			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
			if (fstart == -1) {
				fragments++;
				fstart = i;
			}
			continue;
		}
		fstart = -1;
		/* check used bits only */
		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
			buddy2 = mb_find_buddy(e4b, j, &max2);
			k = i >> j;
			MB_CHECK_ASSERT(k < max2);
			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
		}
	}
	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

	grp = ext4_get_group_info(sb, e4b->bd_group);
	buddy = mb_find_buddy(e4b, 0, &max);
	list_for_each(cur, &grp->bb_prealloc_list) {
		ext4_group_t groupnr;
		struct ext4_prealloc_space *pa;
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
		for (i = 0; i < pa->pa_len; i++)
			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
	}
	return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
					__FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/* FIXME!! need more doc */
static void ext4_mb_mark_free_simple(struct super_block *sb,
				void *buddy, unsigned first, int len,
					struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned short min;
	unsigned short max;
	unsigned short chunk;
	unsigned short border;

	BUG_ON(len > EXT4_BLOCKS_PER_GROUP(sb));

	border = 2 << sb->s_blocksize_bits;

	while (len > 0) {
		/* find how many blocks can be covered since this position */
		max = ffs(first | border) - 1;

		/* find how many blocks of power 2 we need to mark */
		min = fls(len) - 1;

		if (max < min)
			min = max;
		chunk = 1 << min;

		/* mark multiblock chunks only */
		grp->bb_counters[min]++;
		if (min > 0)
			mb_clear_bit(first >> min,
				     buddy + sbi->s_mb_offsets[min]);

		len -= chunk;
		first += chunk;
	}
}

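/*
 * Editor's worked example for ext4_mb_mark_free_simple(), marking the
 * free range first = 5, len = 3. Pass 1: max = ffs(5 | border) - 1 = 0
 * forces an order-0 chunk, so bb_counters[0]++ and we advance to
 * first = 6, len = 2. Pass 2: max = ffs(6 | border) - 1 = 1 and
 * min = fls(2) - 1 = 1, so one order-1 chunk is counted and bit
 * 6 >> 1 = 3 is cleared in the order-1 buddy bitmap. The range is thus
 * decomposed into maximal, naturally aligned power-of-two chunks.
 */
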
static void ext4_mb_generate_buddy(struct super_block *sb,
				void *buddy, void *bitmap, ext4_group_t group)
{
	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
	unsigned short max = EXT4_BLOCKS_PER_GROUP(sb);
	unsigned short i = 0;
	unsigned short first;
	unsigned short len;
	unsigned free = 0;
	unsigned fragments = 0;
	unsigned long long period = get_cycles();

	/* initialize buddy from bitmap which is aggregation
	 * of on-disk bitmap and preallocations */
	i = mb_find_next_zero_bit(bitmap, max, 0);
	grp->bb_first_free = i;
	while (i < max) {
		fragments++;
		first = i;
		i = mb_find_next_bit(bitmap, max, i);
		len = i - first;
		free += len;
		if (len > 1)
			ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
		else
			grp->bb_counters[0]++;
		if (i < max)
			i = mb_find_next_zero_bit(bitmap, max, i);
	}
	grp->bb_fragments = fragments;

	if (free != grp->bb_free) {
		ext4_error(sb, __func__,
			"EXT4-fs: group %lu: %u blocks in bitmap, %u in gd\n",
			group, free, grp->bb_free);
		/*
		 * If we intend to continue, we consider the group descriptor
		 * corrupt and update bb_free using the bitmap value
		 */
		grp->bb_free = free;
	}

	clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));

	period = get_cycles() - period;
	spin_lock(&EXT4_SB(sb)->s_bal_lock);
	EXT4_SB(sb)->s_mb_buddies_generated++;
	EXT4_SB(sb)->s_mb_generation_time += period;
	spin_unlock(&EXT4_SB(sb)->s_bal_lock);
}

/* The buddy information is attached to the buddy cache inode
 * for convenience. The information regarding each group
 * is loaded via ext4_mb_load_buddy. It involves the
 * block bitmap and the buddy information, which are
 * stored in the inode as
 *
 * {                        page                        }
 * [ group 0 buddy][ group 0 bitmap] [group 1][ group 1]...
 *
 *
 * one block each for the bitmap and the buddy information.
 * So for each group we take up 2 blocks. A page can
 * contain blocks_per_page (PAGE_CACHE_SIZE / blocksize) blocks.
 * So it can hold information regarding groups_per_page groups,
 * which is blocks_per_page/2.
 */

static int ext4_mb_init_cache(struct page *page, char *incore)
{
	int blocksize;
	int blocks_per_page;
	int groups_per_page;
	int err = 0;
	int i;
	ext4_group_t first_group;
	int first_block;
	struct super_block *sb;
	struct buffer_head *bhs;
	struct buffer_head **bh;
	struct inode *inode;
	char *data;
	char *bitmap;

	mb_debug("init page %lu\n", page->index);

	inode = page->mapping->host;
	sb = inode->i_sb;
	blocksize = 1 << inode->i_blkbits;
	blocks_per_page = PAGE_CACHE_SIZE / blocksize;

	groups_per_page = blocks_per_page >> 1;
	if (groups_per_page == 0)
		groups_per_page = 1;

	/* allocate buffer_heads to read bitmaps */
	if (groups_per_page > 1) {
		err = -ENOMEM;
		i = sizeof(struct buffer_head *) * groups_per_page;
		bh = kzalloc(i, GFP_NOFS);
		if (bh == NULL)
			goto out;
	} else
		bh = &bhs;

	first_group = page->index * blocks_per_page / 2;

	/* read all groups the page covers into the cache */
	for (i = 0; i < groups_per_page; i++) {
		struct ext4_group_desc *desc;

		if (first_group + i >= EXT4_SB(sb)->s_groups_count)
			break;

		err = -EIO;
		desc = ext4_get_group_desc(sb, first_group + i, NULL);
		if (desc == NULL)
			goto out;

		err = -ENOMEM;
		bh[i] = sb_getblk(sb, ext4_block_bitmap(sb, desc));
		if (bh[i] == NULL)
			goto out;

		if (buffer_uptodate(bh[i]) &&
		    !(desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))
			continue;

		lock_buffer(bh[i]);
		spin_lock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
		if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
			ext4_init_block_bitmap(sb, bh[i],
						first_group + i, desc);
			set_buffer_uptodate(bh[i]);
			unlock_buffer(bh[i]);
			spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
			continue;
		}
		spin_unlock(sb_bgl_lock(EXT4_SB(sb), first_group + i));
		get_bh(bh[i]);
		bh[i]->b_end_io = end_buffer_read_sync;
		submit_bh(READ, bh[i]);
		mb_debug("read bitmap for group %lu\n", first_group + i);
	}

	/* wait for I/O completion */
	for (i = 0; i < groups_per_page && bh[i]; i++)
		wait_on_buffer(bh[i]);

	err = -EIO;
	for (i = 0; i < groups_per_page && bh[i]; i++)
		if (!buffer_uptodate(bh[i]))
			goto out;

	err = 0;
	first_block = page->index * blocks_per_page;
	for (i = 0; i < blocks_per_page; i++) {
		int group;
		struct ext4_group_info *grinfo;

		group = (first_block + i) >> 1;
		if (group >= EXT4_SB(sb)->s_groups_count)
			break;

		/*
		 * data carries the information regarding this
		 * particular group in the format specified
		 * above
		 */
		data = page_address(page) + (i * blocksize);
		bitmap = bh[group - first_group]->b_data;

		/*
		 * We place the buddy block and bitmap block
		 * close together
		 */
		if ((first_block + i) & 1) {
			/* this is block of buddy */
			BUG_ON(incore == NULL);
			mb_debug("put buddy for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);
			memset(data, 0xff, blocksize);
			grinfo = ext4_get_group_info(sb, group);
			grinfo->bb_fragments = 0;
			memset(grinfo->bb_counters, 0,
			       sizeof(unsigned short)*(sb->s_blocksize_bits+2));
			/*
			 * incore got set to the group block bitmap below
			 */
			ext4_mb_generate_buddy(sb, data, incore, group);
			incore = NULL;
		} else {
			/* this is block of bitmap */
			BUG_ON(incore != NULL);
			mb_debug("put bitmap for group %u in page %lu/%x\n",
				group, page->index, i * blocksize);

			/* see comments in ext4_mb_put_pa() */
			ext4_lock_group(sb, group);
			memcpy(data, bitmap, blocksize);

			/* mark all preallocated blks used in in-core bitmap */
			ext4_mb_generate_from_pa(sb, data, group);
			ext4_unlock_group(sb, group);

			/* set incore so that the buddy information can be
			 * generated using this
			 */
			incore = data;
		}
	}
	SetPageUptodate(page);

out:
	if (bh) {
		for (i = 0; i < groups_per_page && bh[i]; i++)
			brelse(bh[i]);
		if (bh != &bhs)
			kfree(bh);
	}
	return err;
}

static noinline_for_stack int
ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group,
					struct ext4_buddy *e4b)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct inode *inode = sbi->s_buddy_cache;
	int blocks_per_page;
	int block;
	int pnum;
	int poff;
	struct page *page;
	int ret;

	mb_debug("load group %lu\n", group);

	blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;

	e4b->bd_blkbits = sb->s_blocksize_bits;
	e4b->bd_info = ext4_get_group_info(sb, group);
	e4b->bd_sb = sb;
	e4b->bd_group = group;
	e4b->bd_buddy_page = NULL;
	e4b->bd_bitmap_page = NULL;

	/*
	 * the buddy cache inode stores the block bitmap
	 * and buddy information in consecutive blocks.
	 * So for each group we need two blocks.
	 */
	block = group * 2;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	/* we could use find_or_create_page(), but it locks the page,
	 * which we'd like to avoid in the fast path ... */
	page = find_get_page(inode->i_mapping, pnum);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			page_cache_release(page);
		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, NULL);
				if (ret) {
					unlock_page(page);
					goto err;
				}
				mb_cmp_bitmaps(e4b, page_address(page) +
					       (poff * sb->s_blocksize));
			}
			unlock_page(page);
		}
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	e4b->bd_bitmap_page = page;
	e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
	mark_page_accessed(page);

	block++;
	pnum = block / blocks_per_page;
	poff = block % blocks_per_page;

	page = find_get_page(inode->i_mapping, pnum);
	if (page == NULL || !PageUptodate(page)) {
		if (page)
			page_cache_release(page);
		page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS);
		if (page) {
			BUG_ON(page->mapping != inode->i_mapping);
			if (!PageUptodate(page)) {
				ret = ext4_mb_init_cache(page, e4b->bd_bitmap);
				if (ret) {
					unlock_page(page);
					goto err;
				}
			}
			unlock_page(page);
		}
	}
	if (page == NULL || !PageUptodate(page)) {
		ret = -EIO;
		goto err;
	}
	e4b->bd_buddy_page = page;
	e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize);
	mark_page_accessed(page);

	BUG_ON(e4b->bd_bitmap_page == NULL);
	BUG_ON(e4b->bd_buddy_page == NULL);

	return 0;

err:
	if (e4b->bd_bitmap_page)
		page_cache_release(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		page_cache_release(e4b->bd_buddy_page);
	e4b->bd_buddy = NULL;
	e4b->bd_bitmap = NULL;
	return ret;
}

static void ext4_mb_release_desc(struct ext4_buddy *e4b)
{
	if (e4b->bd_bitmap_page)
		page_cache_release(e4b->bd_bitmap_page);
	if (e4b->bd_buddy_page)
		page_cache_release(e4b->bd_buddy_page);
}


static int mb_find_order_for_block(struct ext4_buddy *e4b, int block)
{
	int order = 1;
	void *bb;

	BUG_ON(EXT4_MB_BITMAP(e4b) == EXT4_MB_BUDDY(e4b));
	BUG_ON(block >= (1 << (e4b->bd_blkbits + 3)));

	bb = EXT4_MB_BUDDY(e4b);
	while (order <= e4b->bd_blkbits + 1) {
		block = block >> 1;
		if (!mb_test_bit(block, bb)) {
			/* this block is part of buddy of order 'order' */
			return order;
		}
		bb += 1 << (e4b->bd_blkbits - order);
		order++;
	}
	return 0;
}

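/*
 * Editor's worked example for mb_find_order_for_block(): a cleared bit
 * in the order-k buddy bitmap means "this 2^k chunk is free as a whole".
 * For block 6 sitting in a free pair {6,7} (order-1 bit 3 = 6 >> 1 is
 * clear) the walk returns 1; if even that pair were split, every
 * covering bit would be set and the function would fall through to
 * return 0, i.e. a lone order-0 block.
 */
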
static void mb_clear_bits(spinlock_t *lock, void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: clear whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0;
			cur += 32;
			continue;
		}
		mb_clear_bit_atomic(lock, cur, bm);
		cur++;
	}
}

static void mb_set_bits(spinlock_t *lock, void *bm, int cur, int len)
{
	__u32 *addr;

	len = cur + len;
	while (cur < len) {
		if ((cur & 31) == 0 && (len - cur) >= 32) {
			/* fast path: set whole word at once */
			addr = bm + (cur >> 3);
			*addr = 0xffffffff;
			cur += 32;
			continue;
		}
		mb_set_bit_atomic(lock, cur, bm);
		cur++;
	}
}

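/*
 * Editor's note on the fast path above: when the cursor is 32-bit
 * aligned and at least 32 bits remain, a whole __u32 is stored at byte
 * offset cur >> 3 instead of looping bit by bit. For example,
 * mb_set_bits(lock, bm, 32, 40) stores 0xffffffff once at byte 4 and
 * then sets the remaining 8 bits individually under the lock.
 */
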
static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
			  int first, int count)
{
	int block = 0;
	int max = 0;
	int order;
	void *buddy;
	void *buddy2;
	struct super_block *sb = e4b->bd_sb;

	BUG_ON(first + count > (sb->s_blocksize << 3));
	BUG_ON(!ext4_is_group_locked(sb, e4b->bd_group));
	mb_check_buddy(e4b);
	mb_free_blocks_double(inode, e4b, first, count);

	e4b->bd_info->bb_free += count;
	if (first < e4b->bd_info->bb_first_free)
		e4b->bd_info->bb_first_free = first;

	/* let's maintain fragments counter */
	if (first != 0)
		block = !mb_test_bit(first - 1, EXT4_MB_BITMAP(e4b));
	if (first + count < EXT4_SB(sb)->s_mb_maxs[0])
		max = !mb_test_bit(first + count, EXT4_MB_BITMAP(e4b));
	if (block && max)
		e4b->bd_info->bb_fragments--;
	else if (!block && !max)
		e4b->bd_info->bb_fragments++;

	/* let's maintain buddy itself */
	while (count-- > 0) {
		block = first++;
		order = 0;

		if (!mb_test_bit(block, EXT4_MB_BITMAP(e4b))) {
			ext4_fsblk_t blocknr;
			blocknr = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb);
			blocknr += block;
			blocknr +=
			    le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
			ext4_unlock_group(sb, e4b->bd_group);
			ext4_error(sb, __func__, "double-free of inode"
				   " %lu's block %llu(bit %u in group %lu)\n",
				   inode ? inode->i_ino : 0, blocknr, block,
				   e4b->bd_group);
			ext4_lock_group(sb, e4b->bd_group);
		}
		mb_clear_bit(block, EXT4_MB_BITMAP(e4b));
		e4b->bd_info->bb_counters[order]++;

		/* start of the buddy */
		buddy = mb_find_buddy(e4b, order, &max);

		do {
			block &= ~1UL;
			if (mb_test_bit(block, buddy) ||
					mb_test_bit(block + 1, buddy))
				break;

			/* both the buddies are free, try to coalesce them */
			buddy2 = mb_find_buddy(e4b, order + 1, &max);

			if (!buddy2)
				break;

			if (order > 0) {
				/* for special purposes, we don't set
				 * free bits in bitmap */
				mb_set_bit(block, buddy);
				mb_set_bit(block + 1, buddy);
			}
			e4b->bd_info->bb_counters[order]--;
			e4b->bd_info->bb_counters[order]--;

			block = block >> 1;
			order++;
			e4b->bd_info->bb_counters[order]++;

			mb_clear_bit(block, buddy2);
			buddy = buddy2;
		} while (1);
	}
	mb_check_buddy(e4b);
}

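/*
 * Editor's worked example for the coalescing loop above: freeing block 7
 * while block 6 is already free clears bit 7 in the block bitmap, sees
 * that its buddy 6 is free too, and records the merged pair by clearing
 * bit 3 (= 6 >> 1) in the order-1 buddy bitmap (at orders above 0 the
 * two merged halves are re-set so that only the largest free chunk
 * stays clear). The loop then retries one order up in case the
 * neighbouring pair {4,5} is free as well.
 */
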
static int mb_find_extent(struct ext4_buddy *e4b, int order, int block,
				int needed, struct ext4_free_extent *ex)
{
	int next = block;
	int max;
	int ord;
	void *buddy;

	BUG_ON(!ext4_is_group_locked(e4b->bd_sb, e4b->bd_group));
	BUG_ON(ex == NULL);

	buddy = mb_find_buddy(e4b, order, &max);
	BUG_ON(buddy == NULL);
	BUG_ON(block >= max);
	if (mb_test_bit(block, buddy)) {
		ex->fe_len = 0;
		ex->fe_start = 0;
		ex->fe_group = 0;
		return 0;
	}

	/* FIXME: drop order completely? */
	if (likely(order == 0)) {
		/* find actual order */
		order = mb_find_order_for_block(e4b, block);
		block = block >> order;
	}

	ex->fe_len = 1 << order;
	ex->fe_start = block << order;
	ex->fe_group = e4b->bd_group;

	/* calc difference from given start */
	next = next - ex->fe_start;
	ex->fe_len -= next;
	ex->fe_start += next;

	while (needed > ex->fe_len &&
	       (buddy = mb_find_buddy(e4b, order, &max))) {

		if (block + 1 >= max)
			break;

		next = (block + 1) * (1 << order);
		if (mb_test_bit(next, EXT4_MB_BITMAP(e4b)))
			break;

		ord = mb_find_order_for_block(e4b, next);

		order = ord;
		block = next >> order;
		ex->fe_len += 1 << order;
	}

	BUG_ON(ex->fe_start + ex->fe_len > (1 << (e4b->bd_blkbits + 3)));
	return ex->fe_len;
}

static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex)
{
	int ord;
	int mlen = 0;
	int max = 0;
	int cur;
	int start = ex->fe_start;
	int len = ex->fe_len;
	unsigned ret = 0;
	int len0 = len;
	void *buddy;

	BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3));
	BUG_ON(e4b->bd_group != ex->fe_group);
	BUG_ON(!ext4_is_group_locked(e4b->bd_sb, e4b->bd_group));
	mb_check_buddy(e4b);
	mb_mark_used_double(e4b, start, len);

	e4b->bd_info->bb_free -= len;
	if (e4b->bd_info->bb_first_free == start)
		e4b->bd_info->bb_first_free += len;

	/* let's maintain fragments counter */
	if (start != 0)
		mlen = !mb_test_bit(start - 1, EXT4_MB_BITMAP(e4b));
	if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0])
		max = !mb_test_bit(start + len, EXT4_MB_BITMAP(e4b));
	if (mlen && max)
		e4b->bd_info->bb_fragments++;
	else if (!mlen && !max)
		e4b->bd_info->bb_fragments--;

	/* let's maintain buddy itself */
	while (len) {
		ord = mb_find_order_for_block(e4b, start);

		if (((start >> ord) << ord) == start && len >= (1 << ord)) {
			/* the whole chunk may be allocated at once! */
			mlen = 1 << ord;
			buddy = mb_find_buddy(e4b, ord, &max);
			BUG_ON((start >> ord) >= max);
			mb_set_bit(start >> ord, buddy);
			e4b->bd_info->bb_counters[ord]--;
			start += mlen;
			len -= mlen;
			BUG_ON(len < 0);
			continue;
		}

		/* store for history */
		if (ret == 0)
			ret = len | (ord << 16);

		/* we have to split large buddy */
		BUG_ON(ord <= 0);
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_set_bit(start >> ord, buddy);
		e4b->bd_info->bb_counters[ord]--;

		ord--;
		cur = (start >> ord) & ~1U;
		buddy = mb_find_buddy(e4b, ord, &max);
		mb_clear_bit(cur, buddy);
		mb_clear_bit(cur + 1, buddy);
		e4b->bd_info->bb_counters[ord]++;
		e4b->bd_info->bb_counters[ord]++;
	}

	mb_set_bits(sb_bgl_lock(EXT4_SB(e4b->bd_sb), ex->fe_group),
			EXT4_MB_BITMAP(e4b), ex->fe_start, len0);
	mb_check_buddy(e4b);

	return ret;
}

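/*
 * Editor's note: the value returned by mb_mark_used() packs history for
 * the caller as (order << 16) | remaining_len, captured at the first
 * buddy split; e.g. splitting an order-3 chunk with 5 blocks left to
 * mark yields 0x30005. ext4_mb_use_best_found() below unpacks it into
 * ac_tail = ret & 0xffff and ac_buddy = ret >> 16.
 */
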
/*
 * Must be called under group lock!
 */
static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int ret;

	BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
	BUG_ON(ac->ac_status == AC_STATUS_FOUND);

	ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
	ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
	ret = mb_mark_used(e4b, &ac->ac_b_ex);

	/* preallocation can change ac_b_ex, thus we store actually
	 * allocated blocks for history */
	ac->ac_f_ex = ac->ac_b_ex;

	ac->ac_status = AC_STATUS_FOUND;
	ac->ac_tail = ret & 0xffff;
	ac->ac_buddy = ret >> 16;

	/* XXXXXXX: SUCH A HORRIBLE **CK */
	/*FIXME!! Why ? */
	ac->ac_bitmap_page = e4b->bd_bitmap_page;
	get_page(ac->ac_bitmap_page);
	ac->ac_buddy_page = e4b->bd_buddy_page;
	get_page(ac->ac_buddy_page);

	/* store last allocated for subsequent stream allocation */
	if ((ac->ac_flags & EXT4_MB_HINT_DATA)) {
		spin_lock(&sbi->s_md_lock);
		sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
		sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
		spin_unlock(&sbi->s_md_lock);
	}
}

/*
 * regular allocator, for general purposes allocation
 */

static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
					struct ext4_buddy *e4b,
					int finish_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_free_extent *bex = &ac->ac_b_ex;
	struct ext4_free_extent *gex = &ac->ac_g_ex;
	struct ext4_free_extent ex;
	int max;

	/*
	 * We don't want to scan for a whole year
	 */
	if (ac->ac_found > sbi->s_mb_max_to_scan &&
			!(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
		ac->ac_status = AC_STATUS_BREAK;
		return;
	}

	/*
	 * Haven't found good chunk so far, let's continue
	 */
	if (bex->fe_len < gex->fe_len)
		return;

	if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
			&& bex->fe_group == e4b->bd_group) {
		/* recheck chunk's availability - we don't know
		 * when it was found (within this lock-unlock
		 * period or not) */
		max = mb_find_extent(e4b, 0, bex->fe_start, gex->fe_len, &ex);
		if (max >= gex->fe_len) {
			ext4_mb_use_best_found(ac, e4b);
			return;
		}
	}
}

1357 /*
1358  * The routine checks whether found extent is good enough. If it is,
1359  * then the extent gets marked used and flag is set to the context
1360  * to stop scanning. Otherwise, the extent is compared with the
1361  * previous found extent and if new one is better, then it's stored
1362  * in the context. Later, the best found extent will be used, if
1363  * mballoc can't find good enough extent.
1364  *
1365  * FIXME: real allocation policy is to be designed yet!
1366  */
1367 static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
1368                                         struct ext4_free_extent *ex,
1369                                         struct ext4_buddy *e4b)
1370 {
1371         struct ext4_free_extent *bex = &ac->ac_b_ex;
1372         struct ext4_free_extent *gex = &ac->ac_g_ex;
1373
1374         BUG_ON(ex->fe_len <= 0);
1375         BUG_ON(ex->fe_len >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
1376         BUG_ON(ex->fe_start >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
1377         BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
1378
1379         ac->ac_found++;
1380
1381         /*
1382          * The special case - take what you catch first
1383          */
1384         if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1385                 *bex = *ex;
1386                 ext4_mb_use_best_found(ac, e4b);
1387                 return;
1388         }
1389
1390         /*
1391          * Let's check whether the chuck is good enough
1392          */
1393         if (ex->fe_len == gex->fe_len) {
1394                 *bex = *ex;
1395                 ext4_mb_use_best_found(ac, e4b);
1396                 return;
1397         }
1398
1399         /*
1400          * If this is first found extent, just store it in the context
1401          */
1402         if (bex->fe_len == 0) {
1403                 *bex = *ex;
1404                 return;
1405         }
1406
1407         /*
1408          * If new found extent is better, store it in the context
1409          */
1410         if (bex->fe_len < gex->fe_len) {
1411                 /* if the request isn't satisfied, any found extent
1412                  * larger than the previous best one is better */
1413                 if (ex->fe_len > bex->fe_len)
1414                         *bex = *ex;
1415         } else if (ex->fe_len > gex->fe_len) {
1416                 /* if the request is satisfied, then we try to find
1417                  * an extent that still satisfies the request, but is
1418                  * smaller than the previous one */
1419                 if (ex->fe_len < bex->fe_len)
1420                         *bex = *ex;
1421         }
1422
1423         ext4_mb_check_limits(ac, e4b, 0);
1424 }
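/*
 * Sketch only (hypothetical helper): the preference rule implemented
 * above.  While the goal is still unmet, any longer extent wins; once
 * the kept extent covers the goal, a tighter (smaller but still
 * sufficient) fit wins.  Exact fits never get here - they are used
 * immediately.
 */
static int mb_sketch_new_is_better(int best_len, int goal_len, int new_len)
{
        if (best_len < goal_len)
                return new_len > best_len;
        return new_len > goal_len && new_len < best_len;
}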
1425
1426 static int ext4_mb_try_best_found(struct ext4_allocation_context *ac,
1427                                         struct ext4_buddy *e4b)
1428 {
1429         struct ext4_free_extent ex = ac->ac_b_ex;
1430         ext4_group_t group = ex.fe_group;
1431         int max;
1432         int err;
1433
1434         BUG_ON(ex.fe_len <= 0);
1435         err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1436         if (err)
1437                 return err;
1438
1439         ext4_lock_group(ac->ac_sb, group);
1440         max = mb_find_extent(e4b, 0, ex.fe_start, ex.fe_len, &ex);
1441
1442         if (max > 0) {
1443                 ac->ac_b_ex = ex;
1444                 ext4_mb_use_best_found(ac, e4b);
1445         }
1446
1447         ext4_unlock_group(ac->ac_sb, group);
1448         ext4_mb_release_desc(e4b);
1449
1450         return 0;
1451 }
1452
1453 static int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
1454                                 struct ext4_buddy *e4b)
1455 {
1456         ext4_group_t group = ac->ac_g_ex.fe_group;
1457         int max;
1458         int err;
1459         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
1460         struct ext4_super_block *es = sbi->s_es;
1461         struct ext4_free_extent ex;
1462
1463         if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL))
1464                 return 0;
1465
1466         err = ext4_mb_load_buddy(ac->ac_sb, group, e4b);
1467         if (err)
1468                 return err;
1469
1470         ext4_lock_group(ac->ac_sb, group);
1471         max = mb_find_extent(e4b, 0, ac->ac_g_ex.fe_start,
1472                              ac->ac_g_ex.fe_len, &ex);
1473
1474         if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) {
1475                 ext4_fsblk_t start;
1476
1477                 start = (e4b->bd_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) +
1478                         ex.fe_start + le32_to_cpu(es->s_first_data_block);
1479                 /* use do_div to get remainder (would be 64-bit modulo) */
1480                 if (do_div(start, sbi->s_stripe) == 0) {
1481                         ac->ac_found++;
1482                         ac->ac_b_ex = ex;
1483                         ext4_mb_use_best_found(ac, e4b);
1484                 }
1485         } else if (max >= ac->ac_g_ex.fe_len) {
1486                 BUG_ON(ex.fe_len <= 0);
1487                 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1488                 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1489                 ac->ac_found++;
1490                 ac->ac_b_ex = ex;
1491                 ext4_mb_use_best_found(ac, e4b);
1492         } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) {
1493                 /* Sometimes, the caller may want to merge even a small
1494                  * number of blocks into an existing extent */
1495                 BUG_ON(ex.fe_len <= 0);
1496                 BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group);
1497                 BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start);
1498                 ac->ac_found++;
1499                 ac->ac_b_ex = ex;
1500                 ext4_mb_use_best_found(ac, e4b);
1501         }
1502         ext4_unlock_group(ac->ac_sb, group);
1503         ext4_mb_release_desc(e4b);
1504
1505         return 0;
1506 }
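/*
 * Sketch of the stripe-alignment test above in plain C.  Assumption:
 * the kernel's do_div(x, d) divides the 64-bit x in place and returns
 * the remainder, so "do_div(start, stripe) == 0" asks whether start is
 * a multiple of the stripe size.
 */
static int mb_sketch_is_stripe_aligned(unsigned long long start,
                                       unsigned int stripe)
{
        return (start % stripe) == 0;
}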
1507
1508 /*
1509  * The routine scans buddy structures (not the bitmap!) from the given
1510  * order to the max order, trying to find a big enough chunk to satisfy the request
1511  */
1512 static void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
1513                                         struct ext4_buddy *e4b)
1514 {
1515         struct super_block *sb = ac->ac_sb;
1516         struct ext4_group_info *grp = e4b->bd_info;
1517         void *buddy;
1518         int i;
1519         int k;
1520         int max;
1521
1522         BUG_ON(ac->ac_2order <= 0);
1523         for (i = ac->ac_2order; i <= sb->s_blocksize_bits + 1; i++) {
1524                 if (grp->bb_counters[i] == 0)
1525                         continue;
1526
1527                 buddy = mb_find_buddy(e4b, i, &max);
1528                 BUG_ON(buddy == NULL);
1529
1530                 k = mb_find_next_zero_bit(buddy, max, 0);
1531                 BUG_ON(k >= max);
1532
1533                 ac->ac_found++;
1534
1535                 ac->ac_b_ex.fe_len = 1 << i;
1536                 ac->ac_b_ex.fe_start = k << i;
1537                 ac->ac_b_ex.fe_group = e4b->bd_group;
1538
1539                 ext4_mb_use_best_found(ac, e4b);
1540
1541                 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
1542
1543                 if (EXT4_SB(sb)->s_mb_stats)
1544                         atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
1545
1546                 break;
1547         }
1548 }
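/*
 * Worked example for the scan above (hypothetical numbers): with 4KB
 * blocks the loop can visit orders ac_2order .. 13.  For a request of
 * 2^4 = 16 blocks, if the order-4 buddy bitmap has its first zero bit
 * at k = 7, the free chunk is fe_len = 1 << 4 = 16 blocks long and
 * starts at fe_start = 7 << 4 = block 112 within the group.
 */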
1549
1550 /*
1551  * The routine scans the group and measures all found extents.
1552  * In order to optimize scanning, the caller must pass the number of
1553  * free blocks in the group, so the routine can know the upper limit.
1554  */
1555 static void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac,
1556                                         struct ext4_buddy *e4b)
1557 {
1558         struct super_block *sb = ac->ac_sb;
1559         void *bitmap = EXT4_MB_BITMAP(e4b);
1560         struct ext4_free_extent ex;
1561         int i;
1562         int free;
1563
1564         free = e4b->bd_info->bb_free;
1565         BUG_ON(free <= 0);
1566
1567         i = e4b->bd_info->bb_first_free;
1568
1569         while (free && ac->ac_status == AC_STATUS_CONTINUE) {
1570                 i = mb_find_next_zero_bit(bitmap,
1571                                                 EXT4_BLOCKS_PER_GROUP(sb), i);
1572                 if (i >= EXT4_BLOCKS_PER_GROUP(sb)) {
1573                         /*
1574                          * If we have a corrupt bitmap, we won't find any
1575                          * free blocks even though the group info says we
1576                          * have free blocks
1577                          */
1578                         ext4_error(sb, __func__, "%d free blocks as per "
1579                                         "group info. But bitmap says 0\n",
1580                                         free);
1581                         break;
1582                 }
1583
1584                 mb_find_extent(e4b, 0, i, ac->ac_g_ex.fe_len, &ex);
1585                 BUG_ON(ex.fe_len <= 0);
1586                 if (free < ex.fe_len) {
1587                         ext4_error(sb, __func__, "%d free blocks as per "
1588                                         "group info. But got %d blocks\n",
1589                                         free, ex.fe_len);
1590                         /*
1591                          * The number of free blocks differs. This mostly
1592                          * indicates that the bitmap is corrupt. So exit
1593                          * without claiming the space.
1594                          */
1595                         break;
1596                 }
1597
1598                 ext4_mb_measure_extent(ac, &ex, e4b);
1599
1600                 i += ex.fe_len;
1601                 free -= ex.fe_len;
1602         }
1603
1604         ext4_mb_check_limits(ac, e4b, 1);
1605 }
1606
1607 /*
1608  * This is a special case for storage like raid5:
1609  * we try to find stripe-aligned chunks for stripe-size requests
1610  * XXX should do so at least for multiples of stripe size as well
1611  */
1612 static void ext4_mb_scan_aligned(struct ext4_allocation_context *ac,
1613                                  struct ext4_buddy *e4b)
1614 {
1615         struct super_block *sb = ac->ac_sb;
1616         struct ext4_sb_info *sbi = EXT4_SB(sb);
1617         void *bitmap = EXT4_MB_BITMAP(e4b);
1618         struct ext4_free_extent ex;
1619         ext4_fsblk_t first_group_block;
1620         ext4_fsblk_t a;
1621         ext4_grpblk_t i;
1622         int max;
1623
1624         BUG_ON(sbi->s_stripe == 0);
1625
1626         /* find first stripe-aligned block in group */
1627         first_group_block = e4b->bd_group * EXT4_BLOCKS_PER_GROUP(sb)
1628                 + le32_to_cpu(sbi->s_es->s_first_data_block);
1629         a = first_group_block + sbi->s_stripe - 1;
1630         do_div(a, sbi->s_stripe);
1631         i = (a * sbi->s_stripe) - first_group_block;
1632
1633         while (i < EXT4_BLOCKS_PER_GROUP(sb)) {
1634                 if (!mb_test_bit(i, bitmap)) {
1635                         max = mb_find_extent(e4b, 0, i, sbi->s_stripe, &ex);
1636                         if (max >= sbi->s_stripe) {
1637                                 ac->ac_found++;
1638                                 ac->ac_b_ex = ex;
1639                                 ext4_mb_use_best_found(ac, e4b);
1640                                 break;
1641                         }
1642                 }
1643                 i += sbi->s_stripe;
1644         }
1645 }
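/*
 * Sketch only (hypothetical helper): the round-up performed above to
 * locate the first stripe-aligned block at or after the group start.
 * E.g. first_group_block = 32770 and stripe = 16 yield offset 14,
 * since 32784 is the next multiple of 16.
 */
static unsigned int mb_sketch_first_aligned_offset(unsigned long long group_first,
                                                   unsigned int stripe)
{
        unsigned long long a = (group_first + stripe - 1) / stripe;

        return (unsigned int)(a * stripe - group_first);
}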
1646
1647 static int ext4_mb_good_group(struct ext4_allocation_context *ac,
1648                                 ext4_group_t group, int cr)
1649 {
1650         unsigned free, fragments;
1651         unsigned i, bits;
1652         struct ext4_group_desc *desc;
1653         struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
1654
1655         BUG_ON(cr < 0 || cr >= 4);
1656         BUG_ON(EXT4_MB_GRP_NEED_INIT(grp));
1657
1658         free = grp->bb_free;
1659         fragments = grp->bb_fragments;
1660         if (free == 0)
1661                 return 0;
1662         if (fragments == 0)
1663                 return 0;
1664
1665         switch (cr) {
1666         case 0:
1667                 BUG_ON(ac->ac_2order == 0);
1668                 /* If this group is uninitialized, skip it initially */
1669                 desc = ext4_get_group_desc(ac->ac_sb, group, NULL);
1670                 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))
1671                         return 0;
1672
1673                 bits = ac->ac_sb->s_blocksize_bits + 1;
1674                 for (i = ac->ac_2order; i <= bits; i++)
1675                         if (grp->bb_counters[i] > 0)
1676                                 return 1;
1677                 break;
1678         case 1:
1679                 if ((free / fragments) >= ac->ac_g_ex.fe_len)
1680                         return 1;
1681                 break;
1682         case 2:
1683                 if (free >= ac->ac_g_ex.fe_len)
1684                         return 1;
1685                 break;
1686         case 3:
1687                 return 1;
1688         default:
1689                 BUG();
1690         }
1691
1692         return 0;
1693 }
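/*
 * Summary of the criteria above, from strictest to loosest (the caller
 * relaxes cr from 0 to 3):
 *   cr 0: a free buddy of the requested order exists (exact 2^N fit)
 *   cr 1: the average fragment size (free / fragments) covers the request
 *   cr 2: the total free block count covers the request
 *   cr 3: any group with free space is acceptable
 */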
1694
1695 static noinline_for_stack int
1696 ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
1697 {
1698         ext4_group_t group;
1699         ext4_group_t i;
1700         int cr;
1701         int err = 0;
1702         int bsbits;
1703         struct ext4_sb_info *sbi;
1704         struct super_block *sb;
1705         struct ext4_buddy e4b;
1706         loff_t size, isize;
1707
1708         sb = ac->ac_sb;
1709         sbi = EXT4_SB(sb);
1710         BUG_ON(ac->ac_status == AC_STATUS_FOUND);
1711
1712         /* first, try the goal */
1713         err = ext4_mb_find_by_goal(ac, &e4b);
1714         if (err || ac->ac_status == AC_STATUS_FOUND)
1715                 goto out;
1716
1717         if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
1718                 goto out;
1719
1720         /*
1721          * ac->ac_2order is set only if the fe_len is a power of 2.
1722          * If ac_2order is set we also set the criteria to 0 so that we
1723          * try exact allocation using the buddy.
1724          */
1725         i = fls(ac->ac_g_ex.fe_len);
1726         ac->ac_2order = 0;
1727         /*
1728          * We search using buddy data only if the order of the request
1729          * is greater than or equal to sbi->s_mb_order2_reqs.
1730          * You can tune it via /proc/fs/ext4/<partition>/order2_req
1731          */
1732         if (i >= sbi->s_mb_order2_reqs) {
1733                 /*
1734                  * This should tell if fe_len is exactly power of 2
1735                  */
1736                 if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0)
1737                         ac->ac_2order = i - 1;
1738         }
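        /*
         * Worked example (hypothetical request sizes): fe_len = 16 gives
         * fls() = 5 and (16 & ~(1 << 4)) == 0, so ac_2order becomes 4.
         * fe_len = 24 also gives fls() = 5, but low bits remain set
         * (24 & ~(1 << 4) == 8), so ac_2order stays 0 and the order-2^N
         * fast path is skipped.
         */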
1739
1740         bsbits = ac->ac_sb->s_blocksize_bits;
1741         /* if stream allocation is enabled, use global goal */
1742         size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
1743         isize = i_size_read(ac->ac_inode) >> bsbits;
1744         if (size < isize)
1745                 size = isize;
1746
1747         if (size < sbi->s_mb_stream_request &&
1748                         (ac->ac_flags & EXT4_MB_HINT_DATA)) {
1749                 /* TBD: this may be a lock-contention hot spot */
1750                 spin_lock(&sbi->s_md_lock);
1751                 ac->ac_g_ex.fe_group = sbi->s_mb_last_group;
1752                 ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
1753                 spin_unlock(&sbi->s_md_lock);
1754         }
1755         /* Let's just scan groups to find more or less suitable blocks */
1756         cr = ac->ac_2order ? 0 : 1;
1757         /*
1758          * cr == 0 tries to get an exact allocation,
1759          * cr == 3 tries to get anything at all
1760          */
1761 repeat:
1762         for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
1763                 ac->ac_criteria = cr;
1764                 /*
1765                  * search for the right group, starting
1766                  * from the goal value specified
1767                  */
1768                 group = ac->ac_g_ex.fe_group;
1769
1770                 for (i = 0; i < EXT4_SB(sb)->s_groups_count; group++, i++) {
1771                         struct ext4_group_info *grp;
1772                         struct ext4_group_desc *desc;
1773
1774                         if (group == EXT4_SB(sb)->s_groups_count)
1775                                 group = 0;
1776
1777                         /* quick check to skip empty groups */
1778                         grp = ext4_get_group_info(ac->ac_sb, group);
1779                         if (grp->bb_free == 0)
1780                                 continue;
1781
1782                         /*
1783                          * if the group is already initialized, we check whether
1784                          * it is a good group and, if not, we don't load the buddy
1785                          */
1786                         if (EXT4_MB_GRP_NEED_INIT(grp)) {
1787                                 /*
1788                                  * we need full data about the group
1789                                  * to make a good selection
1790                                  */
1791                                 err = ext4_mb_load_buddy(sb, group, &e4b);
1792                                 if (err)
1793                                         goto out;
1794                                 ext4_mb_release_desc(&e4b);
1795                         }
1796
1797                         /*
1798                          * If the particular group doesn't satisfy our
1799                          * criteria we continue with the next group
1800                          */
1801                         if (!ext4_mb_good_group(ac, group, cr))
1802                                 continue;
1803
1804                         err = ext4_mb_load_buddy(sb, group, &e4b);
1805                         if (err)
1806                                 goto out;
1807
1808                         ext4_lock_group(sb, group);
1809                         if (!ext4_mb_good_group(ac, group, cr)) {
1810                                 /* someone did allocation from this group */
1811                                 ext4_unlock_group(sb, group);
1812                                 ext4_mb_release_desc(&e4b);
1813                                 continue;
1814                         }
1815
1816                         ac->ac_groups_scanned++;
1817                         desc = ext4_get_group_desc(sb, group, NULL);
1818                         if (cr == 0 || ((desc->bg_flags &
1819                                         cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) &&
1820                                         ac->ac_2order != 0))
1821                                 ext4_mb_simple_scan_group(ac, &e4b);
1822                         else if (cr == 1 &&
1823                                         ac->ac_g_ex.fe_len == sbi->s_stripe)
1824                                 ext4_mb_scan_aligned(ac, &e4b);
1825                         else
1826                                 ext4_mb_complex_scan_group(ac, &e4b);
1827
1828                         ext4_unlock_group(sb, group);
1829                         ext4_mb_release_desc(&e4b);
1830
1831                         if (ac->ac_status != AC_STATUS_CONTINUE)
1832                                 break;
1833                 }
1834         }
1835
1836         if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND &&
1837             !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
1838                 /*
1839                  * We've been searching too long. Let's try to allocate
1840                  * the best chunk we've found so far
1841                  */
1842
1843                 ext4_mb_try_best_found(ac, &e4b);
1844                 if (ac->ac_status != AC_STATUS_FOUND) {
1845                         /*
1846                          * Someone more lucky has already allocated it.
1847                          * The only thing we can do is just take first
1848                          * found block(s)
1849                         printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n");
1850                          */
1851                         ac->ac_b_ex.fe_group = 0;
1852                         ac->ac_b_ex.fe_start = 0;
1853                         ac->ac_b_ex.fe_len = 0;
1854                         ac->ac_status = AC_STATUS_CONTINUE;
1855                         ac->ac_flags |= EXT4_MB_HINT_FIRST;
1856                         cr = 3;
1857                         atomic_inc(&sbi->s_mb_lost_chunks);
1858                         goto repeat;
1859                 }
1860         }
1861 out:
1862         return err;
1863 }
1864
1865 #ifdef EXT4_MB_HISTORY
1866 struct ext4_mb_proc_session {
1867         struct ext4_mb_history *history;
1868         struct super_block *sb;
1869         int start;
1870         int max;
1871 };
1872
1873 static void *ext4_mb_history_skip_empty(struct ext4_mb_proc_session *s,
1874                                         struct ext4_mb_history *hs,
1875                                         int first)
1876 {
1877         if (hs == s->history + s->max)
1878                 hs = s->history;
1879         if (!first && hs == s->history + s->start)
1880                 return NULL;
1881         while (hs->orig.fe_len == 0) {
1882                 hs++;
1883                 if (hs == s->history + s->max)
1884                         hs = s->history;
1885                 if (hs == s->history + s->start)
1886                         return NULL;
1887         }
1888         return hs;
1889 }
1890
1891 static void *ext4_mb_seq_history_start(struct seq_file *seq, loff_t *pos)
1892 {
1893         struct ext4_mb_proc_session *s = seq->private;
1894         struct ext4_mb_history *hs;
1895         int l = *pos;
1896
1897         if (l == 0)
1898                 return SEQ_START_TOKEN;
1899         hs = ext4_mb_history_skip_empty(s, s->history + s->start, 1);
1900         if (!hs)
1901                 return NULL;
1902         while (--l && (hs = ext4_mb_history_skip_empty(s, ++hs, 0)) != NULL);
1903         return hs;
1904 }
1905
1906 static void *ext4_mb_seq_history_next(struct seq_file *seq, void *v,
1907                                       loff_t *pos)
1908 {
1909         struct ext4_mb_proc_session *s = seq->private;
1910         struct ext4_mb_history *hs = v;
1911
1912         ++*pos;
1913         if (v == SEQ_START_TOKEN)
1914                 return ext4_mb_history_skip_empty(s, s->history + s->start, 1);
1915         else
1916                 return ext4_mb_history_skip_empty(s, ++hs, 0);
1917 }
1918
1919 static int ext4_mb_seq_history_show(struct seq_file *seq, void *v)
1920 {
1921         char buf[25], buf2[25], buf3[25], *fmt;
1922         struct ext4_mb_history *hs = v;
1923
1924         if (v == SEQ_START_TOKEN) {
1925                 seq_printf(seq, "%-5s %-8s %-23s %-23s %-23s %-5s "
1926                                 "%-5s %-2s %-5s %-5s %-5s %-6s\n",
1927                           "pid", "inode", "original", "goal", "result", "found",
1928                            "grps", "cr", "flags", "merge", "tail", "broken");
1929                 return 0;
1930         }
1931
1932         if (hs->op == EXT4_MB_HISTORY_ALLOC) {
1933                 fmt = "%-5u %-8u %-23s %-23s %-23s %-5u %-5u %-2u "
1934                         "%-5u %-5s %-5u %-6u\n";
1935                 sprintf(buf2, "%lu/%d/%u@%u", hs->result.fe_group,
1936                         hs->result.fe_start, hs->result.fe_len,
1937                         hs->result.fe_logical);
1938                 sprintf(buf, "%lu/%d/%u@%u", hs->orig.fe_group,
1939                         hs->orig.fe_start, hs->orig.fe_len,
1940                         hs->orig.fe_logical);
1941                 sprintf(buf3, "%lu/%d/%u@%u", hs->goal.fe_group,
1942                         hs->goal.fe_start, hs->goal.fe_len,
1943                         hs->goal.fe_logical);
1944                 seq_printf(seq, fmt, hs->pid, hs->ino, buf, buf3, buf2,
1945                                 hs->found, hs->groups, hs->cr, hs->flags,
1946                                 hs->merged ? "M" : "", hs->tail,
1947                                 hs->buddy ? 1 << hs->buddy : 0);
1948         } else if (hs->op == EXT4_MB_HISTORY_PREALLOC) {
1949                 fmt = "%-5u %-8u %-23s %-23s %-23s\n";
1950                 sprintf(buf2, "%lu/%d/%u@%u", hs->result.fe_group,
1951                         hs->result.fe_start, hs->result.fe_len,
1952                         hs->result.fe_logical);
1953                 sprintf(buf, "%lu/%d/%u@%u", hs->orig.fe_group,
1954                         hs->orig.fe_start, hs->orig.fe_len,
1955                         hs->orig.fe_logical);
1956                 seq_printf(seq, fmt, hs->pid, hs->ino, buf, "", buf2);
1957         } else if (hs->op == EXT4_MB_HISTORY_DISCARD) {
1958                 sprintf(buf2, "%lu/%d/%u", hs->result.fe_group,
1959                         hs->result.fe_start, hs->result.fe_len);
1960                 seq_printf(seq, "%-5u %-8u %-23s discard\n",
1961                                 hs->pid, hs->ino, buf2);
1962         } else if (hs->op == EXT4_MB_HISTORY_FREE) {
1963                 sprintf(buf2, "%lu/%d/%u", hs->result.fe_group,
1964                         hs->result.fe_start, hs->result.fe_len);
1965                 seq_printf(seq, "%-5u %-8u %-23s free\n",
1966                                 hs->pid, hs->ino, buf2);
1967         }
1968         return 0;
1969 }
1970
1971 static void ext4_mb_seq_history_stop(struct seq_file *seq, void *v)
1972 {
1973 }
1974
1975 static struct seq_operations ext4_mb_seq_history_ops = {
1976         .start  = ext4_mb_seq_history_start,
1977         .next   = ext4_mb_seq_history_next,
1978         .stop   = ext4_mb_seq_history_stop,
1979         .show   = ext4_mb_seq_history_show,
1980 };
1981
1982 static int ext4_mb_seq_history_open(struct inode *inode, struct file *file)
1983 {
1984         struct super_block *sb = PDE(inode)->data;
1985         struct ext4_sb_info *sbi = EXT4_SB(sb);
1986         struct ext4_mb_proc_session *s;
1987         int rc;
1988         int size;
1989
1990         if (unlikely(sbi->s_mb_history == NULL))
1991                 return -ENOMEM;
1992         s = kmalloc(sizeof(*s), GFP_KERNEL);
1993         if (s == NULL)
1994                 return -ENOMEM;
1995         s->sb = sb;
1996         size = sizeof(struct ext4_mb_history) * sbi->s_mb_history_max;
1997         s->history = kmalloc(size, GFP_KERNEL);
1998         if (s->history == NULL) {
1999                 kfree(s);
2000                 return -ENOMEM;
2001         }
2002
2003         spin_lock(&sbi->s_mb_history_lock);
2004         memcpy(s->history, sbi->s_mb_history, size);
2005         s->max = sbi->s_mb_history_max;
2006         s->start = sbi->s_mb_history_cur % s->max;
2007         spin_unlock(&sbi->s_mb_history_lock);
2008
2009         rc = seq_open(file, &ext4_mb_seq_history_ops);
2010         if (rc == 0) {
2011                 struct seq_file *m = (struct seq_file *)file->private_data;
2012                 m->private = s;
2013         } else {
2014                 kfree(s->history);
2015                 kfree(s);
2016         }
2017         return rc;
2019 }
2020
2021 static int ext4_mb_seq_history_release(struct inode *inode, struct file *file)
2022 {
2023         struct seq_file *seq = (struct seq_file *)file->private_data;
2024         struct ext4_mb_proc_session *s = seq->private;
2025         kfree(s->history);
2026         kfree(s);
2027         return seq_release(inode, file);
2028 }
2029
2030 static ssize_t ext4_mb_seq_history_write(struct file *file,
2031                                 const char __user *buffer,
2032                                 size_t count, loff_t *ppos)
2033 {
2034         struct seq_file *seq = (struct seq_file *)file->private_data;
2035         struct ext4_mb_proc_session *s = seq->private;
2036         struct super_block *sb = s->sb;
2037         char str[32];
2038         int value;
2039
2040         if (count >= sizeof(str)) {
2041                 printk(KERN_ERR "EXT4-fs: %s string too long, max %u bytes\n",
2042                                 "mb_history", (int)sizeof(str));
2043                 return -EOVERFLOW;
2044         }
2045
2046         if (copy_from_user(str, buffer, count))
2047                 return -EFAULT;
2048
2049         value = simple_strtol(str, NULL, 0);
2050         if (value < 0)
2051                 return -ERANGE;
2052         EXT4_SB(sb)->s_mb_history_filter = value;
2053
2054         return count;
2055 }
2056
2057 static struct file_operations ext4_mb_seq_history_fops = {
2058         .owner          = THIS_MODULE,
2059         .open           = ext4_mb_seq_history_open,
2060         .read           = seq_read,
2061         .write          = ext4_mb_seq_history_write,
2062         .llseek         = seq_lseek,
2063         .release        = ext4_mb_seq_history_release,
2064 };
2065
2066 static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos)
2067 {
2068         struct super_block *sb = seq->private;
2069         struct ext4_sb_info *sbi = EXT4_SB(sb);
2070         ext4_group_t group;
2071
2072         if (*pos < 0 || *pos >= sbi->s_groups_count)
2073                 return NULL;
2074
2075         group = *pos + 1;
2076         return (void *) group;
2077 }
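/*
 * Note on the "+ 1" above (mirrored by "group--" in the show callback):
 * the group number is biased by one before being cast to a pointer so
 * that group 0 does not turn into NULL, which seq_file would interpret
 * as end-of-sequence.
 */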
2078
2079 static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos)
2080 {
2081         struct super_block *sb = seq->private;
2082         struct ext4_sb_info *sbi = EXT4_SB(sb);
2083         ext4_group_t group;
2084
2085         ++*pos;
2086         if (*pos < 0 || *pos >= sbi->s_groups_count)
2087                 return NULL;
2088         group = *pos + 1;
2089         return (void *) group;
2090 }
2091
2092 static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
2093 {
2094         struct super_block *sb = seq->private;
2095         long group = (long) v;
2096         int i;
2097         int err;
2098         struct ext4_buddy e4b;
2099         struct sg {
2100                 struct ext4_group_info info;
2101                 unsigned short counters[16];
2102         } sg;
2103
2104         group--;
2105         if (group == 0)
2106                 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
2107                                 "[ %-5s %-5s %-5s %-5s %-5s %-5s %-5s "
2108                                   "%-5s %-5s %-5s %-5s %-5s %-5s %-5s ]\n",
2109                            "group", "free", "frags", "first",
2110                            "2^0", "2^1", "2^2", "2^3", "2^4", "2^5", "2^6",
2111                            "2^7", "2^8", "2^9", "2^10", "2^11", "2^12", "2^13");
2112
2113         i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
2114                 sizeof(struct ext4_group_info);
2115         err = ext4_mb_load_buddy(sb, group, &e4b);
2116         if (err) {
2117                 seq_printf(seq, "#%-5lu: I/O error\n", group);
2118                 return 0;
2119         }
2120         ext4_lock_group(sb, group);
2121         memcpy(&sg, ext4_get_group_info(sb, group), i);
2122         ext4_unlock_group(sb, group);
2123         ext4_mb_release_desc(&e4b);
2124
2125         seq_printf(seq, "#%-5lu: %-5u %-5u %-5u [", group, sg.info.bb_free,
2126                         sg.info.bb_fragments, sg.info.bb_first_free);
2127         for (i = 0; i <= 13; i++)
2128                 seq_printf(seq, " %-5u", i <= sb->s_blocksize_bits + 1 ?
2129                                 sg.info.bb_counters[i] : 0);
2130         seq_printf(seq, " ]\n");
2131
2132         return 0;
2133 }
2134
2135 static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v)
2136 {
2137 }
2138
2139 static struct seq_operations ext4_mb_seq_groups_ops = {
2140         .start  = ext4_mb_seq_groups_start,
2141         .next   = ext4_mb_seq_groups_next,
2142         .stop   = ext4_mb_seq_groups_stop,
2143         .show   = ext4_mb_seq_groups_show,
2144 };
2145
2146 static int ext4_mb_seq_groups_open(struct inode *inode, struct file *file)
2147 {
2148         struct super_block *sb = PDE(inode)->data;
2149         int rc;
2150
2151         rc = seq_open(file, &ext4_mb_seq_groups_ops);
2152         if (rc == 0) {
2153                 struct seq_file *m = (struct seq_file *)file->private_data;
2154                 m->private = sb;
2155         }
2156         return rc;
2158 }
2159
2160 static struct file_operations ext4_mb_seq_groups_fops = {
2161         .owner          = THIS_MODULE,
2162         .open           = ext4_mb_seq_groups_open,
2163         .read           = seq_read,
2164         .llseek         = seq_lseek,
2165         .release        = seq_release,
2166 };
2167
2168 static void ext4_mb_history_release(struct super_block *sb)
2169 {
2170         struct ext4_sb_info *sbi = EXT4_SB(sb);
2171
2172         if (sbi->s_proc != NULL) {
2173                 remove_proc_entry("mb_groups", sbi->s_proc);
2174                 remove_proc_entry("mb_history", sbi->s_proc);
2175         }
2176         kfree(sbi->s_mb_history);
2177 }
2178
2179 static void ext4_mb_history_init(struct super_block *sb)
2180 {
2181         struct ext4_sb_info *sbi = EXT4_SB(sb);
2182         int i;
2183
2184         if (sbi->s_proc != NULL) {
2185                 proc_create_data("mb_history", S_IRUGO, sbi->s_proc,
2186                                  &ext4_mb_seq_history_fops, sb);
2187                 proc_create_data("mb_groups", S_IRUGO, sbi->s_proc,
2188                                  &ext4_mb_seq_groups_fops, sb);
2189         }
2190
2191         sbi->s_mb_history_max = 1000;
2192         sbi->s_mb_history_cur = 0;
2193         spin_lock_init(&sbi->s_mb_history_lock);
2194         i = sbi->s_mb_history_max * sizeof(struct ext4_mb_history);
2195         sbi->s_mb_history = kzalloc(i, GFP_KERNEL);
2196         /* if we can't allocate history, then we simply won't use it */
2197 }
2198
2199 static noinline_for_stack void
2200 ext4_mb_store_history(struct ext4_allocation_context *ac)
2201 {
2202         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2203         struct ext4_mb_history h;
2204
2205         if (unlikely(sbi->s_mb_history == NULL))
2206                 return;
2207
2208         if (!(ac->ac_op & sbi->s_mb_history_filter))
2209                 return;
2210
2211         h.op = ac->ac_op;
2212         h.pid = current->pid;
2213         h.ino = ac->ac_inode ? ac->ac_inode->i_ino : 0;
2214         h.orig = ac->ac_o_ex;
2215         h.result = ac->ac_b_ex;
2216         h.flags = ac->ac_flags;
2217         h.found = ac->ac_found;
2218         h.groups = ac->ac_groups_scanned;
2219         h.cr = ac->ac_criteria;
2220         h.tail = ac->ac_tail;
2221         h.buddy = ac->ac_buddy;
2222         h.merged = 0;
2223         if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) {
2224                 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
2225                                 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
2226                         h.merged = 1;
2227                 h.goal = ac->ac_g_ex;
2228                 h.result = ac->ac_f_ex;
2229         }
2230
2231         spin_lock(&sbi->s_mb_history_lock);
2232         memcpy(sbi->s_mb_history + sbi->s_mb_history_cur, &h, sizeof(h));
2233         if (++sbi->s_mb_history_cur >= sbi->s_mb_history_max)
2234                 sbi->s_mb_history_cur = 0;
2235         spin_unlock(&sbi->s_mb_history_lock);
2236 }
2237
2238 #else
2239 #define ext4_mb_history_release(sb)
2240 #define ext4_mb_history_init(sb)
2241 #endif
2242
2243
2244 /* Create and initialize ext4_group_info data for the given group. */
2245 int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2246                           struct ext4_group_desc *desc)
2247 {
2248         int i, len;
2249         int metalen = 0;
2250         struct ext4_sb_info *sbi = EXT4_SB(sb);
2251         struct ext4_group_info **meta_group_info;
2252
2253         /*
2254          * First check if this group is the first of a group descriptor block.
2255          * If it is, we have to allocate a new table of pointers
2256          * to ext4_group_info structures
2257          */
2258         if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2259                 metalen = sizeof(*meta_group_info) <<
2260                         EXT4_DESC_PER_BLOCK_BITS(sb);
2261                 meta_group_info = kmalloc(metalen, GFP_KERNEL);
2262                 if (meta_group_info == NULL) {
2263                         printk(KERN_ERR "EXT4-fs: can't allocate mem for a "
2264                                "buddy group\n");
2265                         goto exit_meta_group_info;
2266                 }
2267                 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
2268                         meta_group_info;
2269         }
2270
2271         /*
2272          * calculate the needed size. if you change the bb_counters size,
2273          * don't forget about ext4_mb_generate_buddy()
2274          */
2275         len = offsetof(typeof(**meta_group_info),
2276                        bb_counters[sb->s_blocksize_bits + 2]);
2277
2278         meta_group_info =
2279                 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
2280         i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2281
2282         meta_group_info[i] = kzalloc(len, GFP_KERNEL);
2283         if (meta_group_info[i] == NULL) {
2284                 printk(KERN_ERR "EXT4-fs: can't allocate buddy mem\n");
2285                 goto exit_group_info;
2286         }
2287         set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT,
2288                 &(meta_group_info[i]->bb_state));
2289
2290         /*
2291          * initialize bb_free to be able to skip
2292          * empty groups without initialization
2293          */
2294         if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2295                 meta_group_info[i]->bb_free =
2296                         ext4_free_blocks_after_init(sb, group, desc);
2297         } else {
2298                 meta_group_info[i]->bb_free =
2299                         le16_to_cpu(desc->bg_free_blocks_count);
2300         }
2301
2302         INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list);
2303         meta_group_info[i]->bb_free_root.rb_node = NULL;
2304
2305 #ifdef DOUBLE_CHECK
2306         {
2307                 struct buffer_head *bh;
2308                 meta_group_info[i]->bb_bitmap =
2309                         kmalloc(sb->s_blocksize, GFP_KERNEL);
2310                 BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
2311                 bh = ext4_read_block_bitmap(sb, group);
2312                 BUG_ON(bh == NULL);
2313                 memcpy(meta_group_info[i]->bb_bitmap, bh->b_data,
2314                         sb->s_blocksize);
2315                 put_bh(bh);
2316         }
2317 #endif
2318
2319         return 0;
2320
2321 exit_group_info:
2322         /* If a meta_group_info table has been allocated, release it now */
2323         if (group % EXT4_DESC_PER_BLOCK(sb) == 0)
2324                 kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
2325 exit_meta_group_info:
2326         return -ENOMEM;
2327 } /* ext4_mb_add_groupinfo */
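/*
 * Plain userspace C sketch (hypothetical struct and helper names) of
 * the offsetof() sizing trick above: the group info is allocated with
 * a trailing counter array holding one slot per buddy order,
 * 0 .. blocksize_bits + 1.
 */
#include <stddef.h>
#include <stdlib.h>

struct sketch_group_info {
        unsigned short bb_first_free;
        unsigned short bb_free;
        unsigned short bb_counters[];   /* one counter per buddy order */
};

static struct sketch_group_info *sketch_alloc_group_info(int blocksize_bits)
{
        /* size up to and including bb_counters[blocksize_bits + 1] */
        return calloc(1, offsetof(struct sketch_group_info,
                                  bb_counters[blocksize_bits + 2]));
}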
2328
2329 /*
2330  * Add a group to the existing groups.
2331  * This function is used for online resize
2332  */
2333 int ext4_mb_add_more_groupinfo(struct super_block *sb, ext4_group_t group,
2334                                struct ext4_group_desc *desc)
2335 {
2336         struct ext4_sb_info *sbi = EXT4_SB(sb);
2337         struct inode *inode = sbi->s_buddy_cache;
2338         int blocks_per_page;
2339         int block;
2340         int pnum;
2341         struct page *page;
2342         int err;
2343
2344         /* Add the group based on its group descriptor */
2345         err = ext4_mb_add_groupinfo(sb, group, desc);
2346         if (err)
2347                 return err;
2348
2349         /*
2350          * Cached pages containing dynamic mb_alloc data (buddy and bitmap
2351          * data) are marked not up to date so that they will be re-initialized
2352          * during the next call to ext4_mb_load_buddy
2353          */
2354
2355         /* Set buddy page as not up to date */
2356         blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize;
2357         block = group * 2;
2358         pnum = block / blocks_per_page;
2359         page = find_get_page(inode->i_mapping, pnum);
2360         if (page != NULL) {
2361                 ClearPageUptodate(page);
2362                 page_cache_release(page);
2363         }
2364
2365         /* Set bitmap page as not up to date */
2366         block++;
2367         pnum = block / blocks_per_page;
2368         page = find_get_page(inode->i_mapping, pnum);
2369         if (page != NULL) {
2370                 ClearPageUptodate(page);
2371                 page_cache_release(page);
2372         }
2373
2374         return 0;
2375 }
2376
2377 /*
2378  * Update an existing group.
2379  * This function is used for online resize
2380  */
2381 void ext4_mb_update_group_info(struct ext4_group_info *grp, ext4_grpblk_t add)
2382 {
2383         grp->bb_free += add;
2384 }
2385
2386 static int ext4_mb_init_backend(struct super_block *sb)
2387 {
2388         ext4_group_t i;
2389         int metalen;
2390         struct ext4_sb_info *sbi = EXT4_SB(sb);
2391         struct ext4_super_block *es = sbi->s_es;
2392         int num_meta_group_infos;
2393         int num_meta_group_infos_max;
2394         int array_size;
2395         struct ext4_group_info **meta_group_info;
2396         struct ext4_group_desc *desc;
2397
2398         /* This is the number of blocks used by GDT */
2399         num_meta_group_infos = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) -
2400                                 1) >> EXT4_DESC_PER_BLOCK_BITS(sb);
2401
2402         /*
2403          * This is the total number of blocks used by GDT including
2404          * the number of reserved blocks for GDT.
2405          * The s_group_info array is allocated with this value
2406          * to allow a clean online resize without a complex
2407          * manipulation of pointers.
2408          * The drawback is some unused memory when no resize
2409          * occurs, but it is very low in terms of pages
2410          * (see comments below)
2411          * Need to handle this properly when META_BG resizing is allowed
2412          */
2413         num_meta_group_infos_max = num_meta_group_infos +
2414                                 le16_to_cpu(es->s_reserved_gdt_blocks);
2415
2416         /*
2417          * array_size is the size of the s_group_info array. We round it
2418          * up to the next power of two because kmalloc rounds sizes up
2419          * internally anyway, so the extra memory comes for free
2420          * (e.g. it may be used for META_BG resize).
2421          */
2422         array_size = 1;
2423         while (array_size < sizeof(*sbi->s_group_info) *
2424                num_meta_group_infos_max)
2425                 array_size = array_size << 1;
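        /*
         * e.g. (hypothetical sizing) 13 meta-group pointers * 8 bytes =
         * 104 bytes rounds up to array_size = 128, the same bucket
         * kmalloc would pick internally.
         */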
2426         /* An 8TB filesystem with 64-bit pointers requires a 4096 byte
2427          * kmalloc. A 128KB kmalloc should suffice for a 256TB filesystem.
2428          * So a two level scheme suffices for now. */
2429         sbi->s_group_info = kmalloc(array_size, GFP_KERNEL);
2430         if (sbi->s_group_info == NULL) {
2431                 printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n");
2432                 return -ENOMEM;
2433         }
2434         sbi->s_buddy_cache = new_inode(sb);
2435         if (sbi->s_buddy_cache == NULL) {
2436                 printk(KERN_ERR "EXT4-fs: can't get new inode\n");
2437                 goto err_freesgi;
2438         }
2439         EXT4_I(sbi->s_buddy_cache)->i_disksize = 0;
2440
2441         metalen = sizeof(*meta_group_info) << EXT4_DESC_PER_BLOCK_BITS(sb);
2442         for (i = 0; i < num_meta_group_infos; i++) {
2443                 if ((i + 1) == num_meta_group_infos)
2444                         metalen = sizeof(*meta_group_info) *
2445                                 (sbi->s_groups_count -
2446                                         (i << EXT4_DESC_PER_BLOCK_BITS(sb)));
2447                 meta_group_info = kmalloc(metalen, GFP_KERNEL);
2448                 if (meta_group_info == NULL) {
2449                         printk(KERN_ERR "EXT4-fs: can't allocate mem for a "
2450                                "buddy group\n");
2451                         goto err_freemeta;
2452                 }
2453                 sbi->s_group_info[i] = meta_group_info;
2454         }
2455
2456         for (i = 0; i < sbi->s_groups_count; i++) {
2457                 desc = ext4_get_group_desc(sb, i, NULL);
2458                 if (desc == NULL) {
2459                         printk(KERN_ERR
2460                                 "EXT4-fs: can't read descriptor %lu\n", i);
2461                         goto err_freebuddy;
2462                 }
2463                 if (ext4_mb_add_groupinfo(sb, i, desc) != 0)
2464                         goto err_freebuddy;
2465         }
2466
2467         return 0;
2468
2469 err_freebuddy:
2470         while (i-- > 0)
2471                 kfree(ext4_get_group_info(sb, i));
2472         i = num_meta_group_infos;
2473 err_freemeta:
2474         while (i-- > 0)
2475                 kfree(sbi->s_group_info[i]);
2476         iput(sbi->s_buddy_cache);
2477 err_freesgi:
2478         kfree(sbi->s_group_info);
2479         return -ENOMEM;
2480 }
2481
2482 int ext4_mb_init(struct super_block *sb, int needs_recovery)
2483 {
2484         struct ext4_sb_info *sbi = EXT4_SB(sb);
2485         unsigned i, j;
2486         unsigned offset;
2487         unsigned max;
2488         int ret;
2489
2490         i = (sb->s_blocksize_bits + 2) * sizeof(unsigned short);
2491
2492         sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL);
2493         if (sbi->s_mb_offsets == NULL) {
2494                 return -ENOMEM;
2495         }
2496
2497         i = (sb->s_blocksize_bits + 2) * sizeof(unsigned int);
2498         sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
2499         if (sbi->s_mb_maxs == NULL) {
2500                 kfree(sbi->s_mb_offsets);
2501                 return -ENOMEM;
2502         }
2503
2504         /* order 0 is regular bitmap */
2505         sbi->s_mb_maxs[0] = sb->s_blocksize << 3;
2506         sbi->s_mb_offsets[0] = 0;
2507
2508         i = 1;
2509         offset = 0;
2510         max = sb->s_blocksize << 2;
2511         do {
2512                 sbi->s_mb_offsets[i] = offset;
2513                 sbi->s_mb_maxs[i] = max;
2514                 offset += 1 << (sb->s_blocksize_bits - i);
2515                 max = max >> 1;
2516                 i++;
2517         } while (i <= sb->s_blocksize_bits + 1);
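        /*
         * Worked example (4KB blocks, s_blocksize_bits = 12): maxs[0] is
         * the 32768-bit on-disk bitmap itself, and the loop then lays
         * the higher-order buddy bitmaps end to end, giving offsets/maxs
         * of 0/16384, 2048/8192, 3072/4096, ... so all orders fit in a
         * single block-sized buffer.
         */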
2518
2519         /* init file for buddy data */
2520         ret = ext4_mb_init_backend(sb);
2521         if (ret != 0) {
2522                 kfree(sbi->s_mb_offsets);
2523                 kfree(sbi->s_mb_maxs);
2524                 return ret;
2525         }
2526
2527         spin_lock_init(&sbi->s_md_lock);
2528         spin_lock_init(&sbi->s_bal_lock);
2529
2530         sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
2531         sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
2532         sbi->s_mb_stats = MB_DEFAULT_STATS;
2533         sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
2534         sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
2535         sbi->s_mb_history_filter = EXT4_MB_HISTORY_DEFAULT;
2536         sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
2537
2538         sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
2539         if (sbi->s_locality_groups == NULL) {
2540                 kfree(sbi->s_mb_offsets);
2541                 kfree(sbi->s_mb_maxs);
2542                 return -ENOMEM;
2543         }
2544         for_each_possible_cpu(i) {
2545                 struct ext4_locality_group *lg;
2546                 lg = per_cpu_ptr(sbi->s_locality_groups, i);
2547                 mutex_init(&lg->lg_mutex);
2548                 for (j = 0; j < PREALLOC_TB_SIZE; j++)
2549                         INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
2550                 spin_lock_init(&lg->lg_prealloc_lock);
2551         }
2552
2553         ext4_mb_init_per_dev_proc(sb);
2554         ext4_mb_history_init(sb);
2555
2556         sbi->s_journal->j_commit_callback = release_blocks_on_commit;
2557
2558         printk(KERN_INFO "EXT4-fs: mballoc enabled\n");
2559         return 0;
2560 }
2561
2562 /* needs to be called with the ext4 group lock held (ext4_lock_group) */
2563 static void ext4_mb_cleanup_pa(struct ext4_group_info *grp)
2564 {
2565         struct ext4_prealloc_space *pa;
2566         struct list_head *cur, *tmp;
2567         int count = 0;
2568
2569         list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
2570                 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
2571                 list_del(&pa->pa_group_list);
2572                 count++;
2573                 kmem_cache_free(ext4_pspace_cachep, pa);
2574         }
2575         if (count)
2576                 mb_debug("mballoc: %u PAs left\n", count);
2578 }
2579
2580 int ext4_mb_release(struct super_block *sb)
2581 {
2582         ext4_group_t i;
2583         int num_meta_group_infos;
2584         struct ext4_group_info *grinfo;
2585         struct ext4_sb_info *sbi = EXT4_SB(sb);
2586
2587         if (sbi->s_group_info) {
2588                 for (i = 0; i < sbi->s_groups_count; i++) {
2589                         grinfo = ext4_get_group_info(sb, i);
2590 #ifdef DOUBLE_CHECK
2591                         kfree(grinfo->bb_bitmap);
2592 #endif
2593                         ext4_lock_group(sb, i);
2594                         ext4_mb_cleanup_pa(grinfo);
2595                         ext4_unlock_group(sb, i);
2596                         kfree(grinfo);
2597                 }
2598                 num_meta_group_infos = (sbi->s_groups_count +
2599                                 EXT4_DESC_PER_BLOCK(sb) - 1) >>
2600                         EXT4_DESC_PER_BLOCK_BITS(sb);
2601                 for (i = 0; i < num_meta_group_infos; i++)
2602                         kfree(sbi->s_group_info[i]);
2603                 kfree(sbi->s_group_info);
2604         }
2605         kfree(sbi->s_mb_offsets);
2606         kfree(sbi->s_mb_maxs);
2607         if (sbi->s_buddy_cache)
2608                 iput(sbi->s_buddy_cache);
2609         if (sbi->s_mb_stats) {
2610                 printk(KERN_INFO
2611                        "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
2612                                 atomic_read(&sbi->s_bal_allocated),
2613                                 atomic_read(&sbi->s_bal_reqs),
2614                                 atomic_read(&sbi->s_bal_success));
2615                 printk(KERN_INFO
2616                       "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
2617                                 "%u 2^N hits, %u breaks, %u lost\n",
2618                                 atomic_read(&sbi->s_bal_ex_scanned),
2619                                 atomic_read(&sbi->s_bal_goals),
2620                                 atomic_read(&sbi->s_bal_2orders),
2621                                 atomic_read(&sbi->s_bal_breaks),
2622                                 atomic_read(&sbi->s_mb_lost_chunks));
2623                 printk(KERN_INFO
2624                        "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
2625                                 sbi->s_mb_buddies_generated++,
2626                                 sbi->s_mb_generation_time);
2627                 printk(KERN_INFO
2628                        "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
2629                                 atomic_read(&sbi->s_mb_preallocated),
2630                                 atomic_read(&sbi->s_mb_discarded));
2631         }
2632
2633         free_percpu(sbi->s_locality_groups);
2634         ext4_mb_history_release(sb);
2635         ext4_mb_destroy_per_dev_proc(sb);
2636
2637         return 0;
2638 }
2639
2640 /*
2641  * This function is called by the jbd2 layer once the commit has finished,
2642  * so we know we can free the blocks that were released with that commit.
2643  */
2644 static void release_blocks_on_commit(journal_t *journal, transaction_t *txn)
2645 {
2646         struct super_block *sb = journal->j_private;
2647         struct ext4_buddy e4b;
2648         struct ext4_group_info *db;
2649         int err, count = 0, count2 = 0;
2650         struct ext4_free_data *entry;
2651         ext4_fsblk_t discard_block;
2652         struct list_head *l, *ltmp;
2653
2654         list_for_each_safe(l, ltmp, &txn->t_private_list) {
2655                 entry = list_entry(l, struct ext4_free_data, list);
2656
2657                 mb_debug("gonna free %u blocks in group %lu (0x%p):",
2658                          entry->count, entry->group, entry);
2659
2660                 err = ext4_mb_load_buddy(sb, entry->group, &e4b);
2661                 /* we expect to find existing buddy because it's pinned */
2662                 BUG_ON(err != 0);
2663
2664                 db = e4b.bd_info;
2665                 /* there are blocks to put in buddy to make them really free */
2666                 count += entry->count;
2667                 count2++;
2668                 ext4_lock_group(sb, entry->group);
2669                 /* Take it out of per group rb tree */
2670                 rb_erase(&entry->node, &(db->bb_free_root));
2671                 mb_free_blocks(NULL, &e4b, entry->start_blk, entry->count);
2672
2673                 if (!db->bb_free_root.rb_node) {
2674                         /* No more items in the per group rb tree
2675                          * balance refcounts from ext4_mb_free_metadata()
2676                          */
2677                         page_cache_release(e4b.bd_buddy_page);
2678                         page_cache_release(e4b.bd_bitmap_page);
2679                 }
2680                 ext4_unlock_group(sb, entry->group);
2681                 discard_block = (ext4_fsblk_t) entry->group * EXT4_BLOCKS_PER_GROUP(sb)
2682                         + entry->start_blk
2683                         + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block);
2684                 trace_mark(ext4_discard_blocks, "dev %s blk %llu count %u", sb->s_id,
2685                            (unsigned long long) discard_block, entry->count);
2686                 sb_issue_discard(sb, discard_block, entry->count);
2687
2688                 kmem_cache_free(ext4_free_ext_cachep, entry);
2689                 ext4_mb_release_desc(&e4b);
2690         }
2691
2692         mb_debug("freed %u blocks in %u structures\n", count, count2);
2693 }
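/*
 * Ordering note for the callback above: blocks freed inside a
 * transaction sit on t_private_list and are only returned to the buddy
 * here, after jbd2 has committed.  This way a crash before the commit
 * can never observe those blocks reused by a newer allocation that the
 * journal does not yet know about.
 */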
2694
2695 #define EXT4_MB_STATS_NAME              "stats"
2696 #define EXT4_MB_MAX_TO_SCAN_NAME        "max_to_scan"
2697 #define EXT4_MB_MIN_TO_SCAN_NAME        "min_to_scan"
2698 #define EXT4_MB_ORDER2_REQ              "order2_req"
2699 #define EXT4_MB_STREAM_REQ              "stream_req"
2700 #define EXT4_MB_GROUP_PREALLOC          "group_prealloc"
2701
2702 static int ext4_mb_init_per_dev_proc(struct super_block *sb)
2703 {
2704 #ifdef CONFIG_PROC_FS
2705         mode_t mode = S_IFREG | S_IRUGO | S_IWUSR;
2706         struct ext4_sb_info *sbi = EXT4_SB(sb);
2707         struct proc_dir_entry *proc;
2708
2709         if (sbi->s_proc == NULL)
2710                 return -EINVAL;
2711
2712         EXT4_PROC_HANDLER(EXT4_MB_STATS_NAME, mb_stats);
2713         EXT4_PROC_HANDLER(EXT4_MB_MAX_TO_SCAN_NAME, mb_max_to_scan);
2714         EXT4_PROC_HANDLER(EXT4_MB_MIN_TO_SCAN_NAME, mb_min_to_scan);
2715         EXT4_PROC_HANDLER(EXT4_MB_ORDER2_REQ, mb_order2_reqs);
2716         EXT4_PROC_HANDLER(EXT4_MB_STREAM_REQ, mb_stream_request);
2717         EXT4_PROC_HANDLER(EXT4_MB_GROUP_PREALLOC, mb_group_prealloc);
2718         return 0;
2719
2720 err_out:
2721         remove_proc_entry(EXT4_MB_GROUP_PREALLOC, sbi->s_proc);
2722         remove_proc_entry(EXT4_MB_STREAM_REQ, sbi->s_proc);
2723         remove_proc_entry(EXT4_MB_ORDER2_REQ, sbi->s_proc);
2724         remove_proc_entry(EXT4_MB_MIN_TO_SCAN_NAME, sbi->s_proc);
2725         remove_proc_entry(EXT4_MB_MAX_TO_SCAN_NAME, sbi->s_proc);
2726         remove_proc_entry(EXT4_MB_STATS_NAME, sbi->s_proc);
2727         return -ENOMEM;
2728 #else
2729         return 0;
2730 #endif
2731 }
2732
2733 static int ext4_mb_destroy_per_dev_proc(struct super_block *sb)
2734 {
2735 #ifdef CONFIG_PROC_FS
2736         struct ext4_sb_info *sbi = EXT4_SB(sb);
2737
2738         if (sbi->s_proc == NULL)
2739                 return -EINVAL;
2740
2741         remove_proc_entry(EXT4_MB_GROUP_PREALLOC, sbi->s_proc);
2742         remove_proc_entry(EXT4_MB_STREAM_REQ, sbi->s_proc);
2743         remove_proc_entry(EXT4_MB_ORDER2_REQ, sbi->s_proc);
2744         remove_proc_entry(EXT4_MB_MIN_TO_SCAN_NAME, sbi->s_proc);
2745         remove_proc_entry(EXT4_MB_MAX_TO_SCAN_NAME, sbi->s_proc);
2746         remove_proc_entry(EXT4_MB_STATS_NAME, sbi->s_proc);
2747 #endif
2748         return 0;
2749 }
2750
2751 int __init init_ext4_mballoc(void)
2752 {
2753         ext4_pspace_cachep =
2754                 kmem_cache_create("ext4_prealloc_space",
2755                                      sizeof(struct ext4_prealloc_space),
2756                                      0, SLAB_RECLAIM_ACCOUNT, NULL);
2757         if (ext4_pspace_cachep == NULL)
2758                 return -ENOMEM;
2759
2760         ext4_ac_cachep =
2761                 kmem_cache_create("ext4_alloc_context",
2762                                      sizeof(struct ext4_allocation_context),
2763                                      0, SLAB_RECLAIM_ACCOUNT, NULL);
2764         if (ext4_ac_cachep == NULL) {
2765                 kmem_cache_destroy(ext4_pspace_cachep);
2766                 return -ENOMEM;
2767         }
2768
2769         ext4_free_ext_cachep =
2770                 kmem_cache_create("ext4_free_block_extents",
2771                                      sizeof(struct ext4_free_data),
2772                                      0, SLAB_RECLAIM_ACCOUNT, NULL);
2773         if (ext4_free_ext_cachep == NULL) {
2774                 kmem_cache_destroy(ext4_pspace_cachep);
2775                 kmem_cache_destroy(ext4_ac_cachep);
2776                 return -ENOMEM;
2777         }
2778         return 0;
2779 }
2780
2781 void exit_ext4_mballoc(void)
2782 {
2783         /* XXX: synchronize_rcu(); */
2784         kmem_cache_destroy(ext4_pspace_cachep);
2785         kmem_cache_destroy(ext4_ac_cachep);
2786         kmem_cache_destroy(ext4_free_ext_cachep);
2787 }
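/*
 * On the XXX above: preallocation descriptors are freed via call_rcu()
 * (see ext4_mb_pa_callback()), so callbacks may still be in flight when
 * these caches are destroyed. A sketch of a safer shutdown ordering:
 *
 *	rcu_barrier();			(wait for pending pa callbacks)
 *	kmem_cache_destroy(ext4_pspace_cachep);
 *	kmem_cache_destroy(ext4_ac_cachep);
 *	kmem_cache_destroy(ext4_free_ext_cachep);
 *
 * rcu_barrier(), unlike synchronize_rcu(), waits for queued RCU
 * callbacks to finish, not merely for a grace period to elapse.
 */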
2788
2789
2790 /*
2791  * Check quota and mark the chosen space (ac->ac_b_ex) non-free in bitmaps
2792  * Returns 0 on success or an error code
2793  */
2794 static noinline_for_stack int
2795 ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2796                                 handle_t *handle, unsigned long reserv_blks)
2797 {
2798         struct buffer_head *bitmap_bh = NULL;
2799         struct ext4_super_block *es;
2800         struct ext4_group_desc *gdp;
2801         struct buffer_head *gdp_bh;
2802         struct ext4_sb_info *sbi;
2803         struct super_block *sb;
2804         ext4_fsblk_t block;
2805         int err, len;
2806
2807         BUG_ON(ac->ac_status != AC_STATUS_FOUND);
2808         BUG_ON(ac->ac_b_ex.fe_len <= 0);
2809
2810         sb = ac->ac_sb;
2811         sbi = EXT4_SB(sb);
2812         es = sbi->s_es;
2813
2814
2815         err = -EIO;
2816         bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group);
2817         if (!bitmap_bh)
2818                 goto out_err;
2819
2820         err = ext4_journal_get_write_access(handle, bitmap_bh);
2821         if (err)
2822                 goto out_err;
2823
2824         err = -EIO;
2825         gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh);
2826         if (!gdp)
2827                 goto out_err;
2828
2829         ext4_debug("using block group %lu(%d)\n", ac->ac_b_ex.fe_group,
2830                         gdp->bg_free_blocks_count);
2831
2832         err = ext4_journal_get_write_access(handle, gdp_bh);
2833         if (err)
2834                 goto out_err;
2835
2836         block = ac->ac_b_ex.fe_group * EXT4_BLOCKS_PER_GROUP(sb)
2837                 + ac->ac_b_ex.fe_start
2838                 + le32_to_cpu(es->s_first_data_block);
2839
2840         len = ac->ac_b_ex.fe_len;
2841         if (in_range(ext4_block_bitmap(sb, gdp), block, len) ||
2842             in_range(ext4_inode_bitmap(sb, gdp), block, len) ||
2843             in_range(block, ext4_inode_table(sb, gdp),
2844                      EXT4_SB(sb)->s_itb_per_group) ||
2845             in_range(block + len - 1, ext4_inode_table(sb, gdp),
2846                      EXT4_SB(sb)->s_itb_per_group)) {
2847                 ext4_error(sb, __func__,
2848                            "Allocating block in system zone - block = %llu",
2849                            block);
2850         /* The file system is mounted not to panic on error,
2851          * so fix the bitmap and repeat the block allocation.
2852          * We leak some of the blocks here.
2853          */
2854                 mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group),
2855                                 bitmap_bh->b_data, ac->ac_b_ex.fe_start,
2856                                 ac->ac_b_ex.fe_len);
2857                 err = ext4_journal_dirty_metadata(handle, bitmap_bh);
2858                 if (!err)
2859                         err = -EAGAIN;
2860                 goto out_err;
2861         }
2862 #ifdef AGGRESSIVE_CHECK
2863         {
2864                 int i;
2865                 for (i = 0; i < ac->ac_b_ex.fe_len; i++) {
2866                         BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i,
2867                                                 bitmap_bh->b_data));
2868                 }
2869         }
2870 #endif
2871         mb_set_bits(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group), bitmap_bh->b_data,
2872                                 ac->ac_b_ex.fe_start, ac->ac_b_ex.fe_len);
2873
2874         spin_lock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
2875         if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
2876                 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
2877                 gdp->bg_free_blocks_count =
2878                         cpu_to_le16(ext4_free_blocks_after_init(sb,
2879                                                 ac->ac_b_ex.fe_group,
2880                                                 gdp));
2881         }
2882         le16_add_cpu(&gdp->bg_free_blocks_count, -ac->ac_b_ex.fe_len);
2883         gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp);
2884         spin_unlock(sb_bgl_lock(sbi, ac->ac_b_ex.fe_group));
2885         percpu_counter_sub(&sbi->s_freeblocks_counter, ac->ac_b_ex.fe_len);
2886         /*
2887          * Now reduce the dirty block count also. Should not go negative
2888          */
2889         if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
2890                 /* release all the reserved blocks if non delalloc */
2891                 percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks);
2892         else
2893                 percpu_counter_sub(&sbi->s_dirtyblocks_counter,
2894                                                 ac->ac_b_ex.fe_len);
2895
2896         if (sbi->s_log_groups_per_flex) {
2897                 ext4_group_t flex_group = ext4_flex_group(sbi,
2898                                                           ac->ac_b_ex.fe_group);
2899                 spin_lock(sb_bgl_lock(sbi, flex_group));
2900                 sbi->s_flex_groups[flex_group].free_blocks -= ac->ac_b_ex.fe_len;
2901                 spin_unlock(sb_bgl_lock(sbi, flex_group));
2902         }
2903
2904         err = ext4_journal_dirty_metadata(handle, bitmap_bh);
2905         if (err)
2906                 goto out_err;
2907         err = ext4_journal_dirty_metadata(handle, gdp_bh);
2908
2909 out_err:
2910         sb->s_dirt = 1;
2911         brelse(bitmap_bh);
2912         return err;
2913 }
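/*
 * The system-zone check above depends on in_range(); a minimal sketch
 * of the semantics it is assumed to have (the real macro lives in the
 * ext4 headers):
 *
 *	in_range(b, first, len) == (b >= first && b <= first + len - 1)
 *
 * Testing both `block` and `block + len - 1` against the inode table
 * also catches extents that merely begin or end inside the table.
 */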
2914
2915 /*
2916  * Here we normalize the request for a locality group.
2917  * Group requests are normalized to the s_stripe size if it was set via the
2918  * mount option; otherwise we use s_mb_group_prealloc, which can be tuned via
2919  * /proc/fs/ext4/<partition>/group_prealloc
2920  *
2921  * XXX: should we try to preallocate more than the group has now?
2922  */
2923 static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
2924 {
2925         struct super_block *sb = ac->ac_sb;
2926         struct ext4_locality_group *lg = ac->ac_lg;
2927
2928         BUG_ON(lg == NULL);
2929         if (EXT4_SB(sb)->s_stripe)
2930                 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_stripe;
2931         else
2932                 ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
2933         mb_debug("#%u: goal %u blocks for locality group\n",
2934                 current->pid, ac->ac_g_ex.fe_len);
2935 }
2936
2937 /*
2938  * Normalization means making request better in terms of
2939  * size and alignment
2940  */
2941 static noinline_for_stack void
2942 ext4_mb_normalize_request(struct ext4_allocation_context *ac,
2943                                 struct ext4_allocation_request *ar)
2944 {
2945         int bsbits, max;
2946         ext4_lblk_t end;
2947         loff_t size, orig_size, start_off;
2948         ext4_lblk_t start, orig_start;
2949         struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
2950         struct ext4_prealloc_space *pa;
2951
2952         /* only normalize data requests; metadata requests
2953            do not need preallocation */
2954         if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
2955                 return;
2956
2957         /* sometimes the caller may want exactly the goal blocks */
2958         if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
2959                 return;
2960
2961         /* caller may indicate that preallocation isn't
2962          * required (it's a tail, for example) */
2963         if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC)
2964                 return;
2965
2966         if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) {
2967                 ext4_mb_normalize_group_request(ac);
2968                 return;
2969         }
2970
2971         bsbits = ac->ac_sb->s_blocksize_bits;
2972
2973         /* first, let's learn the actual file size
2974          * we would have once the current request is allocated */
2975         size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
2976         size = size << bsbits;
2977         if (size < i_size_read(ac->ac_inode))
2978                 size = i_size_read(ac->ac_inode);
2979
2980         /* max size of free chunks */
2981         max = 2 << bsbits;
2982
2983 #define NRL_CHECK_SIZE(req, size, max, chunk_size)      \
2984                 (req <= (size) || max <= (chunk_size))
2985
2986         /* first, try to predict filesize */
2987         /* XXX: should this table be tunable? */
2988         start_off = 0;
2989         if (size <= 16 * 1024) {
2990                 size = 16 * 1024;
2991         } else if (size <= 32 * 1024) {
2992                 size = 32 * 1024;
2993         } else if (size <= 64 * 1024) {
2994                 size = 64 * 1024;
2995         } else if (size <= 128 * 1024) {
2996                 size = 128 * 1024;
2997         } else if (size <= 256 * 1024) {
2998                 size = 256 * 1024;
2999         } else if (size <= 512 * 1024) {
3000                 size = 512 * 1024;
3001         } else if (size <= 1024 * 1024) {
3002                 size = 1024 * 1024;
3003         } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) {
3004                 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3005                                                 (21 - bsbits)) << 21;
3006                 size = 2 * 1024 * 1024;
3007         } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) {
3008                 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3009                                                         (22 - bsbits)) << 22;
3010                 size = 4 * 1024 * 1024;
3011         } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len,
3012                                         (8<<20)>>bsbits, max, 8 * 1024)) {
3013                 start_off = ((loff_t)ac->ac_o_ex.fe_logical >>
3014                                                         (23 - bsbits)) << 23;
3015                 size = 8 * 1024 * 1024;
3016         } else {
3017                 start_off = (loff_t)ac->ac_o_ex.fe_logical << bsbits;
3018                 size      = ac->ac_o_ex.fe_len << bsbits;
3019         }
3020         orig_size = size = size >> bsbits;
3021         orig_start = start = start_off >> bsbits;
3022
3023         /* don't cover already allocated blocks in selected range */
3024         if (ar->pleft && start <= ar->lleft) {
3025                 size -= ar->lleft + 1 - start;
3026                 start = ar->lleft + 1;
3027         }
3028         if (ar->pright && start + size - 1 >= ar->lright)
3029                 size -= start + size - ar->lright;
3030
3031         end = start + size;
3032
3033         /* check we don't cross already preallocated blocks */
3034         rcu_read_lock();
3035         list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3036                 unsigned long pa_end;
3037
3038                 if (pa->pa_deleted)
3039                         continue;
3040                 spin_lock(&pa->pa_lock);
3041                 if (pa->pa_deleted) {
3042                         spin_unlock(&pa->pa_lock);
3043                         continue;
3044                 }
3045
3046                 pa_end = pa->pa_lstart + pa->pa_len;
3047
3048                 /* PA must not overlap original request */
3049                 BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end ||
3050                         ac->ac_o_ex.fe_logical < pa->pa_lstart));
3051
3052                 /* skip PAs that the normalized request doesn't overlap with */
3053                 if (pa->pa_lstart >= end) {
3054                         spin_unlock(&pa->pa_lock);
3055                         continue;
3056                 }
3057                 if (pa_end <= start) {
3058                         spin_unlock(&pa->pa_lock);
3059                         continue;
3060                 }
3061                 BUG_ON(pa->pa_lstart <= start && pa_end >= end);
3062
3063                 if (pa_end <= ac->ac_o_ex.fe_logical) {
3064                         BUG_ON(pa_end < start);
3065                         start = pa_end;
3066                 }
3067
3068                 if (pa->pa_lstart > ac->ac_o_ex.fe_logical) {
3069                         BUG_ON(pa->pa_lstart > end);
3070                         end = pa->pa_lstart;
3071                 }
3072                 spin_unlock(&pa->pa_lock);
3073         }
3074         rcu_read_unlock();
3075         size = end - start;
3076
3077         /* XXX: extra loop to check we really don't overlap preallocations */
3078         rcu_read_lock();
3079         list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3080                 unsigned long pa_end;
3081                 spin_lock(&pa->pa_lock);
3082                 if (pa->pa_deleted == 0) {
3083                         pa_end = pa->pa_lstart + pa->pa_len;
3084                         BUG_ON(!(start >= pa_end || end <= pa->pa_lstart));
3085                 }
3086                 spin_unlock(&pa->pa_lock);
3087         }
3088         rcu_read_unlock();
3089
3090         if (start + size <= ac->ac_o_ex.fe_logical &&
3091                         start > ac->ac_o_ex.fe_logical) {
3092                 printk(KERN_ERR "start %lu, size %lu, fe_logical %lu\n",
3093                         (unsigned long) start, (unsigned long) size,
3094                         (unsigned long) ac->ac_o_ex.fe_logical);
3095         }
3096         BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
3097                         start > ac->ac_o_ex.fe_logical);
3098         BUG_ON(size <= 0 || size >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
3099
3100         /* now prepare goal request */
3101
3102         /* XXX: is it better to align blocks WRT logical
3103          * placement or to satisfy a big request as is */
3104         ac->ac_g_ex.fe_logical = start;
3105         ac->ac_g_ex.fe_len = size;
3106
3107         /* define goal start in order to merge */
3108         if (ar->pright && (ar->lright == (start + size))) {
3109                 /* merge to the right */
3110                 ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size,
3111                                                 &ac->ac_f_ex.fe_group,
3112                                                 &ac->ac_f_ex.fe_start);
3113                 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3114         }
3115         if (ar->pleft && (ar->lleft + 1 == start)) {
3116                 /* merge to the left */
3117                 ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1,
3118                                                 &ac->ac_f_ex.fe_group,
3119                                                 &ac->ac_f_ex.fe_start);
3120                 ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL;
3121         }
3122
3123         mb_debug("goal: %u(was %u) blocks at %u\n", (unsigned) size,
3124                 (unsigned) orig_size, (unsigned) start);
3125 }
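/*
 * For illustration, the power-of-two size prediction above behaves like
 * the following sketch for the sub-1MB tiers (a hypothetical helper,
 * sizes in bytes):
 *
 *	static loff_t mb_predict_size(loff_t size)
 *	{
 *		loff_t bucket;
 *
 *		for (bucket = 16 * 1024; bucket <= 1024 * 1024; bucket <<= 1)
 *			if (size <= bucket)
 *				return bucket;
 *		return size;
 *	}
 *
 * (requests above 1MB fall through to the NRL_CHECK_SIZE tiers above.)
 * e.g. a 40KB file is normalized to a 64KB goal, trading a little
 * internal fragmentation for fewer, larger extents.
 */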
3126
3127 static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3128 {
3129         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3130
3131         if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
3132                 atomic_inc(&sbi->s_bal_reqs);
3133                 atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
3134                 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
3135                         atomic_inc(&sbi->s_bal_success);
3136                 atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
3137                 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
3138                                 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
3139                         atomic_inc(&sbi->s_bal_goals);
3140                 if (ac->ac_found > sbi->s_mb_max_to_scan)
3141                         atomic_inc(&sbi->s_bal_breaks);
3142         }
3143
3144         ext4_mb_store_history(ac);
3145 }
3146
3147 /*
3148  * use blocks preallocated to inode
3149  */
3150 static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
3151                                 struct ext4_prealloc_space *pa)
3152 {
3153         ext4_fsblk_t start;
3154         ext4_fsblk_t end;
3155         int len;
3156
3157         /* found preallocated blocks, use them */
3158         start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart);
3159         end = min(pa->pa_pstart + pa->pa_len, start + ac->ac_o_ex.fe_len);
3160         len = end - start;
3161         ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group,
3162                                         &ac->ac_b_ex.fe_start);
3163         ac->ac_b_ex.fe_len = len;
3164         ac->ac_status = AC_STATUS_FOUND;
3165         ac->ac_pa = pa;
3166
3167         BUG_ON(start < pa->pa_pstart);
3168         BUG_ON(start + len > pa->pa_pstart + pa->pa_len);
3169         BUG_ON(pa->pa_free < len);
3170         pa->pa_free -= len;
3171
3172         mb_debug("use %llu/%u from inode pa %p\n", start, len, pa);
3173 }
3174
3175 /*
3176  * use blocks preallocated to locality group
3177  */
3178 static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
3179                                 struct ext4_prealloc_space *pa)
3180 {
3181         unsigned int len = ac->ac_o_ex.fe_len;
3182
3183         ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
3184                                         &ac->ac_b_ex.fe_group,
3185                                         &ac->ac_b_ex.fe_start);
3186         ac->ac_b_ex.fe_len = len;
3187         ac->ac_status = AC_STATUS_FOUND;
3188         ac->ac_pa = pa;
3189
3190         /* we don't correct pa_pstart or pa_len here to avoid
3191          * a possible race when the group is being loaded concurrently;
3192          * instead we correct the pa later, after blocks are marked
3193          * in the on-disk bitmap -- see ext4_mb_release_context().
3194          * Other CPUs are prevented from allocating from this pa by lg_mutex
3195          */
3196         mb_debug("use %u/%u from group pa %p\n", pa->pa_lstart-len, len, pa);
3197 }
3198
3199 /*
3200  * Return the prealloc space that has the minimal distance
3201  * from the goal block. @cpa is the prealloc
3202  * space with the currently known minimal distance
3203  * from the goal block.
3204  */
3205 static struct ext4_prealloc_space *
3206 ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
3207                         struct ext4_prealloc_space *pa,
3208                         struct ext4_prealloc_space *cpa)
3209 {
3210         ext4_fsblk_t cur_distance, new_distance;
3211
3212         if (cpa == NULL) {
3213                 atomic_inc(&pa->pa_count);
3214                 return pa;
3215         }
3216         cur_distance = abs(goal_block - cpa->pa_pstart);
3217         new_distance = abs(goal_block - pa->pa_pstart);
3218
3219         if (cur_distance < new_distance)
3220                 return cpa;
3221
3222         /* drop the previous reference */
3223         atomic_dec(&cpa->pa_count);
3224         atomic_inc(&pa->pa_count);
3225         return pa;
3226 }
3227
3228 /*
3229  * search goal blocks in preallocated space
3230  */
3231 static noinline_for_stack int
3232 ext4_mb_use_preallocated(struct ext4_allocation_context *ac)
3233 {
3234         int order, i;
3235         struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
3236         struct ext4_locality_group *lg;
3237         struct ext4_prealloc_space *pa, *cpa = NULL;
3238         ext4_fsblk_t goal_block;
3239
3240         /* only data can be preallocated */
3241         if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3242                 return 0;
3243
3244         /* first, try per-file preallocation */
3245         rcu_read_lock();
3246         list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) {
3247
3248                 /* all fields in this condition don't change,
3249                  * so we can skip locking for them */
3250                 if (ac->ac_o_ex.fe_logical < pa->pa_lstart ||
3251                         ac->ac_o_ex.fe_logical >= pa->pa_lstart + pa->pa_len)
3252                         continue;
3253
3254                 /* found preallocated blocks, use them */
3255                 spin_lock(&pa->pa_lock);
3256                 if (pa->pa_deleted == 0 && pa->pa_free) {
3257                         atomic_inc(&pa->pa_count);
3258                         ext4_mb_use_inode_pa(ac, pa);
3259                         spin_unlock(&pa->pa_lock);
3260                         ac->ac_criteria = 10;
3261                         rcu_read_unlock();
3262                         return 1;
3263                 }
3264                 spin_unlock(&pa->pa_lock);
3265         }
3266         rcu_read_unlock();
3267
3268         /* can we use group allocation? */
3269         if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC))
3270                 return 0;
3271
3272         /* inode may have no locality group for some reason */
3273         lg = ac->ac_lg;
3274         if (lg == NULL)
3275                 return 0;
3276         order  = fls(ac->ac_o_ex.fe_len) - 1;
3277         if (order > PREALLOC_TB_SIZE - 1)
3278                 /* The max size of hash table is PREALLOC_TB_SIZE */
3279                 order = PREALLOC_TB_SIZE - 1;
3280
3281         goal_block = ac->ac_g_ex.fe_group * EXT4_BLOCKS_PER_GROUP(ac->ac_sb) +
3282                      ac->ac_g_ex.fe_start +
3283                      le32_to_cpu(EXT4_SB(ac->ac_sb)->s_es->s_first_data_block);
3284         /*
3285          * search for the prealloc space with the
3286          * minimal distance from the goal block.
3287          */
3288         for (i = order; i < PREALLOC_TB_SIZE; i++) {
3289                 rcu_read_lock();
3290                 list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i],
3291                                         pa_inode_list) {
3292                         spin_lock(&pa->pa_lock);
3293                         if (pa->pa_deleted == 0 &&
3294                                         pa->pa_free >= ac->ac_o_ex.fe_len) {
3295
3296                                 cpa = ext4_mb_check_group_pa(goal_block,
3297                                                                 pa, cpa);
3298                         }
3299                         spin_unlock(&pa->pa_lock);
3300                 }
3301                 rcu_read_unlock();
3302         }
3303         if (cpa) {
3304                 ext4_mb_use_group_pa(ac, cpa);
3305                 ac->ac_criteria = 20;
3306                 return 1;
3307         }
3308         return 0;
3309 }
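/*
 * The goal_block arithmetic above converts a (group, offset) pair into
 * an absolute filesystem block number; schematically:
 *
 *	block = group * EXT4_BLOCKS_PER_GROUP(sb)
 *		+ offset_within_group
 *		+ le32_to_cpu(es->s_first_data_block);
 *
 * ext4_get_group_no_and_offset() performs the inverse mapping and is
 * used throughout this file to go from a block back to (group, offset).
 */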
3310
3311 /*
3312  * the function goes through all preallocations in this group and marks them
3313  * used in the in-core bitmap. The buddy must be generated from this bitmap.
3314  * Needs to be called with the ext4 group lock held (ext4_lock_group)
3315  */
3316 static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
3317                                         ext4_group_t group)
3318 {
3319         struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3320         struct ext4_prealloc_space *pa;
3321         struct list_head *cur;
3322         ext4_group_t groupnr;
3323         ext4_grpblk_t start;
3324         int preallocated = 0;
3325         int count = 0;
3326         int len;
3327
3328         /* all forms of preallocation discard first load the group,
3329          * so the only competing code is preallocation use.
3330          * we don't need any locking here.
3331          * notice that we do NOT ignore preallocations with pa_deleted set;
3332          * otherwise we could leave used blocks available for
3333          * allocation in the buddy when a concurrent ext4_mb_put_pa()
3334          * is dropping the preallocation
3335          */
3336         list_for_each(cur, &grp->bb_prealloc_list) {
3337                 pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3338                 spin_lock(&pa->pa_lock);
3339                 ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3340                                              &groupnr, &start);
3341                 len = pa->pa_len;
3342                 spin_unlock(&pa->pa_lock);
3343                 if (unlikely(len == 0))
3344                         continue;
3345                 BUG_ON(groupnr != group);
3346                 mb_set_bits(sb_bgl_lock(EXT4_SB(sb), group),
3347                                                 bitmap, start, len);
3348                 preallocated += len;
3349                 count++;
3350         }
3351         mb_debug("prellocated %u for group %lu\n", preallocated, group);
3352 }
3353
3354 static void ext4_mb_pa_callback(struct rcu_head *head)
3355 {
3356         struct ext4_prealloc_space *pa;
3357         pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
3358         kmem_cache_free(ext4_pspace_cachep, pa);
3359 }
3360
3361 /*
3362  * drops a reference to the preallocated space descriptor;
3363  * frees it if this was the last reference and the space is consumed
3364  */
3365 static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
3366                         struct super_block *sb, struct ext4_prealloc_space *pa)
3367 {
3368         unsigned long grp;
3369
3370         if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
3371                 return;
3372
3373         /* in this short window concurrent discard can set pa_deleted */
3374         spin_lock(&pa->pa_lock);
3375         if (pa->pa_deleted == 1) {
3376                 spin_unlock(&pa->pa_lock);
3377                 return;
3378         }
3379
3380         pa->pa_deleted = 1;
3381         spin_unlock(&pa->pa_lock);
3382
3383         /* -1 is to protect from crossing allocation group */
3384         ext4_get_group_no_and_offset(sb, pa->pa_pstart - 1, &grp, NULL);
3385
3386         /*
3387          * possible race:
3388          *
3389          *  P1 (buddy init)                     P2 (regular allocation)
3390          *                                      find block B in PA
3391          *  copy on-disk bitmap to buddy
3392          *                                      mark B in on-disk bitmap
3393          *                                      drop PA from group
3394          *  mark all PAs in buddy
3395          *
3396          * thus, P1 initializes buddy with B available. to prevent this
3397          * we make "copy" and "mark all PAs" atomic and serialize "drop PA"
3398          * against that pair
3399          */
3400         ext4_lock_group(sb, grp);
3401         list_del(&pa->pa_group_list);
3402         ext4_unlock_group(sb, grp);
3403
3404         spin_lock(pa->pa_obj_lock);
3405         list_del_rcu(&pa->pa_inode_list);
3406         spin_unlock(pa->pa_obj_lock);
3407
3408         call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3409 }
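/*
 * The call_rcu() above defers the kmem_cache_free() performed by
 * ext4_mb_pa_callback() until every RCU reader that may still be
 * walking i_prealloc_list (the list_for_each_entry_rcu() loops in this
 * file) has left its read-side critical section. In minimal form:
 *
 *	reader:				writer:
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(...)	list_del_rcu(&pa->pa_inode_list);
 *		use pa;			call_rcu(&pa->u.pa_rcu, cb);
 *	rcu_read_unlock();		(cb frees pa after a grace period)
 */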
3410
3411 /*
3412  * creates new preallocated space for given inode
3413  */
3414 static noinline_for_stack int
3415 ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
3416 {
3417         struct super_block *sb = ac->ac_sb;
3418         struct ext4_prealloc_space *pa;
3419         struct ext4_group_info *grp;
3420         struct ext4_inode_info *ei;
3421
3422         /* preallocate only when the found space is larger than requested */
3423         BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3424         BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3425         BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3426
3427         pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3428         if (pa == NULL)
3429                 return -ENOMEM;
3430
3431         if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) {
3432                 int winl;
3433                 int wins;
3434                 int win;
3435                 int offs;
3436
3437                 /* we can't allocate as much as the normalizer wants,
3438                  * so the found space must get a proper lstart
3439                  * to cover the original request */
3440                 BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical);
3441                 BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len);
3442
3443                 /* we're limited by the original request in that
3444                  * its logical block must be covered anyway;
3445                  * winl is the window we can move our chunk within */
3446                 winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical;
3447
3448                 /* also, we should cover whole original request */
3449                 wins = ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len;
3450
3451                 /* the smallest one defines real window */
3452                 win = min(winl, wins);
3453
3454                 offs = ac->ac_o_ex.fe_logical % ac->ac_b_ex.fe_len;
3455                 if (offs && offs < win)
3456                         win = offs;
3457
3458                 ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - win;
3459                 BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical);
3460                 BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len);
3461         }
3462
3463         /* preallocation can change ac_b_ex, thus we store actually
3464          * allocated blocks for history */
3465         ac->ac_f_ex = ac->ac_b_ex;
3466
3467         pa->pa_lstart = ac->ac_b_ex.fe_logical;
3468         pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3469         pa->pa_len = ac->ac_b_ex.fe_len;
3470         pa->pa_free = pa->pa_len;
3471         atomic_set(&pa->pa_count, 1);
3472         spin_lock_init(&pa->pa_lock);
3473         pa->pa_deleted = 0;
3474         pa->pa_linear = 0;
3475
3476         mb_debug("new inode pa %p: %llu/%u for %u\n", pa,
3477                         pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3478
3479         ext4_mb_use_inode_pa(ac, pa);
3480         atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3481
3482         ei = EXT4_I(ac->ac_inode);
3483         grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3484
3485         pa->pa_obj_lock = &ei->i_prealloc_lock;
3486         pa->pa_inode = ac->ac_inode;
3487
3488         ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3489         list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3490         ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3491
3492         spin_lock(pa->pa_obj_lock);
3493         list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list);
3494         spin_unlock(pa->pa_obj_lock);
3495
3496         return 0;
3497 }
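/*
 * Worked example for the window logic above (numbers are illustrative):
 * say the original request was 4 blocks at logical block 100, the
 * normalized goal started at logical 96, and 10 blocks were actually
 * found. Then:
 *
 *	winl = 100 - 96 = 4		(slack before the original start)
 *	wins = 10 - 4 = 6		(slack after covering the request)
 *	win  = min(winl, wins) = 4
 *	offs = 100 % 10 = 0		(no alignment correction here)
 *	fe_logical = 100 - 4 = 96
 *
 * so the pa covers logical blocks [96, 105] and still contains the
 * whole original request [100, 103].
 */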
3498
3499 /*
3500  * creates new preallocated space for the locality group the inode belongs to
3501  */
3502 static noinline_for_stack int
3503 ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
3504 {
3505         struct super_block *sb = ac->ac_sb;
3506         struct ext4_locality_group *lg;
3507         struct ext4_prealloc_space *pa;
3508         struct ext4_group_info *grp;
3509
3510         /* preallocate only when the found space is larger than requested */
3511         BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len);
3512         BUG_ON(ac->ac_status != AC_STATUS_FOUND);
3513         BUG_ON(!S_ISREG(ac->ac_inode->i_mode));
3514
3515         BUG_ON(ext4_pspace_cachep == NULL);
3516         pa = kmem_cache_alloc(ext4_pspace_cachep, GFP_NOFS);
3517         if (pa == NULL)
3518                 return -ENOMEM;
3519
3520         /* preallocation can change ac_b_ex, thus we store actually
3521          * allocated blocks for history */
3522         ac->ac_f_ex = ac->ac_b_ex;
3523
3524         pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
3525         pa->pa_lstart = pa->pa_pstart;
3526         pa->pa_len = ac->ac_b_ex.fe_len;
3527         pa->pa_free = pa->pa_len;
3528         atomic_set(&pa->pa_count, 1);
3529         spin_lock_init(&pa->pa_lock);
3530         INIT_LIST_HEAD(&pa->pa_inode_list);
3531         pa->pa_deleted = 0;
3532         pa->pa_linear = 1;
3533
3534         mb_debug("new group pa %p: %llu/%u for %u\n", pa,
3535                         pa->pa_pstart, pa->pa_len, pa->pa_lstart);
3536
3537         ext4_mb_use_group_pa(ac, pa);
3538         atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
3539
3540         grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
3541         lg = ac->ac_lg;
3542         BUG_ON(lg == NULL);
3543
3544         pa->pa_obj_lock = &lg->lg_prealloc_lock;
3545         pa->pa_inode = NULL;
3546
3547         ext4_lock_group(sb, ac->ac_b_ex.fe_group);
3548         list_add(&pa->pa_group_list, &grp->bb_prealloc_list);
3549         ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
3550
3551         /*
3552          * We will later add the new pa to the right bucket
3553          * after updating the pa_free in ext4_mb_release_context
3554          */
3555         return 0;
3556 }
3557
3558 static int ext4_mb_new_preallocation(struct ext4_allocation_context *ac)
3559 {
3560         int err;
3561
3562         if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
3563                 err = ext4_mb_new_group_pa(ac);
3564         else
3565                 err = ext4_mb_new_inode_pa(ac);
3566         return err;
3567 }
3568
3569 /*
3570  * finds all unused blocks in on-disk bitmap, frees them in
3571  * in-core bitmap and buddy.
3572  * @pa must be unlinked from inode and group lists, so that
3573  * nobody else can find/use it.
3574  * the caller MUST hold group/inode locks.
3575  * TODO: optimize the case when there are no in-core structures yet
3576  */
3577 static noinline_for_stack int
3578 ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
3579                         struct ext4_prealloc_space *pa,
3580                         struct ext4_allocation_context *ac)
3581 {
3582         struct super_block *sb = e4b->bd_sb;
3583         struct ext4_sb_info *sbi = EXT4_SB(sb);
3584         unsigned long end;
3585         unsigned long next;
3586         ext4_group_t group;
3587         ext4_grpblk_t bit;
3588         sector_t start;
3589         int err = 0;
3590         int free = 0;
3591
3592         BUG_ON(pa->pa_deleted == 0);
3593         ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3594         BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3595         end = bit + pa->pa_len;
3596
3597         if (ac) {
3598                 ac->ac_sb = sb;
3599                 ac->ac_inode = pa->pa_inode;
3600                 ac->ac_op = EXT4_MB_HISTORY_DISCARD;
3601         }
3602
3603         while (bit < end) {
3604                 bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit);
3605                 if (bit >= end)
3606                         break;
3607                 next = mb_find_next_bit(bitmap_bh->b_data, end, bit);
3608                 start = group * EXT4_BLOCKS_PER_GROUP(sb) + bit +
3609                                 le32_to_cpu(sbi->s_es->s_first_data_block);
3610                 mb_debug("    free preallocated %u/%u in group %u\n",
3611                                 (unsigned) start, (unsigned) next - bit,
3612                                 (unsigned) group);
3613                 free += next - bit;
3614
3615                 if (ac) {
3616                         ac->ac_b_ex.fe_group = group;
3617                         ac->ac_b_ex.fe_start = bit;
3618                         ac->ac_b_ex.fe_len = next - bit;
3619                         ac->ac_b_ex.fe_logical = 0;
3620                         ext4_mb_store_history(ac);
3621                 }
3622
3623                 mb_free_blocks(pa->pa_inode, e4b, bit, next - bit);
3624                 bit = next + 1;
3625         }
3626         if (free != pa->pa_free) {
3627                 printk(KERN_CRIT "pa %p: logic %lu, phys. %lu, len %lu\n",
3628                         pa, (unsigned long) pa->pa_lstart,
3629                         (unsigned long) pa->pa_pstart,
3630                         (unsigned long) pa->pa_len);
3631                 ext4_error(sb, __func__, "free %u, pa_free %u\n",
3632                                                 free, pa->pa_free);
3633                 /*
3634                  * pa is already deleted so we use the value obtained
3635                  * from the bitmap and continue.
3636                  */
3637         }
3638         atomic_add(free, &sbi->s_mb_discarded);
3639
3640         return err;
3641 }
3642
3643 static noinline_for_stack int
3644 ext4_mb_release_group_pa(struct ext4_buddy *e4b,
3645                                 struct ext4_prealloc_space *pa,
3646                                 struct ext4_allocation_context *ac)
3647 {
3648         struct super_block *sb = e4b->bd_sb;
3649         ext4_group_t group;
3650         ext4_grpblk_t bit;
3651
3652         if (ac)
3653                 ac->ac_op = EXT4_MB_HISTORY_DISCARD;
3654
3655         BUG_ON(pa->pa_deleted == 0);
3656         ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
3657         BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
3658         mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
3659         atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
3660
3661         if (ac) {
3662                 ac->ac_sb = sb;
3663                 ac->ac_inode = NULL;
3664                 ac->ac_b_ex.fe_group = group;
3665                 ac->ac_b_ex.fe_start = bit;
3666                 ac->ac_b_ex.fe_len = pa->pa_len;
3667                 ac->ac_b_ex.fe_logical = 0;
3668                 ext4_mb_store_history(ac);
3669         }
3670
3671         return 0;
3672 }
3673
3674 /*
3675  * releases all preallocations in given group
3676  *
3677  * first, we need to decide discard policy:
3678  * - when do we discard
3679  *   1) ENOSPC
3680  * - how many do we discard
3681  *   1) how many requested
3682  */
3683 static noinline_for_stack int
3684 ext4_mb_discard_group_preallocations(struct super_block *sb,
3685                                         ext4_group_t group, int needed)
3686 {
3687         struct ext4_group_info *grp = ext4_get_group_info(sb, group);
3688         struct buffer_head *bitmap_bh = NULL;
3689         struct ext4_prealloc_space *pa, *tmp;
3690         struct ext4_allocation_context *ac;
3691         struct list_head list;
3692         struct ext4_buddy e4b;
3693         int err;
3694         int busy = 0;
3695         int free = 0;
3696
3697         mb_debug("discard preallocation for group %lu\n", group);
3698
3699         if (list_empty(&grp->bb_prealloc_list))
3700                 return 0;
3701
3702         bitmap_bh = ext4_read_block_bitmap(sb, group);
3703         if (bitmap_bh == NULL) {
3704                 ext4_error(sb, __func__, "Error in reading block "
3705                                 "bitmap for %lu\n", group);
3706                 return 0;
3707         }
3708
3709         err = ext4_mb_load_buddy(sb, group, &e4b);
3710         if (err) {
3711                 ext4_error(sb, __func__, "Error in loading buddy "
3712                                 "information for %lu\n", group);
3713                 put_bh(bitmap_bh);
3714                 return 0;
3715         }
3716
3717         if (needed == 0)
3718                 needed = EXT4_BLOCKS_PER_GROUP(sb) + 1;
3719
3720         INIT_LIST_HEAD(&list);
3721         ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
3722 repeat:
3723         ext4_lock_group(sb, group);
3724         list_for_each_entry_safe(pa, tmp,
3725                                 &grp->bb_prealloc_list, pa_group_list) {
3726                 spin_lock(&pa->pa_lock);
3727                 if (atomic_read(&pa->pa_count)) {
3728                         spin_unlock(&pa->pa_lock);
3729                         busy = 1;
3730                         continue;
3731                 }
3732                 if (pa->pa_deleted) {
3733                         spin_unlock(&pa->pa_lock);
3734                         continue;
3735                 }
3736
3737                 /* seems this one can be freed ... */
3738                 pa->pa_deleted = 1;
3739
3740                 /* we can trust pa_free ... */
3741                 free += pa->pa_free;
3742
3743                 spin_unlock(&pa->pa_lock);
3744
3745                 list_del(&pa->pa_group_list);
3746                 list_add(&pa->u.pa_tmp_list, &list);
3747         }
3748
3749         /* if we still need more blocks and some PAs were used, try again */
3750         if (free < needed && busy) {
3751                 busy = 0;
3752                 ext4_unlock_group(sb, group);
3753                 /*
3754                  * Yield the CPU here so that we don't get a soft lockup
3755                  * in the non-preempt case.
3756                  */
3757                 yield();
3758                 goto repeat;
3759         }
3760
3761         /* found anything to free? */
3762         if (list_empty(&list)) {
3763                 BUG_ON(free != 0);
3764                 goto out;
3765         }
3766
3767         /* now free all selected PAs */
3768         list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3769
3770                 /* remove from object (inode or locality group) */
3771                 spin_lock(pa->pa_obj_lock);
3772                 list_del_rcu(&pa->pa_inode_list);
3773                 spin_unlock(pa->pa_obj_lock);
3774
3775                 if (pa->pa_linear)
3776                         ext4_mb_release_group_pa(&e4b, pa, ac);
3777                 else
3778                         ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
3779
3780                 list_del(&pa->u.pa_tmp_list);
3781                 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3782         }
3783
3784 out:
3785         ext4_unlock_group(sb, group);
3786         if (ac)
3787                 kmem_cache_free(ext4_ac_cachep, ac);
3788         ext4_mb_release_desc(&e4b);
3789         put_bh(bitmap_bh);
3790         return free;
3791 }
3792
3793 /*
3794  * releases all unused preallocated blocks for the given inode
3795  *
3796  * It's important to discard preallocations under i_data_sem
3797  * We don't want another block to be served from the prealloc
3798  * space when we are discarding the inode prealloc space.
3799  *
3800  * FIXME!! Make sure it is valid at all the call sites
3801  */
3802 void ext4_discard_preallocations(struct inode *inode)
3803 {
3804         struct ext4_inode_info *ei = EXT4_I(inode);
3805         struct super_block *sb = inode->i_sb;
3806         struct buffer_head *bitmap_bh = NULL;
3807         struct ext4_prealloc_space *pa, *tmp;
3808         struct ext4_allocation_context *ac;
3809         ext4_group_t group = 0;
3810         struct list_head list;
3811         struct ext4_buddy e4b;
3812         int err;
3813
3814         if (!S_ISREG(inode->i_mode)) {
3815                 /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/
3816                 return;
3817         }
3818
3819         mb_debug("discard preallocation for inode %lu\n", inode->i_ino);
3820
3821         INIT_LIST_HEAD(&list);
3822
3823         ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
3824 repeat:
3825         /* first, collect all pa's in the inode */
3826         spin_lock(&ei->i_prealloc_lock);
3827         while (!list_empty(&ei->i_prealloc_list)) {
3828                 pa = list_entry(ei->i_prealloc_list.next,
3829                                 struct ext4_prealloc_space, pa_inode_list);
3830                 BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock);
3831                 spin_lock(&pa->pa_lock);
3832                 if (atomic_read(&pa->pa_count)) {
3833                         /* this shouldn't happen often - nobody should
3834                          * use preallocation while we're discarding it */
3835                         spin_unlock(&pa->pa_lock);
3836                         spin_unlock(&ei->i_prealloc_lock);
3837                         printk(KERN_ERR "uh-oh! used pa while discarding\n");
3838                         WARN_ON(1);
3839                         schedule_timeout_uninterruptible(HZ);
3840                         goto repeat;
3841
3842                 }
3843                 if (pa->pa_deleted == 0) {
3844                         pa->pa_deleted = 1;
3845                         spin_unlock(&pa->pa_lock);
3846                         list_del_rcu(&pa->pa_inode_list);
3847                         list_add(&pa->u.pa_tmp_list, &list);
3848                         continue;
3849                 }
3850
3851                 /* someone is deleting pa right now */
3852                 spin_unlock(&pa->pa_lock);
3853                 spin_unlock(&ei->i_prealloc_lock);
3854
3855                 /* we have to wait here because pa_deleted
3856                  * doesn't mean the pa is already unlinked from
3857                  * the list. since we might be called from
3858                  * ->clear_inode(), the inode will get freed
3859                  * and a concurrent thread which is unlinking
3860                  * the pa from the inode's list may access already
3861                  * freed memory, bad-bad-bad */
3862
3863                 /* XXX: if this happens too often, we can
3864                  * add a flag to force wait only in case
3865                  * of ->clear_inode(), but not in case of
3866                  * regular truncate */
3867                 schedule_timeout_uninterruptible(HZ);
3868                 goto repeat;
3869         }
3870         spin_unlock(&ei->i_prealloc_lock);
3871
3872         list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) {
3873                 BUG_ON(pa->pa_linear != 0);
3874                 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
3875
3876                 err = ext4_mb_load_buddy(sb, group, &e4b);
3877                 if (err) {
3878                         ext4_error(sb, __func__, "Error in loading buddy "
3879                                         "information for %lu\n", group);
3880                         continue;
3881                 }
3882
3883                 bitmap_bh = ext4_read_block_bitmap(sb, group);
3884                 if (bitmap_bh == NULL) {
3885                         ext4_error(sb, __func__, "Error in reading block "
3886                                         "bitmap for %lu\n", group);
3887                         ext4_mb_release_desc(&e4b);
3888                         continue;
3889                 }
3890
3891                 ext4_lock_group(sb, group);
3892                 list_del(&pa->pa_group_list);
3893                 ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa, ac);
3894                 ext4_unlock_group(sb, group);
3895
3896                 ext4_mb_release_desc(&e4b);
3897                 put_bh(bitmap_bh);
3898
3899                 list_del(&pa->u.pa_tmp_list);
3900                 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
3901         }
3902         if (ac)
3903                 kmem_cache_free(ext4_ac_cachep, ac);
3904 }
3905
3906 /*
3907  * finds all preallocated spaces and returns blocks being freed to them;
3908  * if a preallocated space becomes full (no block is used from the space)
3909  * then the function frees the space in the buddy
3910  * XXX: at the moment, truncate (which is the only way to free blocks)
3911  * discards all preallocations
3912  */
3913 static void ext4_mb_return_to_preallocation(struct inode *inode,
3914                                         struct ext4_buddy *e4b,
3915                                         sector_t block, int count)
3916 {
3917         BUG_ON(!list_empty(&EXT4_I(inode)->i_prealloc_list));
3918 }
3919 #ifdef MB_DEBUG
3920 static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3921 {
3922         struct super_block *sb = ac->ac_sb;
3923         ext4_group_t i;
3924
3925         printk(KERN_ERR "EXT4-fs: Can't allocate:"
3926                         " Allocation context details:\n");
3927         printk(KERN_ERR "EXT4-fs: status %d flags %d\n",
3928                         ac->ac_status, ac->ac_flags);
3929         printk(KERN_ERR "EXT4-fs: orig %lu/%lu/%lu@%lu, goal %lu/%lu/%lu@%lu, "
3930                         "best %lu/%lu/%lu@%lu cr %d\n",
3931                         (unsigned long)ac->ac_o_ex.fe_group,
3932                         (unsigned long)ac->ac_o_ex.fe_start,
3933                         (unsigned long)ac->ac_o_ex.fe_len,
3934                         (unsigned long)ac->ac_o_ex.fe_logical,
3935                         (unsigned long)ac->ac_g_ex.fe_group,
3936                         (unsigned long)ac->ac_g_ex.fe_start,
3937                         (unsigned long)ac->ac_g_ex.fe_len,
3938                         (unsigned long)ac->ac_g_ex.fe_logical,
3939                         (unsigned long)ac->ac_b_ex.fe_group,
3940                         (unsigned long)ac->ac_b_ex.fe_start,
3941                         (unsigned long)ac->ac_b_ex.fe_len,
3942                         (unsigned long)ac->ac_b_ex.fe_logical,
3943                         (int)ac->ac_criteria);
3944         printk(KERN_ERR "EXT4-fs: %lu scanned, %d found\n", ac->ac_ex_scanned,
3945                 ac->ac_found);
3946         printk(KERN_ERR "EXT4-fs: groups: \n");
3947         for (i = 0; i < EXT4_SB(sb)->s_groups_count; i++) {
3948                 struct ext4_group_info *grp = ext4_get_group_info(sb, i);
3949                 struct ext4_prealloc_space *pa;
3950                 ext4_grpblk_t start;
3951                 struct list_head *cur;
3952                 ext4_lock_group(sb, i);
3953                 list_for_each(cur, &grp->bb_prealloc_list) {
3954                         pa = list_entry(cur, struct ext4_prealloc_space,
3955                                         pa_group_list);
3956                         spin_lock(&pa->pa_lock);
3957                         ext4_get_group_no_and_offset(sb, pa->pa_pstart,
3958                                                      NULL, &start);
3959                         spin_unlock(&pa->pa_lock);
3960                         printk(KERN_ERR "PA:%lu:%d:%u \n", i,
3961                                                         start, pa->pa_len);
3962                 }
3963                 ext4_unlock_group(sb, i);
3964
3965                 if (grp->bb_free == 0)
3966                         continue;
3967                 printk(KERN_ERR "%lu: %d/%d \n",
3968                        i, grp->bb_free, grp->bb_fragments);
3969         }
3970         printk(KERN_ERR "\n");
3971 }
3972 #else
3973 static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3974 {
3975         return;
3976 }
3977 #endif
3978
3979 /*
3980  * We use locality group preallocation for small files. The size of the
3981  * file is taken to be the current size or the resulting size after
3982  * allocation, whichever is larger.
3983  *
3984  * One can tune this size via /proc/fs/ext4/<partition>/stream_req
3985  */
3986 static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
3987 {
3988         struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
3989         int bsbits = ac->ac_sb->s_blocksize_bits;
3990         loff_t size, isize;
3991
3992         if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
3993                 return;
3994
3995         size = ac->ac_o_ex.fe_logical + ac->ac_o_ex.fe_len;
3996         isize = i_size_read(ac->ac_inode) >> bsbits;
3997         size = max(size, isize);
3998
3999         /* don't use group allocation for large files */
4000         if (size >= sbi->s_mb_stream_request)
4001                 return;
4002
4003         if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4004                 return;
4005
4006         BUG_ON(ac->ac_lg != NULL);
4007         /*
4008          * locality group prealloc spaces are per-CPU. The reason for having
4009          * per-CPU locality groups is to reduce contention between block
4010          * requests from multiple CPUs.
4011          */
4012         ac->ac_lg = per_cpu_ptr(sbi->s_locality_groups, raw_smp_processor_id());
4013
4014         /* we're going to use group allocation */
4015         ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
4016
4017         /* serialize all allocations in the group */
4018         mutex_lock(&ac->ac_lg->lg_mutex);
4019 }
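/*
 * Condensed, the policy above (assuming the default s_mb_stream_request
 * of 16 blocks) is:
 *
 *	if (max(logical_end_of_request, i_size_in_blocks) >= 16)
 *		keep per-inode preallocation;	(streaming/large file)
 *	else
 *		use the per-CPU locality group;	(small file)
 *
 * so small files share a locality group's preallocated range and end up
 * physically close together, while larger files get their own
 * per-inode preallocation.
 */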
4020
4021 static noinline_for_stack int
4022 ext4_mb_initialize_context(struct ext4_allocation_context *ac,
4023                                 struct ext4_allocation_request *ar)
4024 {
4025         struct super_block *sb = ar->inode->i_sb;
4026         struct ext4_sb_info *sbi = EXT4_SB(sb);
4027         struct ext4_super_block *es = sbi->s_es;
4028         ext4_group_t group;
4029         unsigned long len;
4030         unsigned long goal;
4031         ext4_grpblk_t block;
4032
4033         /* we can't allocate > group size */
4034         len = ar->len;
4035
4036         /* just a dirty hack to filter out too-big requests */
4037         if (len >= EXT4_BLOCKS_PER_GROUP(sb) - 10)
4038                 len = EXT4_BLOCKS_PER_GROUP(sb) - 10;
4039
4040         /* start searching from the goal */
4041         goal = ar->goal;
4042         if (goal < le32_to_cpu(es->s_first_data_block) ||
4043                         goal >= ext4_blocks_count(es))
4044                 goal = le32_to_cpu(es->s_first_data_block);
4045         ext4_get_group_no_and_offset(sb, goal, &group, &block);
4046
4047         /* set up allocation goals */
4048         ac->ac_b_ex.fe_logical = ar->logical;
4049         ac->ac_b_ex.fe_group = 0;
4050         ac->ac_b_ex.fe_start = 0;
4051         ac->ac_b_ex.fe_len = 0;
4052         ac->ac_status = AC_STATUS_CONTINUE;
4053         ac->ac_groups_scanned = 0;
4054         ac->ac_ex_scanned = 0;
4055         ac->ac_found = 0;
4056         ac->ac_sb = sb;
4057         ac->ac_inode = ar->inode;
4058         ac->ac_o_ex.fe_logical = ar->logical;
4059         ac->ac_o_ex.fe_group = group;
4060         ac->ac_o_ex.fe_start = block;
4061         ac->ac_o_ex.fe_len = len;
4062         ac->ac_g_ex.fe_logical = ar->logical;
4063         ac->ac_g_ex.fe_group = group;
4064         ac->ac_g_ex.fe_start = block;
4065         ac->ac_g_ex.fe_len = len;
4066         ac->ac_f_ex.fe_len = 0;
4067         ac->ac_flags = ar->flags;
4068         ac->ac_2order = 0;
4069         ac->ac_criteria = 0;
4070         ac->ac_pa = NULL;
4071         ac->ac_bitmap_page = NULL;
4072         ac->ac_buddy_page = NULL;
4073         ac->ac_lg = NULL;
4074
4075         /* we have to define the context: will we work with a file or
4076          * a locality group. this is a policy, actually */
4077         ext4_mb_group_or_file(ac);
4078
4079         mb_debug("init ac: %u blocks @ %u, goal %u, flags %x, 2^%d, "
4080                         "left: %u/%u, right %u/%u to %swritable\n",
4081                         (unsigned) ar->len, (unsigned) ar->logical,
4082                         (unsigned) ar->goal, ac->ac_flags, ac->ac_2order,
4083                         (unsigned) ar->lleft, (unsigned) ar->pleft,
4084                         (unsigned) ar->lright, (unsigned) ar->pright,
4085                         atomic_read(&ar->inode->i_writecount) ? "" : "non-");
4086         return 0;
4087
4088 }
4089
4090 static noinline_for_stack void
4091 ext4_mb_discard_lg_preallocations(struct super_block *sb,
4092                                         struct ext4_locality_group *lg,
4093                                         int order, int total_entries)
4094 {
4095         ext4_group_t group = 0;
4096         struct ext4_buddy e4b;
4097         struct list_head discard_list;
4098         struct ext4_prealloc_space *pa, *tmp;
4099         struct ext4_allocation_context *ac;
4100
4101         mb_debug("discard locality group preallocation\n");
4102
4103         INIT_LIST_HEAD(&discard_list);
4104         ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4105
4106         spin_lock(&lg->lg_prealloc_lock);
4107         list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order],
4108                                                 pa_inode_list) {
4109                 spin_lock(&pa->pa_lock);
4110                 if (atomic_read(&pa->pa_count)) {
4111                         /*
4112                          * This is the pa that we just used
4113                          * for block allocation, so don't
4114                          * free it
4115                          */
4116                         spin_unlock(&pa->pa_lock);
4117                         continue;
4118                 }
4119                 if (pa->pa_deleted) {
4120                         spin_unlock(&pa->pa_lock);
4121                         continue;
4122                 }
4123                 /* only lg prealloc space */
4124                 BUG_ON(!pa->pa_linear);
4125
4126                 /* seems this one can be freed ... */
4127                 pa->pa_deleted = 1;
4128                 spin_unlock(&pa->pa_lock);
4129
4130                 list_del_rcu(&pa->pa_inode_list);
4131                 list_add(&pa->u.pa_tmp_list, &discard_list);
4132
4133                 total_entries--;
4134                 if (total_entries <= 5) {
4135                         /*
4136                          * we want to keep only 5 entries,
4137                          * allowing the list to grow to 8. This
4138                          * makes sure we don't have to call discard
4139                          * again soon for this list.
4140                          */
4141                         break;
4142                 }
4143         }
4144         spin_unlock(&lg->lg_prealloc_lock);
4145
4146         list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) {
4148                 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, NULL);
4149                 if (ext4_mb_load_buddy(sb, group, &e4b)) {
4150                         ext4_error(sb, __func__, "Error in loading buddy "
4151                                         "information for %lu\n", group);
4152                         continue;
4153                 }
4154                 ext4_lock_group(sb, group);
4155                 list_del(&pa->pa_group_list);
4156                 ext4_mb_release_group_pa(&e4b, pa, ac);
4157                 ext4_unlock_group(sb, group);
4158
4159                 ext4_mb_release_desc(&e4b);
4160                 list_del(&pa->u.pa_tmp_list);
4161                 call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback);
4162         }
4163         if (ac)
4164                 kmem_cache_free(ext4_ac_cachep, ac);
4165 }
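
     /*
      * Illustrative note, not from the original source: the "5 vs 8"
      * numbers here form a hysteresis.  ext4_mb_add_n_trim() below only
      * starts a discard once a bucket has grown past 8 entries, and the
      * loop above stops once roughly 5 remain, so each bucket oscillates
      * between about 5 and 8 entries instead of being trimmed on every
      * insertion.
      */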
4166
4167 /*
4168  * We have incremented pa_count. So it cannot be freed at this
4169  * point. Also we hold lg_mutex. So no parallel allocation is
4170  * possible from this lg. That means pa_free cannot be updated.
4171  *
4172  * A parallel ext4_mb_discard_group_preallocations is possible,
4173  * which can cause the lg_prealloc_list to be updated.
4174  */
4175
4176 static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
4177 {
4178         int order, added = 0, lg_prealloc_count = 1;
4179         struct super_block *sb = ac->ac_sb;
4180         struct ext4_locality_group *lg = ac->ac_lg;
4181         struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa;
4182
4183         order = fls(pa->pa_free) - 1;
4184         if (order > PREALLOC_TB_SIZE - 1)
4185                 /* The max size of the hash table is PREALLOC_TB_SIZE */
4186                 order = PREALLOC_TB_SIZE - 1;
4187         /* Add the prealloc space to lg */
4188         rcu_read_lock();
4189         list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order],
4190                                                 pa_inode_list) {
4191                 spin_lock(&tmp_pa->pa_lock);
4192                 if (tmp_pa->pa_deleted) {
4193                         spin_unlock(&tmp_pa->pa_lock);
4194                         continue;
4195                 }
4196                 if (!added && pa->pa_free < tmp_pa->pa_free) {
4197                         /* insert pa before tmp_pa to keep the list sorted by pa_free */
4198                         list_add_tail_rcu(&pa->pa_inode_list,
4199                                                 &tmp_pa->pa_inode_list);
4200                         added = 1;
4201                         /*
4202                          * we want to count the total
4203                          * number of entries in the list
4204                          */
4205                 }
4206                 spin_unlock(&tmp_pa->pa_lock);
4207                 lg_prealloc_count++;
4208         }
4209         if (!added)
4210                 list_add_tail_rcu(&pa->pa_inode_list,
4211                                         &lg->lg_prealloc_list[order]);
4212         rcu_read_unlock();
4213
4214         /* Now trim the list to not more than 8 elements */
4215         if (lg_prealloc_count > 8)
4216                 ext4_mb_discard_lg_preallocations(sb, lg,
4217                                                 order, lg_prealloc_count);
4218 }
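
     /*
      * Illustrative sketch, not part of the allocator: the bucket index
      * computed in ext4_mb_add_n_trim() above is floor(log2(pa_free)),
      * clamped to the last bucket.  The helper below is a hypothetical
      * stand-alone model of that computation and is not called anywhere;
      * PREALLOC_TB_SIZE is the real table size from mballoc.h.
      */
     static inline int lg_bucket_example(int pa_free)
     {
             int order = fls(pa_free) - 1;   /* floor(log2(pa_free)) */

             if (order > PREALLOC_TB_SIZE - 1)
                     order = PREALLOC_TB_SIZE - 1;
             return order;
     }
     /* e.g. lg_bucket_example(1) == 0 and lg_bucket_example(6) == 2,
      * while very large pa_free values all land in the last bucket */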
4222
4223 /*
4224  * release all resources we used in the allocation
4225  */
4226 static int ext4_mb_release_context(struct ext4_allocation_context *ac)
4227 {
4228         struct ext4_prealloc_space *pa = ac->ac_pa;
4229         if (pa) {
4230                 if (pa->pa_linear) {
4231                         /* see comment in ext4_mb_use_group_pa() */
4232                         spin_lock(&pa->pa_lock);
4233                         pa->pa_pstart += ac->ac_b_ex.fe_len;
4234                         pa->pa_lstart += ac->ac_b_ex.fe_len;
4235                         pa->pa_free -= ac->ac_b_ex.fe_len;
4236                         pa->pa_len -= ac->ac_b_ex.fe_len;
4237                         spin_unlock(&pa->pa_lock);
4238                         /*
4239                          * We want to add the pa to the right bucket.
4240                          * Remove it from the old list and, while
4241                          * adding, make sure the list we add it to
4242                          * doesn't grow too big.
4243                          */
4244                         if (likely(pa->pa_free)) {
4245                                 spin_lock(pa->pa_obj_lock);
4246                                 list_del_rcu(&pa->pa_inode_list);
4247                                 spin_unlock(pa->pa_obj_lock);
4248                                 ext4_mb_add_n_trim(ac);
4249                         }
4250                 }
4251                 ext4_mb_put_pa(ac, ac->ac_sb, pa);
4252         }
4253         if (ac->ac_bitmap_page)
4254                 page_cache_release(ac->ac_bitmap_page);
4255         if (ac->ac_buddy_page)
4256                 page_cache_release(ac->ac_buddy_page);
4257         if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)
4258                 mutex_unlock(&ac->ac_lg->lg_mutex);
4259         ext4_mb_collect_stats(ac);
4260         return 0;
4261 }
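
     /*
      * Illustrative sketch, not part of the allocator: a linear (locality
      * group) pa is consumed from its front, so the four updates above
      * simply slide a window forward.  Modeled with a hypothetical struct:
      */
     struct pa_window_example {
             ext4_fsblk_t pstart;    /* physical start of the window */
             ext4_lblk_t lstart;     /* logical start of the window  */
             int len;                /* blocks left in the window    */
             int free;               /* blocks still available       */
     };

     static inline void pa_window_consume(struct pa_window_example *w, int len)
     {
             w->pstart += len;
             w->lstart += len;
             w->free -= len;
             w->len -= len;
     }
     /* e.g. a 16-block window at pstart 1000 becomes a 12-block window
      * at pstart 1004 after a 4-block allocation is taken from it */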
4262
4263 static int ext4_mb_discard_preallocations(struct super_block *sb, int needed)
4264 {
4265         ext4_group_t i;
4266         int ret;
4267         int freed = 0;
4268
4269         for (i = 0; i < EXT4_SB(sb)->s_groups_count && needed > 0; i++) {
4270                 ret = ext4_mb_discard_group_preallocations(sb, i, needed);
4271                 freed += ret;
4272                 needed -= ret;
4273         }
4274
4275         return freed;
4276 }
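
     /*
      * Illustrative note, not from the original source: this sweep is
      * best-effort.  If the caller needs 100 blocks and group 0 yields
      * only 60, the walk continues with needed == 40 until either enough
      * preallocations have been discarded or every group has been
      * visited; ext4_mb_new_blocks() below retries the allocation only
      * when freed != 0.
      */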
4277
4278 /*
4279  * Main entry point into mballoc to allocate blocks.
4280  * It tries to use preallocation first, then falls back
4281  * to the usual allocation path.
4282  */
4283 ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4284                                  struct ext4_allocation_request *ar, int *errp)
4285 {
4286         int freed;
4287         struct ext4_allocation_context *ac = NULL;
4288         struct ext4_sb_info *sbi;
4289         struct super_block *sb;
4290         ext4_fsblk_t block = 0;
4291         unsigned long inquota;
4292         unsigned long reserv_blks = 0;
4293
4294         sb = ar->inode->i_sb;
4295         sbi = EXT4_SB(sb);
4296
4297         if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) {
4298                 /*
4299                  * With delalloc we already reserved the blocks
4300                  */
4301                 while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) {
4302                         /* let others free the space */
4303                         yield();
4304                         ar->len = ar->len >> 1;
4305                 }
4306                 if (!ar->len) {
4307                         *errp = -ENOSPC;
4308                         return 0;
4309                 }
4310                 reserv_blks = ar->len;
4311         }
4312         while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) {
4313                 ar->flags |= EXT4_MB_HINT_NOPREALLOC;
4314                 ar->len--;
4315         }
4316         if (ar->len == 0) {
4317                 *errp = -EDQUOT;
4318                 return 0;
4319         }
4320         inquota = ar->len;
4321
4322         if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
4323                 ar->flags |= EXT4_MB_DELALLOC_RESERVED;
4324
4325         ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4326         if (!ac) {
4327                 ar->len = 0;
4328                 *errp = -ENOMEM;
4329                 goto out1;
4330         }
4331
4332         *errp = ext4_mb_initialize_context(ac, ar);
4333         if (*errp) {
4334                 ar->len = 0;
4335                 goto out2;
4336         }
4337
4338         ac->ac_op = EXT4_MB_HISTORY_PREALLOC;
4339         if (!ext4_mb_use_preallocated(ac)) {
4340                 ac->ac_op = EXT4_MB_HISTORY_ALLOC;
4341                 ext4_mb_normalize_request(ac, ar);
4342 repeat:
4343                 /* allocate space in core */
4344                 ext4_mb_regular_allocator(ac);
4345
4346         /* as we've just preallocated more space than the
4347          * user originally requested, we store the allocated
4348          * space in a special descriptor */
4349                 if (ac->ac_status == AC_STATUS_FOUND &&
4350                                 ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
4351                         ext4_mb_new_preallocation(ac);
4352         }
4353
4354         if (likely(ac->ac_status == AC_STATUS_FOUND)) {
4355                 *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_blks);
4356                 if (*errp == -EAGAIN) {
4357                         ac->ac_b_ex.fe_group = 0;
4358                         ac->ac_b_ex.fe_start = 0;
4359                         ac->ac_b_ex.fe_len = 0;
4360                         ac->ac_status = AC_STATUS_CONTINUE;
4361                         goto repeat;
4362                 } else if (*errp) {
4363                         ac->ac_b_ex.fe_len = 0;
4364                         ar->len = 0;
4365                         ext4_mb_show_ac(ac);
4366                 } else {
4367                         block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex);
4368                         ar->len = ac->ac_b_ex.fe_len;
4369                 }
4370         } else {
4371                 freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len);
4372                 if (freed)
4373                         goto repeat;
4374                 *errp = -ENOSPC;
4375                 ac->ac_b_ex.fe_len = 0;
4376                 ar->len = 0;
4377                 ext4_mb_show_ac(ac);
4378         }
4379
4380         ext4_mb_release_context(ac);
4381
4382 out2:
4383         kmem_cache_free(ext4_ac_cachep, ac);
4384 out1:
4385         if (ar->len < inquota)
4386                 DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len);
4387
4388         return block;
4389 }
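
     /*
      * Illustrative sketch, not from the original source: a minimal
      * caller of ext4_mb_new_blocks().  The helper name is hypothetical;
      * the struct ext4_allocation_request fields and the call itself are
      * real.  Real callers (the extents code) also fill in the
      * lleft/lright/pleft/pright neighbour hints and ar.flags.
      */
     static inline ext4_fsblk_t example_alloc(handle_t *handle,
                     struct inode *inode, ext4_lblk_t logical,
                     ext4_fsblk_t goal, unsigned long count, int *errp)
     {
             struct ext4_allocation_request ar;

             memset(&ar, 0, sizeof(ar));
             ar.inode = inode;
             ar.logical = logical;   /* file block being mapped       */
             ar.goal = goal;         /* preferred physical block      */
             ar.len = count;         /* may be trimmed under pressure */
             return ext4_mb_new_blocks(handle, &ar, errp);
             /* on success ar.len holds the number of blocks granted */
     }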
4390
4391 /*
4392  * We can merge two free data extents only if the physical blocks
4393  * are contiguous, AND the extents were freed by the same transaction,
4394  * AND the blocks are associated with the same group.
4395  */
4396 static int can_merge(struct ext4_free_data *entry1,
4397                         struct ext4_free_data *entry2)
4398 {
4399         if ((entry1->t_tid == entry2->t_tid) &&
4400             (entry1->group == entry2->group) &&
4401             ((entry1->start_blk + entry1->count) == entry2->start_blk))
4402                 return 1;
4403         return 0;
4404 }
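
     /*
      * For example (illustrative numbers): an entry covering blocks
      * 100..107 (start_blk == 100, count == 8) can merge with one
      * starting at block 108 in the same group and transaction, since
      * 100 + 8 == 108.  An entry starting at 109 cannot, and neither can
      * one freed under a different t_tid, because the two frees may then
      * be committed to disk at different times.
      */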
4405
4406 static noinline_for_stack int
4407 ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b,
4408                           ext4_group_t group, ext4_grpblk_t block, int count)
4409 {
4410         struct ext4_group_info *db = e4b->bd_info;
4411         struct super_block *sb = e4b->bd_sb;
4412         struct ext4_sb_info *sbi = EXT4_SB(sb);
4413         struct ext4_free_data *entry, *new_entry;
4414         struct rb_node **n = &db->bb_free_root.rb_node, *node;
4415         struct rb_node *parent = NULL, *new_node;
4416
4418         BUG_ON(e4b->bd_bitmap_page == NULL);
4419         BUG_ON(e4b->bd_buddy_page == NULL);
4420
4421         new_entry = kmem_cache_alloc(ext4_free_ext_cachep, GFP_NOFS);
             if (new_entry == NULL)
                     return -ENOMEM; /* hedged fix: avoid the NULL deref below */
4422         new_entry->start_blk = block;
4423         new_entry->group  = group;
4424         new_entry->count = count;
4425         new_entry->t_tid = handle->h_transaction->t_tid;
4426         new_node = &new_entry->node;
4427
4428         ext4_lock_group(sb, group);
4429         if (!*n) {
4430                 /* first free block extent. We need to
4431                  * protect the buddy cache from being freed,
4432                  * otherwise we'll refresh it from the
4433                  * on-disk bitmap and lose not-yet-available
4434                  * blocks */
4435                 page_cache_get(e4b->bd_buddy_page);
4436                 page_cache_get(e4b->bd_bitmap_page);
4437         }
4438         while (*n) {
4439                 parent = *n;
4440                 entry = rb_entry(parent, struct ext4_free_data, node);
4441                 if (block < entry->start_blk)
4442                         n = &(*n)->rb_left;
4443                 else if (block >= (entry->start_blk + entry->count))
4444                         n = &(*n)->rb_right;
4445                 else {
4446                         ext4_unlock_group(sb, group);
4447                         ext4_error(sb, __func__,
4448                             "Double free of blocks %d (%d %d)\n",
4449                             block, entry->start_blk, entry->count);
4450                         return 0;
4451                 }
4452         }
4453
4454         rb_link_node(new_node, parent, n);
4455         rb_insert_color(new_node, &db->bb_free_root);
4456
4457         /* Now see if the extent can be merged to the left and right */
4458         node = rb_prev(new_node);
4459         if (node) {
4460                 entry = rb_entry(node, struct ext4_free_data, node);
4461                 if (can_merge(entry, new_entry)) {
4462                         new_entry->start_blk = entry->start_blk;
4463                         new_entry->count += entry->count;
4464                         rb_erase(node, &(db->bb_free_root));
4465                         spin_lock(&sbi->s_md_lock);
4466                         list_del(&entry->list);
4467                         spin_unlock(&sbi->s_md_lock);
4468                         kmem_cache_free(ext4_free_ext_cachep, entry);
4469                 }
4470         }
4471
4472         node = rb_next(new_node);
4473         if (node) {
4474                 entry = rb_entry(node, struct ext4_free_data, node);
4475                 if (can_merge(new_entry, entry)) {
4476                         new_entry->count += entry->count;
4477                         rb_erase(node, &(db->bb_free_root));
4478                         spin_lock(&sbi->s_md_lock);
4479                         list_del(&entry->list);
4480                         spin_unlock(&sbi->s_md_lock);
4481                         kmem_cache_free(ext4_free_ext_cachep, entry);
4482                 }
4483         }
4484         /* Add the extent to transaction's private list */
4485         spin_lock(&sbi->s_md_lock);
4486         list_add(&new_entry->list, &handle->h_transaction->t_private_list);
4487         spin_unlock(&sbi->s_md_lock);
4488         ext4_unlock_group(sb, group);
4489         return 0;
4490 }
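
     /*
      * Illustrative walk-through, with made-up numbers: bb_free_root
      * holds disjoint extents ordered by start_blk, so the descent above
      * is a plain binary search.  Freeing blocks 40..47 into a tree
      * holding {0..7} and {48..63} first inserts {start_blk 40, count 8};
      * the rb_next() pass then finds {48, 16}, sees that 40 + 8 == 48,
      * and collapses the pair into a single {40, 24} extent, assuming
      * both frees happened under the same transaction.
      */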
4491
4492 /*
4493  * Main entry point into mballoc to free blocks
4494  */
4495 void ext4_mb_free_blocks(handle_t *handle, struct inode *inode,
4496                         unsigned long block, unsigned long count,
4497                         int metadata, unsigned long *freed)
4498 {
4499         struct buffer_head *bitmap_bh = NULL;
4500         struct super_block *sb = inode->i_sb;
4501         struct ext4_allocation_context *ac = NULL;
4502         struct ext4_group_desc *gdp;
4503         struct ext4_super_block *es;
4504         unsigned long overflow;
4505         ext4_grpblk_t bit;
4506         struct buffer_head *gd_bh;
4507         ext4_group_t block_group;
4508         struct ext4_sb_info *sbi;
4509         struct ext4_buddy e4b;
4510         int err = 0;
4511         int ret;
4512
4513         *freed = 0;
4514
4515         sbi = EXT4_SB(sb);
4516         es = EXT4_SB(sb)->s_es;
4517         if (block < le32_to_cpu(es->s_first_data_block) ||
4518             block + count < block ||
4519             block + count > ext4_blocks_count(es)) {
4520                 ext4_error(sb, __func__,
4521                             "Freeing blocks not in datazone - "
4522                             "block = %lu, count = %lu", block, count);
4523                 goto error_return;
4524         }
4525
4526         ext4_debug("freeing block %lu\n", block);
4527
4528         ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
4529         if (ac) {
4530                 ac->ac_op = EXT4_MB_HISTORY_FREE;
4531                 ac->ac_inode = inode;
4532                 ac->ac_sb = sb;
4533         }
4534
4535 do_more:
4536         overflow = 0;
4537         ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
4538
4539         /*
4540          * Check to see if we are freeing blocks across a group
4541          * boundary.
4542          */
4543         if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) {
4544                 overflow = bit + count - EXT4_BLOCKS_PER_GROUP(sb);
4545                 count -= overflow;
4546         }
4547         bitmap_bh = ext4_read_block_bitmap(sb, block_group);
4548         if (!bitmap_bh) {
4549                 err = -EIO;
4550                 goto error_return;
4551         }
4552         gdp = ext4_get_group_desc(sb, block_group, &gd_bh);
4553         if (!gdp) {
4554                 err = -EIO;
4555                 goto error_return;
4556         }
4557
4558         if (in_range(ext4_block_bitmap(sb, gdp), block, count) ||
4559             in_range(ext4_inode_bitmap(sb, gdp), block, count) ||
4560             in_range(block, ext4_inode_table(sb, gdp),
4561                       EXT4_SB(sb)->s_itb_per_group) ||
4562             in_range(block + count - 1, ext4_inode_table(sb, gdp),
4563                       EXT4_SB(sb)->s_itb_per_group)) {
4564
4565                 ext4_error(sb, __func__,
4566                            "Freeing blocks in system zone - "
4567                            "Block = %lu, count = %lu", block, count);
4568                 /* err = 0. ext4_std_error should be a no-op */
4569                 goto error_return;
4570         }
4571
4572         BUFFER_TRACE(bitmap_bh, "getting write access");
4573         err = ext4_journal_get_write_access(handle, bitmap_bh);
4574         if (err)
4575                 goto error_return;
4576
4577         /*
4578          * We are about to modify some metadata.  Call the journal APIs
4579          * to unshare ->b_data if a currently-committing transaction is
4580          * using it
4581          */
4582         BUFFER_TRACE(gd_bh, "get_write_access");
4583         err = ext4_journal_get_write_access(handle, gd_bh);
4584         if (err)
4585                 goto error_return;
4586
4587         err = ext4_mb_load_buddy(sb, block_group, &e4b);
4588         if (err)
4589                 goto error_return;
4590
4591 #ifdef AGGRESSIVE_CHECK
4592         {
4593                 int i;
4594                 for (i = 0; i < count; i++)
4595                         BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data));
4596         }
4597 #endif
4598         mb_clear_bits(sb_bgl_lock(sbi, block_group), bitmap_bh->b_data,
4599                         bit, count);
4600
4601         /* We dirtied the bitmap block */
4602         BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
4603         err = ext4_journal_dirty_metadata(handle, bitmap_bh);
4604
4605         if (ac) {
4606                 ac->ac_b_ex.fe_group = block_group;
4607                 ac->ac_b_ex.fe_start = bit;
4608                 ac->ac_b_ex.fe_len = count;
4609                 ext4_mb_store_history(ac);
4610         }
4611
4612         if (metadata) {
4613                 /* blocks being freed are metadata; they shouldn't
4614                  * be reused until this transaction is committed */
4615                 ext4_mb_free_metadata(handle, &e4b, block_group, bit, count);
4616         } else {
4617                 ext4_lock_group(sb, block_group);
4618                 mb_free_blocks(inode, &e4b, bit, count);
4619                 ext4_mb_return_to_preallocation(inode, &e4b, block, count);
4620                 ext4_unlock_group(sb, block_group);
4621         }
4622
4623         spin_lock(sb_bgl_lock(sbi, block_group));
4624         le16_add_cpu(&gdp->bg_free_blocks_count, count);
4625         gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
4626         spin_unlock(sb_bgl_lock(sbi, block_group));
4627         percpu_counter_add(&sbi->s_freeblocks_counter, count);
4628
4629         if (sbi->s_log_groups_per_flex) {
4630                 ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
4631                 spin_lock(sb_bgl_lock(sbi, flex_group));
4632                 sbi->s_flex_groups[flex_group].free_blocks += count;
4633                 spin_unlock(sb_bgl_lock(sbi, flex_group));
4634         }
4635
4636         ext4_mb_release_desc(&e4b);
4637
4638         *freed += count;
4639
4640         /* And the group descriptor block */
4641         BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
4642         ret = ext4_journal_dirty_metadata(handle, gd_bh);
4643         if (!err)
4644                 err = ret;
4645
4646         if (overflow && !err) {
4647                 block += count;
4648                 count = overflow;
4649                 put_bh(bitmap_bh);
4650                 goto do_more;
4651         }
4652         sb->s_dirt = 1;
4653 error_return:
4654         brelse(bitmap_bh);
4655         ext4_std_error(sb, err);
4656         if (ac)
4657                 kmem_cache_free(ext4_ac_cachep, ac);
4658         return;
4659 }
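
     /*
      * Illustrative example of the do_more overflow handling above,
      * with numbers assuming EXT4_BLOCKS_PER_GROUP(sb) == 32768: freeing
      * count == 100 blocks at bit == 32718 gives
      *
      *      overflow = 32718 + 100 - 32768 = 50
      *
      * so the first pass frees the 50 blocks up to the group boundary,
      * after which block is advanced past them and the loop restarts in
      * the next group with count = overflow = 50.
      */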