/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 *
 * Architecture independence:
 *   Copyright (c) 2005, Bull S.A.
 *   Written by Pierre Peiffer <pierre.peiffer@bull.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * Extents support for EXT4
 *
 * TODO:
 *   - ext4*_error() should be used in some situations
 *   - analyze all BUG()/BUG_ON(), use -EIO where appropriate
 *   - smart tree reduction
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/ext4_jbd2.h>
#include <linux/jbd.h>
#include <linux/smp_lock.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/ext4_fs_extents.h>
#include <asm/uaccess.h>
/*
 * ext_pblock:
 * combine low and high parts of physical block number into ext4_fsblk_t
 */
static inline ext4_fsblk_t ext_pblock(struct ext4_extent *ex)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ex->ee_start);
	if (sizeof(ext4_fsblk_t) > 4)
		block |= ((ext4_fsblk_t) le16_to_cpu(ex->ee_start_hi) << 31) << 1;
	return block;
}
/*
 * idx_pblock:
 * combine low and high parts of a leaf physical block number into ext4_fsblk_t
 */
static inline ext4_fsblk_t idx_pblock(struct ext4_extent_idx *ix)
{
	ext4_fsblk_t block;

	block = le32_to_cpu(ix->ei_leaf);
	if (sizeof(ext4_fsblk_t) > 4)
		block |= ((ext4_fsblk_t) le16_to_cpu(ix->ei_leaf_hi) << 31) << 1;
	return block;
}
/*
 * ext4_ext_store_pblock:
 * stores a large physical block number into an extent struct,
 * breaking it into parts
 */
static inline void ext4_ext_store_pblock(struct ext4_extent *ex, ext4_fsblk_t pb)
{
	ex->ee_start = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	if (sizeof(ext4_fsblk_t) > 4)
		ex->ee_start_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}
/*
 * ext4_idx_store_pblock:
 * stores a large physical block number into an index struct,
 * breaking it into parts
 */
static inline void ext4_idx_store_pblock(struct ext4_extent_idx *ix, ext4_fsblk_t pb)
{
	ix->ei_leaf = cpu_to_le32((unsigned long) (pb & 0xffffffff));
	if (sizeof(ext4_fsblk_t) > 4)
		ix->ei_leaf_hi = cpu_to_le16((unsigned long) ((pb >> 31) >> 1) & 0xffff);
}
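
/*
 * Illustrative sketch, not part of the original file: the helpers above
 * split a 48-bit physical block number into a 32-bit low part (ee_start)
 * and a 16-bit high part (ee_start_hi), and recombine them losslessly.
 * The helper name below is hypothetical.
 */
static inline int ext4_ext_pblock_roundtrip_ok(ext4_fsblk_t pb)
{
	struct ext4_extent ex;

	ext4_ext_store_pblock(&ex, pb);	/* split into low/high parts */
	return ext_pblock(&ex) == pb;	/* recombining restores pb */
}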
static int ext4_ext_check_header(const char *function, struct inode *inode,
					struct ext4_extent_header *eh)
{
	const char *error_msg = NULL;

	if (unlikely(eh->eh_magic != EXT4_EXT_MAGIC)) {
		error_msg = "invalid magic";
		goto corrupted;
	}
	if (unlikely(eh->eh_max == 0)) {
		error_msg = "invalid eh_max";
		goto corrupted;
	}
	if (unlikely(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max))) {
		error_msg = "invalid eh_entries";
		goto corrupted;
	}
	return 0;

corrupted:
	ext4_error(inode->i_sb, function,
			"bad header in inode #%lu: %s - magic %x, "
			"entries %u, max %u, depth %u",
			inode->i_ino, error_msg, le16_to_cpu(eh->eh_magic),
			le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max),
			le16_to_cpu(eh->eh_depth));
	return -EIO;
}
static handle_t *ext4_ext_journal_restart(handle_t *handle, int needed)
{
	int err;

	if (handle->h_buffer_credits > needed)
		return handle;
	if (!ext4_journal_extend(handle, needed))
		return handle;
	err = ext4_journal_restart(handle, needed);

	return handle;
}
static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	if (path->p_bh) {
		/* path points to block */
		return ext4_journal_get_write_access(handle, path->p_bh);
	}
	/* path points to leaf/index in inode body */
	/* we use in-core data, no need to protect them */
	return 0;
}
static int ext4_ext_dirty(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	int err;

	if (path->p_bh) {
		/* path points to block */
		err = ext4_journal_dirty_metadata(handle, path->p_bh);
	} else {
		/* path points to leaf/index in inode body */
		err = ext4_mark_inode_dirty(handle, inode);
	}
	return err;
}
static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode,
					struct ext4_ext_path *path,
					ext4_fsblk_t block)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_fsblk_t bg_start;
	ext4_grpblk_t colour;
	int depth;

	if (path) {
		struct ext4_extent *ex;
		depth = path->p_depth;

		/* try to predict block placement */
		if ((ex = path[depth].p_ext))
			return ext_pblock(ex)+(block-le32_to_cpu(ex->ee_block));

		/* it looks like index is empty;
		 * try to find starting block from index itself */
		if (path[depth].p_bh)
			return path[depth].p_bh->b_blocknr;
	}

	/* OK. use inode's group */
	bg_start = (ei->i_block_group * EXT4_BLOCKS_PER_GROUP(inode->i_sb)) +
		le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_first_data_block);
	colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	return bg_start + colour + block;
}
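
/*
 * Illustrative note (not in the original): with the common 32768
 * blocks per group, the colour term spreads concurrent writers across
 * the group; e.g. pid 42 yields (42 % 16) * (32768 / 16) = 20480
 * blocks of offset from the group start.
 */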
static ext4_fsblk_t
ext4_ext_new_block(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path,
			struct ext4_extent *ex, int *err)
{
	ext4_fsblk_t goal, newblock;

	goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block));
	newblock = ext4_new_block(handle, inode, goal, err);
	return newblock;
}
static inline int ext4_ext_space_block(struct inode *inode)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent);
#ifdef AGRESSIVE_TEST
	if (size > 6)
		size = 6;
#endif
	return size;
}

static inline int ext4_ext_space_block_idx(struct inode *inode)
{
	int size;

	size = (inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
			/ sizeof(struct ext4_extent_idx);
#ifdef AGRESSIVE_TEST
	if (size > 5)
		size = 5;
#endif
	return size;
}

static inline int ext4_ext_space_root(struct inode *inode)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent);
#ifdef AGRESSIVE_TEST
	if (size > 3)
		size = 3;
#endif
	return size;
}

static inline int ext4_ext_space_root_idx(struct inode *inode)
{
	int size;

	size = sizeof(EXT4_I(inode)->i_data);
	size -= sizeof(struct ext4_extent_header);
	size /= sizeof(struct ext4_extent_idx);
#ifdef AGRESSIVE_TEST
	if (size > 4)
		size = 4;
#endif
	return size;
}
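
/*
 * Illustrative arithmetic (not in the original): struct ext4_extent,
 * struct ext4_extent_idx and the header are 12 bytes each, so a
 * 4096-byte block holds (4096 - 12) / 12 = 340 entries -- the "340"
 * cited by ext4_ext_calc_credits_for_insert() below -- while the
 * 60-byte i_data root holds (60 - 12) / 12 = 4 entries.
 */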
#ifdef EXT_DEBUG
static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
{
	int k, l = path->p_depth;

	ext_debug("path:");
	for (k = 0; k <= l; k++, path++) {
		if (path->p_idx) {
			ext_debug(" %d->"E3FSBLK, le32_to_cpu(path->p_idx->ei_block),
					idx_pblock(path->p_idx));
		} else if (path->p_ext) {
			ext_debug(" %d:%d:"E3FSBLK" ",
					le32_to_cpu(path->p_ext->ee_block),
					le16_to_cpu(path->p_ext->ee_len),
					ext_pblock(path->p_ext));
		} else
			ext_debug(" []");
	}
	ext_debug("\n");
}

static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path)
{
	int depth = ext_depth(inode);
	struct ext4_extent_header *eh;
	struct ext4_extent *ex;
	int i;

	if (!path)
		return;

	eh = path[depth].p_hdr;
	ex = EXT_FIRST_EXTENT(eh);

	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ex++) {
		ext_debug("%d:%d:"E3FSBLK" ", le32_to_cpu(ex->ee_block),
				le16_to_cpu(ex->ee_len), ext_pblock(ex));
	}
	ext_debug("\n");
}
#else
#define ext4_ext_show_path(inode,path)
#define ext4_ext_show_leaf(inode,path)
#endif
static void ext4_ext_drop_refs(struct ext4_ext_path *path)
{
	int depth = path->p_depth;
	int i;

	for (i = 0; i <= depth; i++, path++)
		if (path->p_bh) {
			brelse(path->p_bh);
			path->p_bh = NULL;
		}
}
/*
 * ext4_ext_binsearch_idx:
 * binary search for the closest index of the given block
 */
static void
ext4_ext_binsearch_idx(struct inode *inode, struct ext4_ext_path *path, int block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent_idx *r, *l, *m;

	BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
	BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
	BUG_ON(le16_to_cpu(eh->eh_entries) <= 0);

	ext_debug("binsearch for %d(idx): ", block);

	l = EXT_FIRST_INDEX(eh) + 1;
	r = EXT_FIRST_INDEX(eh) + le16_to_cpu(eh->eh_entries) - 1;
	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ei_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, l->ei_block,
				m, m->ei_block, r, r->ei_block);
	}

	path->p_idx = l - 1;
	ext_debug(" -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block),
			idx_pblock(path->p_idx));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent_idx *chix, *ix;
		int k;

		chix = ix = EXT_FIRST_INDEX(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ix++) {
			if (k != 0 &&
			    le32_to_cpu(ix->ei_block) <= le32_to_cpu(ix[-1].ei_block)) {
				printk("k=%d, ix=0x%p, first=0x%p\n", k,
					ix, EXT_FIRST_INDEX(eh));
				printk("%u <= %u\n",
					le32_to_cpu(ix->ei_block),
					le32_to_cpu(ix[-1].ei_block));
			}
			BUG_ON(k && le32_to_cpu(ix->ei_block)
					<= le32_to_cpu(ix[-1].ei_block));
			if (block < le32_to_cpu(ix->ei_block))
				break;
			chix = ix;
		}
		BUG_ON(chix != path->p_idx);
	}
#endif
}
/*
 * ext4_ext_binsearch:
 * binary search for closest extent of the given block
 */
static void
ext4_ext_binsearch(struct inode *inode, struct ext4_ext_path *path, int block)
{
	struct ext4_extent_header *eh = path->p_hdr;
	struct ext4_extent *r, *l, *m;

	BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);
	BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));

	if (eh->eh_entries == 0) {
		/*
		 * this leaf is empty:
		 * we get such a leaf in split/add case
		 */
		return;
	}

	ext_debug("binsearch for %d: ", block);

	l = EXT_FIRST_EXTENT(eh) + 1;
	r = EXT_FIRST_EXTENT(eh) + le16_to_cpu(eh->eh_entries) - 1;

	while (l <= r) {
		m = l + (r - l) / 2;
		if (block < le32_to_cpu(m->ee_block))
			r = m - 1;
		else
			l = m + 1;
		ext_debug("%p(%u):%p(%u):%p(%u) ", l, l->ee_block,
				m, m->ee_block, r, r->ee_block);
	}

	path->p_ext = l - 1;
	ext_debug(" -> %d:"E3FSBLK":%d ",
			le32_to_cpu(path->p_ext->ee_block),
			ext_pblock(path->p_ext),
			le16_to_cpu(path->p_ext->ee_len));

#ifdef CHECK_BINSEARCH
	{
		struct ext4_extent *chex, *ex;
		int k;

		chex = ex = EXT_FIRST_EXTENT(eh);
		for (k = 0; k < le16_to_cpu(eh->eh_entries); k++, ex++) {
			BUG_ON(k && le32_to_cpu(ex->ee_block)
					<= le32_to_cpu(ex[-1].ee_block));
			if (block < le32_to_cpu(ex->ee_block))
				break;
			chex = ex;
		}
		BUG_ON(chex != path->p_ext);
	}
#endif
}
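
/*
 * Note (added; not in the original): both searches start with l at the
 * second entry and converge so that l - 1 is the last entry whose
 * starting block is <= the target block; that entry becomes
 * path->p_idx (or path->p_ext). The CHECK_BINSEARCH blocks recompute
 * the answer by linear scan and BUG if the two results disagree.
 */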
int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
{
	struct ext4_extent_header *eh;

	eh = ext_inode_hdr(inode);
	eh->eh_depth = 0;
	eh->eh_entries = 0;
	eh->eh_magic = EXT4_EXT_MAGIC;
	eh->eh_max = cpu_to_le16(ext4_ext_space_root(inode));
	ext4_mark_inode_dirty(handle, inode);
	ext4_ext_invalidate_cache(inode);
	return 0;
}
struct ext4_ext_path *
ext4_ext_find_extent(struct inode *inode, int block, struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	short int depth, i, ppos = 0, alloc = 0;

	eh = ext_inode_hdr(inode);
	if (ext4_ext_check_header(__FUNCTION__, inode, eh))
		return ERR_PTR(-EIO);

	i = depth = ext_depth(inode);

	/* account possible depth increase */
	if (!path) {
		path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 2),
				GFP_NOFS);
		if (!path)
			return ERR_PTR(-ENOMEM);
		alloc = 1;
	}
	memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1));
	path[0].p_hdr = eh;

	/* walk through the tree */
	while (i) {
		ext_debug("depth %d: num %d, max %d\n",
				ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
		ext4_ext_binsearch_idx(inode, path + ppos, block);
		path[ppos].p_block = idx_pblock(path[ppos].p_idx);
		path[ppos].p_depth = i;
		path[ppos].p_ext = NULL;

		bh = sb_bread(inode->i_sb, path[ppos].p_block);
		if (!bh)
			goto err;

		eh = ext_block_hdr(bh);
		ppos++;
		BUG_ON(ppos > depth);
		path[ppos].p_bh = bh;
		path[ppos].p_hdr = eh;
		i--;

		if (ext4_ext_check_header(__FUNCTION__, inode, eh))
			goto err;
	}

	path[ppos].p_depth = i;
	path[ppos].p_hdr = eh;
	path[ppos].p_ext = NULL;
	path[ppos].p_idx = NULL;

	if (ext4_ext_check_header(__FUNCTION__, inode, eh))
		goto err;

	/* find extent */
	ext4_ext_binsearch(inode, path + ppos, block);

	ext4_ext_show_path(inode, path);

	return path;

err:
	ext4_ext_drop_refs(path);
	if (alloc)
		kfree(path);
	return ERR_PTR(-EIO);
}
/*
 * ext4_ext_insert_index:
 * insert new index [@logical;@ptr] into the block at @curp;
 * check where to insert: before @curp or after @curp
 */
static int ext4_ext_insert_index(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *curp,
				int logical, ext4_fsblk_t ptr)
{
	struct ext4_extent_idx *ix;
	int len, err;

	if ((err = ext4_ext_get_access(handle, inode, curp)))
		return err;

	BUG_ON(logical == le32_to_cpu(curp->p_idx->ei_block));
	len = EXT_MAX_INDEX(curp->p_hdr) - curp->p_idx;
	if (logical > le32_to_cpu(curp->p_idx->ei_block)) {
		/* insert after */
		if (curp->p_idx != EXT_LAST_INDEX(curp->p_hdr)) {
			len = (len - 1) * sizeof(struct ext4_extent_idx);
			len = len < 0 ? 0 : len;
			ext_debug("insert new index %d after: %d. "
					"move %d from 0x%p to 0x%p\n",
					logical, ptr, len,
					(curp->p_idx + 1), (curp->p_idx + 2));
			memmove(curp->p_idx + 2, curp->p_idx + 1, len);
		}
		ix = curp->p_idx + 1;
	} else {
		/* insert before */
		len = len * sizeof(struct ext4_extent_idx);
		len = len < 0 ? 0 : len;
		ext_debug("insert new index %d before: %d. "
				"move %d from 0x%p to 0x%p\n",
				logical, ptr, len,
				curp->p_idx, (curp->p_idx + 1));
		memmove(curp->p_idx + 1, curp->p_idx, len);
		ix = curp->p_idx;
	}

	ix->ei_block = cpu_to_le32(logical);
	ext4_idx_store_pblock(ix, ptr);
	curp->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(curp->p_hdr->eh_entries)+1);

	BUG_ON(le16_to_cpu(curp->p_hdr->eh_entries)
			> le16_to_cpu(curp->p_hdr->eh_max));
	BUG_ON(ix > EXT_LAST_INDEX(curp->p_hdr));

	err = ext4_ext_dirty(handle, inode, curp);
	ext4_std_error(inode->i_sb, err);

	return err;
}
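
/*
 * Worked example (illustrative values, not from the original): if the
 * index block holds [10, 50, 90] and curp points at 50, inserting
 * logical 60 takes the "insert after" branch: len starts as
 * EXT_MAX_INDEX(hdr) - curp->p_idx, and (len - 1) * 12 bytes are moved
 * from curp->p_idx + 1 to curp->p_idx + 2, shifting 90 (and the unused
 * tail) one slot right before ix = curp->p_idx + 1 receives [60;ptr].
 */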
/*
 * ext4_ext_split:
 * inserts new subtree into the path, using free index entry
 * at depth @at:
 * - allocates all needed blocks (new leaf and all intermediate index blocks)
 * - makes decision where to split
 * - moves remaining extents and index entries (right to the split point)
 *   into the newly allocated blocks
 * - initializes subtree
 */
static int ext4_ext_split(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext, int at)
{
	struct buffer_head *bh = NULL;
	int depth = ext_depth(inode);
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct ext4_extent *ex;
	int i = at, k, m, a;
	ext4_fsblk_t newblock, oldblock;
	__le32 border;
	ext4_fsblk_t *ablocks = NULL; /* array of allocated blocks */
	int err = 0;

	/* make decision: where to split? */
	/* FIXME: now decision is simplest: at current extent */

	/* if current leaf will be split, then we should use
	 * border from split point */
	BUG_ON(path[depth].p_ext > EXT_MAX_EXTENT(path[depth].p_hdr));
	if (path[depth].p_ext != EXT_MAX_EXTENT(path[depth].p_hdr)) {
		border = path[depth].p_ext[1].ee_block;
		ext_debug("leaf will be split."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	} else {
		border = newext->ee_block;
		ext_debug("leaf will be added."
				" next leaf starts at %d\n",
				le32_to_cpu(border));
	}

	/*
	 * If an error occurs, then we break processing
	 * and mark filesystem read-only. index won't
	 * be inserted and tree will be in consistent
	 * state. Next mount will repair buffers too.
	 */

	/*
	 * Get array to track all allocated blocks.
	 * We need this to handle errors and free blocks
	 * upon them.
	 */
	ablocks = kmalloc(sizeof(ext4_fsblk_t) * depth, GFP_NOFS);
	if (!ablocks)
		return -ENOMEM;
	memset(ablocks, 0, sizeof(ext4_fsblk_t) * depth);

	/* allocate all needed blocks */
	ext_debug("allocate %d blocks for indexes/leaf\n", depth - at);
	for (a = 0; a < depth - at; a++) {
		newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
		if (newblock == 0)
			goto cleanup;
		ablocks[a] = newblock;
	}

	/* initialize new leaf */
	newblock = ablocks[--a];
	BUG_ON(newblock == 0);
	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		goto cleanup;
	}
	lock_buffer(bh);

	if ((err = ext4_journal_get_create_access(handle, bh)))
		goto cleanup;

	neh = ext_block_hdr(bh);
	neh->eh_entries = 0;
	neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
	neh->eh_magic = EXT4_EXT_MAGIC;
	neh->eh_depth = 0;
	ex = EXT_FIRST_EXTENT(neh);

	/* move remainder of path[depth] to the new leaf */
	BUG_ON(path[depth].p_hdr->eh_entries != path[depth].p_hdr->eh_max);
	/* start copy from next extent */
	/* TODO: we could do it by single memmove */
	m = 0;
	path[depth].p_ext++;
	while (path[depth].p_ext <=
			EXT_MAX_EXTENT(path[depth].p_hdr)) {
		ext_debug("move %d:"E3FSBLK":%d in new leaf "E3FSBLK"\n",
				le32_to_cpu(path[depth].p_ext->ee_block),
				ext_pblock(path[depth].p_ext),
				le16_to_cpu(path[depth].p_ext->ee_len),
				newblock);
		/*memmove(ex++, path[depth].p_ext++,
				sizeof(struct ext4_extent));
		neh->eh_entries++;*/
		path[depth].p_ext++;
		m++;
	}
	if (m) {
		memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m);
		neh->eh_entries = cpu_to_le16(le16_to_cpu(neh->eh_entries)+m);
	}

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	if ((err = ext4_journal_dirty_metadata(handle, bh)))
		goto cleanup;
	brelse(bh);
	bh = NULL;

	/* correct old leaf */
	if (m) {
		if ((err = ext4_ext_get_access(handle, inode, path + depth)))
			goto cleanup;
		path[depth].p_hdr->eh_entries =
			cpu_to_le16(le16_to_cpu(path[depth].p_hdr->eh_entries)-m);
		if ((err = ext4_ext_dirty(handle, inode, path + depth)))
			goto cleanup;
	}

	/* create intermediate indexes */
	k = depth - at - 1;
	BUG_ON(k < 0);
	if (k)
		ext_debug("create %d intermediate indices\n", k);
	/* insert new index into current index block */
	/* current depth stored in i var */
	i = depth - 1;
	while (k--) {
		oldblock = newblock;
		newblock = ablocks[--a];
		bh = sb_getblk(inode->i_sb, (ext4_fsblk_t)newblock);
		if (!bh) {
			err = -EIO;
			goto cleanup;
		}
		lock_buffer(bh);

		if ((err = ext4_journal_get_create_access(handle, bh)))
			goto cleanup;

		neh = ext_block_hdr(bh);
		neh->eh_entries = cpu_to_le16(1);
		neh->eh_magic = EXT4_EXT_MAGIC;
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
		neh->eh_depth = cpu_to_le16(depth - i);
		fidx = EXT_FIRST_INDEX(neh);
		fidx->ei_block = border;
		ext4_idx_store_pblock(fidx, oldblock);

		ext_debug("int.index at %d (block "E3FSBLK"): %lu -> "E3FSBLK"\n", i,
				newblock, (unsigned long) le32_to_cpu(border),
				oldblock);
		/* copy indexes */
		m = 0;
		path[i].p_idx++;
		ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx,
				EXT_MAX_INDEX(path[i].p_hdr));
		BUG_ON(EXT_MAX_INDEX(path[i].p_hdr) !=
				EXT_LAST_INDEX(path[i].p_hdr));
		while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) {
			ext_debug("%d: move %d:%d in new index "E3FSBLK"\n", i,
					le32_to_cpu(path[i].p_idx->ei_block),
					idx_pblock(path[i].p_idx),
					newblock);
			/*memmove(++fidx, path[i].p_idx++,
					sizeof(struct ext4_extent_idx));
			BUG_ON(neh->eh_entries > neh->eh_max);*/
			path[i].p_idx++;
			m++;
		}
		if (m) {
			memmove(++fidx, path[i].p_idx - m,
				sizeof(struct ext4_extent_idx) * m);
			neh->eh_entries =
				cpu_to_le16(le16_to_cpu(neh->eh_entries) + m);
		}
		set_buffer_uptodate(bh);
		unlock_buffer(bh);

		if ((err = ext4_journal_dirty_metadata(handle, bh)))
			goto cleanup;
		brelse(bh);
		bh = NULL;

		/* correct old index */
		if (m) {
			err = ext4_ext_get_access(handle, inode, path + i);
			if (err)
				goto cleanup;
			path[i].p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path[i].p_hdr->eh_entries)-m);
			err = ext4_ext_dirty(handle, inode, path + i);
			if (err)
				goto cleanup;
		}

		i--;
	}

	/* insert new index */
	err = ext4_ext_insert_index(handle, inode, path + at,
			le32_to_cpu(border), newblock);

cleanup:
	if (bh) {
		if (buffer_locked(bh))
			unlock_buffer(bh);
		brelse(bh);
	}

	if (err) {
		/* free all allocated blocks in error case */
		for (i = 0; i < depth; i++) {
			if (!ablocks[i])
				continue;
			ext4_free_blocks(handle, inode, ablocks[i], 1);
		}
	}
	kfree(ablocks);

	return err;
}
/*
 * ext4_ext_grow_indepth:
 * implements tree growing procedure:
 * - allocates new block
 * - moves top-level data (index block or leaf) into the new block
 * - initializes new top-level, creating index that points to the
 *   just created block
 */
static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp = path;
	struct ext4_extent_header *neh;
	struct ext4_extent_idx *fidx;
	struct buffer_head *bh;
	ext4_fsblk_t newblock;
	int err = 0;

	newblock = ext4_ext_new_block(handle, inode, path, newext, &err);
	if (newblock == 0)
		return err;

	bh = sb_getblk(inode->i_sb, newblock);
	if (!bh) {
		err = -EIO;
		ext4_std_error(inode->i_sb, err);
		return err;
	}
	lock_buffer(bh);

	if ((err = ext4_journal_get_create_access(handle, bh))) {
		unlock_buffer(bh);
		goto out;
	}

	/* move top-level index/leaf into new block */
	memmove(bh->b_data, curp->p_hdr, sizeof(EXT4_I(inode)->i_data));

	/* set size of new block */
	neh = ext_block_hdr(bh);
	/* old root could have indexes or leaves
	 * so calculate eh_max right way */
	if (ext_depth(inode))
		neh->eh_max = cpu_to_le16(ext4_ext_space_block_idx(inode));
	else
		neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode));
	neh->eh_magic = EXT4_EXT_MAGIC;
	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	if ((err = ext4_journal_dirty_metadata(handle, bh)))
		goto out;

	/* create index in new top-level index: num,max,pointer */
	if ((err = ext4_ext_get_access(handle, inode, curp)))
		goto out;

	curp->p_hdr->eh_magic = EXT4_EXT_MAGIC;
	curp->p_hdr->eh_max = cpu_to_le16(ext4_ext_space_root_idx(inode));
	curp->p_hdr->eh_entries = cpu_to_le16(1);
	curp->p_idx = EXT_FIRST_INDEX(curp->p_hdr);
	/* FIXME: it works, but actually path[0] can be index */
	curp->p_idx->ei_block = EXT_FIRST_EXTENT(path[0].p_hdr)->ee_block;
	ext4_idx_store_pblock(curp->p_idx, newblock);

	neh = ext_inode_hdr(inode);
	fidx = EXT_FIRST_INDEX(neh);
	ext_debug("new root: num %d(%d), lblock %d, ptr "E3FSBLK"\n",
			le16_to_cpu(neh->eh_entries), le16_to_cpu(neh->eh_max),
			le32_to_cpu(fidx->ei_block), idx_pblock(fidx));

	neh->eh_depth = cpu_to_le16(path->p_depth + 1);
	err = ext4_ext_dirty(handle, inode, curp);
out:
	brelse(bh);

	return err;
}
/*
 * ext4_ext_create_new_leaf:
 * finds empty index and adds new leaf.
 * if no free index is found, then it requests in-depth growing.
 */
static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
					struct ext4_ext_path *path,
					struct ext4_extent *newext)
{
	struct ext4_ext_path *curp;
	int depth, i, err = 0;

repeat:
	i = depth = ext_depth(inode);

	/* walk up to the tree and look for free index entry */
	curp = path + depth;
	while (i > 0 && !EXT_HAS_FREE_INDEX(curp)) {
		i--;
		curp--;
	}

	/* we use already allocated block for index block,
	 * so subsequent data blocks should be contiguous */
	if (EXT_HAS_FREE_INDEX(curp)) {
		/* if we found index with free entry, then use that
		 * entry: create all needed subtree and add new leaf */
		err = ext4_ext_split(handle, inode, path, newext, i);

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				le32_to_cpu(newext->ee_block),
				path);
		if (IS_ERR(path))
			err = PTR_ERR(path);
	} else {
		/* tree is full, time to grow in depth */
		err = ext4_ext_grow_indepth(handle, inode, path, newext);
		if (err)
			goto out;

		/* refill path */
		ext4_ext_drop_refs(path);
		path = ext4_ext_find_extent(inode,
				le32_to_cpu(newext->ee_block),
				path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out;
		}

		/*
		 * only first (depth 0 -> 1) produces free space;
		 * in all other cases we have to split the grown tree
		 */
		depth = ext_depth(inode);
		if (path[depth].p_hdr->eh_entries == path[depth].p_hdr->eh_max) {
			/* now we need to split */
			goto repeat;
		}
	}

out:
	return err;
}
/*
 * ext4_ext_next_allocated_block:
 * returns allocated block in subsequent extent or EXT_MAX_BLOCK.
 * NOTE: it considers block number from index entry as
 * allocated block. Thus, index entries have to be consistent
 * with leaves.
 */
static unsigned long
ext4_ext_next_allocated_block(struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	if (depth == 0 && path->p_ext == NULL)
		return EXT_MAX_BLOCK;

	while (depth >= 0) {
		if (depth == path->p_depth) {
			/* leaf */
			if (path[depth].p_ext !=
					EXT_LAST_EXTENT(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_ext[1].ee_block);
		} else {
			/* index */
			if (path[depth].p_idx !=
					EXT_LAST_INDEX(path[depth].p_hdr))
				return le32_to_cpu(path[depth].p_idx[1].ei_block);
		}
		depth--;
	}

	return EXT_MAX_BLOCK;
}
/*
 * ext4_ext_next_leaf_block:
 * returns first allocated block from next leaf or EXT_MAX_BLOCK
 */
static unsigned ext4_ext_next_leaf_block(struct inode *inode,
					struct ext4_ext_path *path)
{
	int depth;

	BUG_ON(path == NULL);
	depth = path->p_depth;

	/* zero-tree has no leaf blocks at all */
	if (depth == 0)
		return EXT_MAX_BLOCK;

	/* go to index block */
	depth--;

	while (depth >= 0) {
		if (path[depth].p_idx !=
				EXT_LAST_INDEX(path[depth].p_hdr))
			return le32_to_cpu(path[depth].p_idx[1].ei_block);
		depth--;
	}

	return EXT_MAX_BLOCK;
}
/*
 * ext4_ext_correct_indexes:
 * if leaf gets modified and modified extent is first in the leaf,
 * then we have to correct all indexes above.
 * TODO: do we need to correct tree in all cases?
 */
int ext4_ext_correct_indexes(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path)
{
	struct ext4_extent_header *eh;
	int depth = ext_depth(inode);
	struct ext4_extent *ex;
	__le32 border;
	int k, err = 0;

	eh = path[depth].p_hdr;
	ex = path[depth].p_ext;
	BUG_ON(ex == NULL);
	BUG_ON(eh == NULL);

	if (depth == 0) {
		/* there is no tree at all */
		return 0;
	}

	if (ex != EXT_FIRST_EXTENT(eh)) {
		/* we correct tree if first leaf got modified only */
		return 0;
	}

	/*
	 * TODO: we need correction if border is smaller than current one
	 */
	k = depth - 1;
	border = path[depth].p_ext->ee_block;
	if ((err = ext4_ext_get_access(handle, inode, path + k)))
		return err;
	path[k].p_idx->ei_block = border;
	if ((err = ext4_ext_dirty(handle, inode, path + k)))
		return err;

	while (k--) {
		/* change all left-side indexes */
		if (path[k+1].p_idx != EXT_FIRST_INDEX(path[k+1].p_hdr))
			break;
		if ((err = ext4_ext_get_access(handle, inode, path + k)))
			break;
		path[k].p_idx->ei_block = border;
		if ((err = ext4_ext_dirty(handle, inode, path + k)))
			break;
	}

	return err;
}
static int
ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1,
				struct ext4_extent *ex2)
{
	if (le32_to_cpu(ex1->ee_block) + le16_to_cpu(ex1->ee_len)
			!= le32_to_cpu(ex2->ee_block))
		return 0;

	/*
	 * To allow future support for preallocated extents to be added
	 * as an RO_COMPAT feature, refuse to merge two extents if
	 * this can result in the top bit of ee_len being set.
	 */
	if (le16_to_cpu(ex1->ee_len) + le16_to_cpu(ex2->ee_len) > EXT_MAX_LEN)
		return 0;
#ifdef AGRESSIVE_TEST
	if (le16_to_cpu(ex1->ee_len) >= 4)
		return 0;
#endif

	if (ext_pblock(ex1) + le16_to_cpu(ex1->ee_len) == ext_pblock(ex2))
		return 1;
	return 0;
}
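
/*
 * Worked example (illustrative values, not from the original): extents
 * {ee_block 100, ee_len 8, pblock 5000} and {ee_block 108, ee_len 4,
 * pblock 5008} pass all three checks above: 100 + 8 == 108 logically,
 * 5000 + 8 == 5008 physically, and 8 + 4 stays below EXT_MAX_LEN.
 */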
/*
 * ext4_ext_insert_extent:
 * tries to merge requested extent into the existing extent or
 * inserts requested extent as new one into the tree,
 * creating new leaf in the no-space case.
 */
int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
				struct ext4_ext_path *path,
				struct ext4_extent *newext)
{
	struct ext4_extent_header * eh;
	struct ext4_extent *ex, *fex;
	struct ext4_extent *nearex; /* nearest extent */
	struct ext4_ext_path *npath = NULL;
	int depth, len, err, next;

	BUG_ON(newext->ee_len == 0);
	depth = ext_depth(inode);
	ex = path[depth].p_ext;
	BUG_ON(path[depth].p_hdr == NULL);

	/* try to insert block into found extent and return */
	if (ex && ext4_can_extents_be_merged(inode, ex, newext)) {
		ext_debug("append %d block to %d:%d (from "E3FSBLK")\n",
				le16_to_cpu(newext->ee_len),
				le32_to_cpu(ex->ee_block),
				le16_to_cpu(ex->ee_len), ext_pblock(ex));
		if ((err = ext4_ext_get_access(handle, inode, path + depth)))
			return err;
		ex->ee_len = cpu_to_le16(le16_to_cpu(ex->ee_len)
					+ le16_to_cpu(newext->ee_len));
		eh = path[depth].p_hdr;
		nearex = ex;
		goto merge;
	}

repeat:
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;
	if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max))
		goto has_space;

	/* probably next leaf has space for us? */
	fex = EXT_LAST_EXTENT(eh);
	next = ext4_ext_next_leaf_block(inode, path);
	if (le32_to_cpu(newext->ee_block) > le32_to_cpu(fex->ee_block)
	    && next != EXT_MAX_BLOCK) {
		ext_debug("next leaf block - %d\n", next);
		BUG_ON(npath != NULL);
		npath = ext4_ext_find_extent(inode, next, NULL);
		if (IS_ERR(npath))
			return PTR_ERR(npath);
		BUG_ON(npath->p_depth != path->p_depth);
		eh = npath[depth].p_hdr;
		if (le16_to_cpu(eh->eh_entries) < le16_to_cpu(eh->eh_max)) {
			ext_debug("next leaf isn't full(%d)\n",
					le16_to_cpu(eh->eh_entries));
			path = npath;
			goto repeat;
		}
		ext_debug("next leaf has no free space(%d,%d)\n",
				le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
	}

	/*
	 * There is no free space in the found leaf.
	 * We're gonna add a new leaf in the tree.
	 */
	err = ext4_ext_create_new_leaf(handle, inode, path, newext);
	if (err)
		goto cleanup;
	depth = ext_depth(inode);
	eh = path[depth].p_hdr;

has_space:
	nearex = path[depth].p_ext;

	if ((err = ext4_ext_get_access(handle, inode, path + depth)))
		goto cleanup;

	if (!nearex) {
		/* there is no extent in this leaf, create first one */
		ext_debug("first extent in the leaf: %d:"E3FSBLK":%d\n",
				le32_to_cpu(newext->ee_block),
				ext_pblock(newext),
				le16_to_cpu(newext->ee_len));
		path[depth].p_ext = EXT_FIRST_EXTENT(eh);
	} else if (le32_to_cpu(newext->ee_block)
			> le32_to_cpu(nearex->ee_block)) {
		/* BUG_ON(newext->ee_block == nearex->ee_block); */
		if (nearex != EXT_LAST_EXTENT(eh)) {
			len = EXT_MAX_EXTENT(eh) - nearex;
			len = (len - 1) * sizeof(struct ext4_extent);
			len = len < 0 ? 0 : len;
			ext_debug("insert %d:"E3FSBLK":%d after: nearest 0x%p, "
					"move %d from 0x%p to 0x%p\n",
					le32_to_cpu(newext->ee_block),
					ext_pblock(newext),
					le16_to_cpu(newext->ee_len),
					nearex, len, nearex + 1, nearex + 2);
			memmove(nearex + 2, nearex + 1, len);
		}
		path[depth].p_ext = nearex + 1;
	} else {
		BUG_ON(newext->ee_block == nearex->ee_block);
		len = (EXT_MAX_EXTENT(eh) - nearex) * sizeof(struct ext4_extent);
		len = len < 0 ? 0 : len;
		ext_debug("insert %d:"E3FSBLK":%d before: nearest 0x%p, "
				"move %d from 0x%p to 0x%p\n",
				le32_to_cpu(newext->ee_block),
				ext_pblock(newext),
				le16_to_cpu(newext->ee_len),
				nearex, len, nearex + 1, nearex + 2);
		memmove(nearex + 1, nearex, len);
		path[depth].p_ext = nearex;
	}

	eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)+1);
	nearex = path[depth].p_ext;
	nearex->ee_block = newext->ee_block;
	nearex->ee_start = newext->ee_start;
	nearex->ee_start_hi = newext->ee_start_hi;
	nearex->ee_len = newext->ee_len;

merge:
	/* try to merge extents to the right */
	while (nearex < EXT_LAST_EXTENT(eh)) {
		if (!ext4_can_extents_be_merged(inode, nearex, nearex + 1))
			break;
		/* merge with next extent! */
		nearex->ee_len = cpu_to_le16(le16_to_cpu(nearex->ee_len)
					+ le16_to_cpu(nearex[1].ee_len));
		if (nearex + 1 < EXT_LAST_EXTENT(eh)) {
			len = (EXT_LAST_EXTENT(eh) - nearex - 1)
					* sizeof(struct ext4_extent);
			memmove(nearex + 1, nearex + 2, len);
		}
		eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
		BUG_ON(eh->eh_entries == 0);
	}

	/* try to merge extents to the left */

	/* time to correct all indexes above */
	err = ext4_ext_correct_indexes(handle, inode, path);
	if (err)
		goto cleanup;

	err = ext4_ext_dirty(handle, inode, path + depth);

cleanup:
	if (npath) {
		ext4_ext_drop_refs(npath);
		kfree(npath);
	}
	ext4_ext_tree_changed(inode);
	ext4_ext_invalidate_cache(inode);
	return err;
}
int ext4_ext_walk_space(struct inode *inode, unsigned long block,
			unsigned long num, ext_prepare_callback func,
			void *cbdata)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_ext_cache cbex;
	struct ext4_extent *ex;
	unsigned long next, start = 0, end = 0;
	unsigned long last = block + num;
	int depth, exists, err = 0;

	BUG_ON(func == NULL);
	BUG_ON(inode == NULL);

	while (block < last && block != EXT_MAX_BLOCK) {
		num = last - block;
		/* find extent for this block */
		path = ext4_ext_find_extent(inode, block, path);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			path = NULL;
			break;
		}

		depth = ext_depth(inode);
		BUG_ON(path[depth].p_hdr == NULL);
		ex = path[depth].p_ext;
		next = ext4_ext_next_allocated_block(path);

		exists = 0;
		if (!ex) {
			/* there is no extent yet, so try to allocate
			 * all requested space */
			start = block;
			end = block + num;
		} else if (le32_to_cpu(ex->ee_block) > block) {
			/* need to allocate space before found extent */
			start = block;
			end = le32_to_cpu(ex->ee_block);
			if (block + num < end)
				end = block + num;
		} else if (block >=
			   le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len)) {
			/* need to allocate space after found extent */
			start = block;
			end = block + num;
			if (end >= next)
				end = next;
		} else if (block >= le32_to_cpu(ex->ee_block)) {
			/*
			 * some part of requested space is covered
			 * by found extent
			 */
			start = block;
			end = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len);
			if (block + num < end)
				end = block + num;
			exists = 1;
		} else {
			BUG();
		}
		BUG_ON(end <= start);

		if (!exists) {
			cbex.ec_block = start;
			cbex.ec_len = end - start;
			cbex.ec_start = 0;
			cbex.ec_type = EXT4_EXT_CACHE_GAP;
		} else {
			cbex.ec_block = le32_to_cpu(ex->ee_block);
			cbex.ec_len = le16_to_cpu(ex->ee_len);
			cbex.ec_start = ext_pblock(ex);
			cbex.ec_type = EXT4_EXT_CACHE_EXTENT;
		}

		BUG_ON(cbex.ec_len == 0);
		err = func(inode, path, &cbex, cbdata);
		ext4_ext_drop_refs(path);

		if (err < 0)
			break;
		if (err == EXT_REPEAT)
			continue;
		else if (err == EXT_BREAK) {
			err = 0;
			break;
		}

		if (ext_depth(inode) != depth) {
			/* depth was changed. we have to realloc path */
			kfree(path);
			path = NULL;
		}

		block = cbex.ec_block + cbex.ec_len;
	}

	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}

	return err;
}
static void
ext4_ext_put_in_cache(struct inode *inode, __u32 block,
			__u32 len, __u32 start, int type)
{
	struct ext4_ext_cache *cex;

	cex = &EXT4_I(inode)->i_cached_extent;
	cex->ec_type = type;
	cex->ec_block = block;
	cex->ec_len = len;
	cex->ec_start = start;
}
/*
 * ext4_ext_put_gap_in_cache:
 * calculate boundaries of the gap that the requested block fits into
 * and cache this gap
 */
static void
ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
				unsigned long block)
{
	int depth = ext_depth(inode);
	unsigned long lblock, len;
	struct ext4_extent *ex;

	ex = path[depth].p_ext;
	if (ex == NULL) {
		/* there is no extent yet, so gap is [0;-] */
		lblock = 0;
		len = EXT_MAX_BLOCK;
		ext_debug("cache gap(whole file):");
	} else if (block < le32_to_cpu(ex->ee_block)) {
		lblock = block;
		len = le32_to_cpu(ex->ee_block) - block;
		ext_debug("cache gap(before): %lu [%lu:%lu]",
				(unsigned long) block,
				(unsigned long) le32_to_cpu(ex->ee_block),
				(unsigned long) le16_to_cpu(ex->ee_len));
	} else if (block >= le32_to_cpu(ex->ee_block)
			+ le16_to_cpu(ex->ee_len)) {
		lblock = le32_to_cpu(ex->ee_block)
			+ le16_to_cpu(ex->ee_len);
		len = ext4_ext_next_allocated_block(path);
		ext_debug("cache gap(after): [%lu:%lu] %lu",
				(unsigned long) le32_to_cpu(ex->ee_block),
				(unsigned long) le16_to_cpu(ex->ee_len),
				(unsigned long) block);
		BUG_ON(len == lblock);
		len = len - lblock;
	} else {
		lblock = len = 0;
		BUG();
	}

	ext_debug(" -> %lu:%lu\n", (unsigned long) lblock, len);
	ext4_ext_put_in_cache(inode, lblock, len, 0, EXT4_EXT_CACHE_GAP);
}
static int
ext4_ext_in_cache(struct inode *inode, unsigned long block,
			struct ext4_extent *ex)
{
	struct ext4_ext_cache *cex;

	cex = &EXT4_I(inode)->i_cached_extent;

	/* has cache valid data? */
	if (cex->ec_type == EXT4_EXT_CACHE_NO)
		return EXT4_EXT_CACHE_NO;

	BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
			cex->ec_type != EXT4_EXT_CACHE_EXTENT);
	if (block >= cex->ec_block && block < cex->ec_block + cex->ec_len) {
		ex->ee_block = cpu_to_le32(cex->ec_block);
		ext4_ext_store_pblock(ex, cex->ec_start);
		ex->ee_len = cpu_to_le16(cex->ec_len);
		ext_debug("%lu cached by %lu:%lu:"E3FSBLK"\n",
				(unsigned long) block,
				(unsigned long) cex->ec_block,
				(unsigned long) cex->ec_len,
				cex->ec_start);
		return cex->ec_type;
	}

	/* not in cache */
	return EXT4_EXT_CACHE_NO;
}
/*
 * ext4_ext_rm_idx:
 * removes index from the index block.
 * It's used in truncate case only, thus all requests are for
 * last index in the block only.
 */
int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
			struct ext4_ext_path *path)
{
	struct buffer_head *bh;
	int err;
	ext4_fsblk_t leaf;

	/* free index block */
	path--;
	leaf = idx_pblock(path->p_idx);
	BUG_ON(path->p_hdr->eh_entries == 0);
	if ((err = ext4_ext_get_access(handle, inode, path)))
		return err;
	path->p_hdr->eh_entries = cpu_to_le16(le16_to_cpu(path->p_hdr->eh_entries)-1);
	if ((err = ext4_ext_dirty(handle, inode, path)))
		return err;
	ext_debug("index is empty, remove it, free block "E3FSBLK"\n", leaf);
	bh = sb_find_get_block(inode->i_sb, leaf);
	ext4_forget(handle, 1, inode, bh, leaf);
	ext4_free_blocks(handle, inode, leaf, 1);
	return err;
}
/*
 * ext4_ext_calc_credits_for_insert:
 * This routine returns max. credits that the extent tree can consume.
 * It should be OK for low-performance paths like ->writepage().
 * To allow many writing processes to fit into a single transaction,
 * the caller should calculate credits under truncate_mutex and
 * pass the actual path.
 */
int inline ext4_ext_calc_credits_for_insert(struct inode *inode,
						struct ext4_ext_path *path)
{
	int depth, needed;

	if (path) {
		/* probably there is space in leaf? */
		depth = ext_depth(inode);
		if (le16_to_cpu(path[depth].p_hdr->eh_entries)
				< le16_to_cpu(path[depth].p_hdr->eh_max))
			return 1;
	}

	/*
	 * given 32-bit logical block (4294967296 blocks), max. tree
	 * can be 4 levels in depth -- 4 * 340^4 == 53453440000.
	 * Let's also add one more level for imbalance.
	 */
	depth = 5;

	/* allocation of new data block(s) */
	needed = 2;

	/*
	 * tree can be full, so it would need to grow in depth:
	 * allocation + old root + new root
	 */
	needed += 2 + 1 + 1;

	/*
	 * Index split can happen, we would need:
	 *    allocate intermediate indexes (bitmap + group)
	 *  + change two blocks at each level, but root (already included)
	 */
	needed += (depth * 2) + (depth * 2);

	/* any allocation modifies superblock */
	needed += 1;

	return needed;
}
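
/*
 * Illustrative tally (not in the original): with depth capped at 5, the
 * terms above accumulate to 2 (data blocks) + 4 (depth growth) + 20
 * (index split, (5 * 2) + (5 * 2)) + 1 (superblock) = 27 credits.
 */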
static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
				struct ext4_extent *ex,
				unsigned long from, unsigned long to)
{
	struct buffer_head *bh;
	int i;

#ifdef EXTENTS_STATS
	{
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		unsigned short ee_len = le16_to_cpu(ex->ee_len);
		spin_lock(&sbi->s_ext_stats_lock);
		sbi->s_ext_blocks += ee_len;
		sbi->s_ext_extents++;
		if (ee_len < sbi->s_ext_min)
			sbi->s_ext_min = ee_len;
		if (ee_len > sbi->s_ext_max)
			sbi->s_ext_max = ee_len;
		if (ext_depth(inode) > sbi->s_depth_max)
			sbi->s_depth_max = ext_depth(inode);
		spin_unlock(&sbi->s_ext_stats_lock);
	}
#endif
	if (from >= le32_to_cpu(ex->ee_block)
	    && to == le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
		/* tail removal */
		unsigned long num;
		ext4_fsblk_t start;
		num = le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - from;
		start = ext_pblock(ex) + le16_to_cpu(ex->ee_len) - num;
		ext_debug("free last %lu blocks starting "E3FSBLK"\n", num, start);
		for (i = 0; i < num; i++) {
			bh = sb_find_get_block(inode->i_sb, start + i);
			ext4_forget(handle, 0, inode, bh, start + i);
		}
		ext4_free_blocks(handle, inode, start, num);
	} else if (from == le32_to_cpu(ex->ee_block)
		   && to <= le32_to_cpu(ex->ee_block) + le16_to_cpu(ex->ee_len) - 1) {
		printk("strange request: removal %lu-%lu from %u:%u\n",
			from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
	} else {
		printk("strange request: removal(2) %lu-%lu from %u:%u\n",
			from, to, le32_to_cpu(ex->ee_block), le16_to_cpu(ex->ee_len));
	}
	return 0;
}
static int
ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
		struct ext4_ext_path *path, unsigned long start)
{
	int err = 0, correct_index = 0;
	int depth = ext_depth(inode), credits;
	struct ext4_extent_header *eh;
	unsigned a, b, block, num;
	unsigned long ex_ee_block;
	unsigned short ex_ee_len;
	struct ext4_extent *ex;

	ext_debug("truncate since %lu in leaf\n", start);
	if (!path[depth].p_hdr)
		path[depth].p_hdr = ext_block_hdr(path[depth].p_bh);
	eh = path[depth].p_hdr;
	BUG_ON(eh == NULL);
	BUG_ON(le16_to_cpu(eh->eh_entries) > le16_to_cpu(eh->eh_max));
	BUG_ON(eh->eh_magic != EXT4_EXT_MAGIC);

	/* find where to start removing */
	ex = EXT_LAST_EXTENT(eh);

	ex_ee_block = le32_to_cpu(ex->ee_block);
	ex_ee_len = le16_to_cpu(ex->ee_len);

	while (ex >= EXT_FIRST_EXTENT(eh) &&
	       ex_ee_block + ex_ee_len > start) {
		ext_debug("remove ext %lu:%u\n", ex_ee_block, ex_ee_len);
		path[depth].p_ext = ex;

		a = ex_ee_block > start ? ex_ee_block : start;
		b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ?
			ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK;

		ext_debug("  border %u:%u\n", a, b);

		if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) {
			block = 0;
			num = 0;
			BUG();
		} else if (a != ex_ee_block) {
			/* remove tail of the extent */
			block = ex_ee_block;
			num = a - block;
		} else if (b != ex_ee_block + ex_ee_len - 1) {
			/* remove head of the extent */
			block = a;
			num = b - a;
			/* there is no "make a hole" API yet */
			BUG();
		} else {
			/* remove whole extent: excellent! */
			block = ex_ee_block;
			num = 0;
			BUG_ON(a != ex_ee_block);
			BUG_ON(b != ex_ee_block + ex_ee_len - 1);
		}

		/* at present, extent can't cross block group: */
		/* leaf + bitmap + group desc + sb + inode */
		credits = 5;
		if (ex == EXT_FIRST_EXTENT(eh)) {
			correct_index = 1;
			credits += (ext_depth(inode)) + 1;
		}
#ifdef CONFIG_QUOTA
		credits += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
#endif

		handle = ext4_ext_journal_restart(handle, credits);
		if (IS_ERR(handle)) {
			err = PTR_ERR(handle);
			goto out;
		}

		err = ext4_ext_get_access(handle, inode, path + depth);
		if (err)
			goto out;

		err = ext4_remove_blocks(handle, inode, ex, a, b);
		if (err)
			goto out;

		if (num == 0) {
			/* this extent is removed; mark slot entirely unused */
			ext4_ext_store_pblock(ex, 0);
			eh->eh_entries = cpu_to_le16(le16_to_cpu(eh->eh_entries)-1);
		}

		ex->ee_block = cpu_to_le32(block);
		ex->ee_len = cpu_to_le16(num);

		err = ext4_ext_dirty(handle, inode, path + depth);
		if (err)
			goto out;

		ext_debug("new extent: %u:%u:"E3FSBLK"\n", block, num,
				ext_pblock(ex));
		ex--;
		ex_ee_block = le32_to_cpu(ex->ee_block);
		ex_ee_len = le16_to_cpu(ex->ee_len);
	}

	if (correct_index && eh->eh_entries)
		err = ext4_ext_correct_indexes(handle, inode, path);

	/* if this leaf is free, then we should
	 * remove it from index block above */
	if (err == 0 && eh->eh_entries == 0 && path[depth].p_bh != NULL)
		err = ext4_ext_rm_idx(handle, inode, path + depth);

out:
	return err;
}
/*
 * ext4_ext_more_to_rm:
 * returns 1 if current index has to be freed (even partial)
 */
static int inline
ext4_ext_more_to_rm(struct ext4_ext_path *path)
{
	BUG_ON(path->p_idx == NULL);

	if (path->p_idx < EXT_FIRST_INDEX(path->p_hdr))
		return 0;

	/*
	 * if truncate on deeper level happened, it wasn't partial,
	 * so we have to consider current index for truncation
	 */
	if (le16_to_cpu(path->p_hdr->eh_entries) == path->p_block)
		return 0;
	return 1;
}
int ext4_ext_remove_space(struct inode *inode, unsigned long start)
{
	struct super_block *sb = inode->i_sb;
	int depth = ext_depth(inode);
	struct ext4_ext_path *path;
	handle_t *handle;
	int i = 0, err = 0;

	ext_debug("truncate since %lu\n", start);

	/* probably first extent we're gonna free will be last in block */
	handle = ext4_journal_start(inode, depth + 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	ext4_ext_invalidate_cache(inode);

	/*
	 * We start scanning from right side, freeing all the blocks
	 * after i_size and walking into the tree depth-wise.
	 */
	path = kmalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_KERNEL);
	if (path == NULL) {
		ext4_journal_stop(handle);
		return -ENOMEM;
	}
	memset(path, 0, sizeof(struct ext4_ext_path) * (depth + 1));
	path[0].p_hdr = ext_inode_hdr(inode);
	if (ext4_ext_check_header(__FUNCTION__, inode, path[0].p_hdr)) {
		err = -EIO;
		goto out;
	}
	path[0].p_depth = depth;

	while (i >= 0 && err == 0) {
		if (i == depth) {
			/* this is leaf block */
			err = ext4_ext_rm_leaf(handle, inode, path, start);
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			continue;
		}

		/* this is index block */
		if (!path[i].p_hdr) {
			ext_debug("initialize header\n");
			path[i].p_hdr = ext_block_hdr(path[i].p_bh);
			if (ext4_ext_check_header(__FUNCTION__, inode,
						path[i].p_hdr)) {
				err = -EIO;
				goto out;
			}
		}

		BUG_ON(le16_to_cpu(path[i].p_hdr->eh_entries)
				> le16_to_cpu(path[i].p_hdr->eh_max));
		BUG_ON(path[i].p_hdr->eh_magic != EXT4_EXT_MAGIC);

		if (!path[i].p_idx) {
			/* this level hasn't been touched yet */
			path[i].p_idx = EXT_LAST_INDEX(path[i].p_hdr);
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries)+1;
			ext_debug("init index ptr: hdr 0x%p, num %d\n",
					path[i].p_hdr,
					le16_to_cpu(path[i].p_hdr->eh_entries));
		} else {
			/* we were already here, see at next index */
			path[i].p_idx--;
		}

		ext_debug("level %d - index, first 0x%p, cur 0x%p\n",
				i, EXT_FIRST_INDEX(path[i].p_hdr),
				path[i].p_idx);
		if (ext4_ext_more_to_rm(path + i)) {
			/* go to the next level */
			ext_debug("move to level %d (block "E3FSBLK")\n",
					i + 1, idx_pblock(path[i].p_idx));
			memset(path + i + 1, 0, sizeof(*path));
			path[i+1].p_bh =
				sb_bread(sb, idx_pblock(path[i].p_idx));
			if (!path[i+1].p_bh) {
				/* should we reset i_size? */
				err = -EIO;
				break;
			}

			/* save actual number of indexes since this
			 * number is changed at the next iteration */
			path[i].p_block = le16_to_cpu(path[i].p_hdr->eh_entries);
			i++;
		} else {
			/* we finished processing this index, go up */
			if (path[i].p_hdr->eh_entries == 0 && i > 0) {
				/* index is empty, remove it;
				 * handle must be already prepared by the
				 * truncatei_leaf() */
				err = ext4_ext_rm_idx(handle, inode, path + i);
			}
			/* root level has p_bh == NULL, brelse() eats this */
			brelse(path[i].p_bh);
			path[i].p_bh = NULL;
			i--;
			ext_debug("return to level %d\n", i);
		}
	}

	/* TODO: flexible tree reduction should be here */
	if (path->p_hdr->eh_entries == 0) {
		/*
		 * truncate to zero freed all the tree,
		 * so we need to correct eh_depth
		 */
		err = ext4_ext_get_access(handle, inode, path);
		if (err == 0) {
			ext_inode_hdr(inode)->eh_depth = 0;
			ext_inode_hdr(inode)->eh_max =
				cpu_to_le16(ext4_ext_space_root(inode));
			err = ext4_ext_dirty(handle, inode, path);
		}
	}
out:
	ext4_ext_tree_changed(inode);
	ext4_ext_drop_refs(path);
	kfree(path);
	ext4_journal_stop(handle);

	return err;
}
/*
 * called at mount time
 */
void ext4_ext_init(struct super_block *sb)
{
	/*
	 * possible initialization would be here
	 */

	if (test_opt(sb, EXTENTS)) {
		printk("EXT4-fs: file extents enabled");
#ifdef AGRESSIVE_TEST
		printk(", aggressive tests");
#endif
#ifdef CHECK_BINSEARCH
		printk(", check binsearch");
#endif
#ifdef EXTENTS_STATS
		printk(", stats");
#endif
		printk("\n");
#ifdef EXTENTS_STATS
		spin_lock_init(&EXT4_SB(sb)->s_ext_stats_lock);
		EXT4_SB(sb)->s_ext_min = 1 << 30;
		EXT4_SB(sb)->s_ext_max = 0;
#endif
	}
}
/*
 * called at umount time
 */
void ext4_ext_release(struct super_block *sb)
{
	if (!test_opt(sb, EXTENTS))
		return;

#ifdef EXTENTS_STATS
	if (EXT4_SB(sb)->s_ext_blocks && EXT4_SB(sb)->s_ext_extents) {
		struct ext4_sb_info *sbi = EXT4_SB(sb);
		printk(KERN_ERR "EXT4-fs: %lu blocks in %lu extents (%lu ave)\n",
			sbi->s_ext_blocks, sbi->s_ext_extents,
			sbi->s_ext_blocks / sbi->s_ext_extents);
		printk(KERN_ERR "EXT4-fs: extents: %lu min, %lu max, max depth %lu\n",
			sbi->s_ext_min, sbi->s_ext_max, sbi->s_depth_max);
	}
#endif
}
int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
			ext4_fsblk_t iblock,
			unsigned long max_blocks, struct buffer_head *bh_result,
			int create, int extend_disksize)
{
	struct ext4_ext_path *path = NULL;
	struct ext4_extent newex, *ex;
	ext4_fsblk_t goal, newblock;
	int err = 0, depth;
	unsigned long allocated = 0;

	__clear_bit(BH_New, &bh_result->b_state);
	ext_debug("blocks %d/%lu requested for inode %u\n", (int) iblock,
			max_blocks, (unsigned) inode->i_ino);
	mutex_lock(&EXT4_I(inode)->truncate_mutex);

	/* check in cache */
	if ((goal = ext4_ext_in_cache(inode, iblock, &newex))) {
		if (goal == EXT4_EXT_CACHE_GAP) {
			if (!create) {
				/* block isn't allocated yet and
				 * user doesn't want to allocate it */
				goto out2;
			}
			/* we should allocate requested block */
		} else if (goal == EXT4_EXT_CACHE_EXTENT) {
			/* block is already allocated */
			newblock = iblock
				- le32_to_cpu(newex.ee_block)
				+ ext_pblock(&newex);
			/* number of remaining blocks in the extent */
			allocated = le16_to_cpu(newex.ee_len) -
					(iblock - le32_to_cpu(newex.ee_block));
			goto out;
		} else {
			BUG();
		}
	}

	/* find extent for this block */
	path = ext4_ext_find_extent(inode, iblock, NULL);
	if (IS_ERR(path)) {
		err = PTR_ERR(path);
		path = NULL;
		goto out2;
	}

	depth = ext_depth(inode);

	/*
	 * consistent leaf must not be empty;
	 * this situation is possible, though, _during_ tree modification;
	 * this is why assert can't be put in ext4_ext_find_extent()
	 */
	BUG_ON(path[depth].p_ext == NULL && depth != 0);

	if ((ex = path[depth].p_ext)) {
		unsigned long ee_block = le32_to_cpu(ex->ee_block);
		ext4_fsblk_t ee_start = ext_pblock(ex);
		unsigned short ee_len = le16_to_cpu(ex->ee_len);

		/*
		 * Allow future support for preallocated extents to be added
		 * as an RO_COMPAT feature:
		 * Uninitialized extents are treated as holes, except that
		 * we avoid (fail) allocating new blocks during a write.
		 */
		if (ee_len > EXT_MAX_LEN)
			goto out2;
		/* if found extent covers block, simply return it */
		if (iblock >= ee_block && iblock < ee_block + ee_len) {
			newblock = iblock - ee_block + ee_start;
			/* number of remaining blocks in the extent */
			allocated = ee_len - (iblock - ee_block);
			ext_debug("%d fit into %lu:%d -> "E3FSBLK"\n", (int) iblock,
					ee_block, ee_len, newblock);
			ext4_ext_put_in_cache(inode, ee_block, ee_len,
						ee_start, EXT4_EXT_CACHE_EXTENT);
			goto out;
		}
	}

	/*
	 * requested block isn't allocated yet;
	 * we couldn't try to create block if create flag is zero
	 */
	if (!create) {
		/* put just found gap into cache to speed up
		 * subsequent requests */
		ext4_ext_put_gap_in_cache(inode, path, iblock);
		goto out2;
	}
	/*
	 * Okay, we need to do block allocation. Lazily initialize the block
	 * allocation info here if necessary.
	 */
	if (S_ISREG(inode->i_mode) && (!EXT4_I(inode)->i_block_alloc_info))
		ext4_init_block_alloc_info(inode);

	/* allocate new block */
	goal = ext4_ext_find_goal(inode, path, iblock);
	allocated = max_blocks;
	newblock = ext4_new_blocks(handle, inode, goal, &allocated, &err);
	if (!newblock)
		goto out2;
	ext_debug("allocate new block: goal "E3FSBLK", found "E3FSBLK"/%lu\n",
			goal, newblock, allocated);

	/* try to insert new extent into found leaf and return */
	newex.ee_block = cpu_to_le32(iblock);
	ext4_ext_store_pblock(&newex, newblock);
	newex.ee_len = cpu_to_le16(allocated);
	err = ext4_ext_insert_extent(handle, inode, path, &newex);
	if (err)
		goto out2;

	if (extend_disksize && inode->i_size > EXT4_I(inode)->i_disksize)
		EXT4_I(inode)->i_disksize = inode->i_size;

	/* previous routine could use block we allocated */
	newblock = ext_pblock(&newex);
	__set_bit(BH_New, &bh_result->b_state);

	ext4_ext_put_in_cache(inode, iblock, allocated, newblock,
				EXT4_EXT_CACHE_EXTENT);
out:
	if (allocated > max_blocks)
		allocated = max_blocks;
	ext4_ext_show_leaf(inode, path);
	__set_bit(BH_Mapped, &bh_result->b_state);
	bh_result->b_bdev = inode->i_sb->s_bdev;
	bh_result->b_blocknr = newblock;
out2:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	mutex_unlock(&EXT4_I(inode)->truncate_mutex);

	return err ? err : allocated;
}
void ext4_ext_truncate(struct inode * inode, struct page *page)
{
	struct address_space *mapping = inode->i_mapping;
	struct super_block *sb = inode->i_sb;
	unsigned long last_block;
	handle_t *handle;
	int err = 0;

	/*
	 * probably first extent we're gonna free will be last in block
	 */
	err = ext4_writepage_trans_blocks(inode) + 3;
	handle = ext4_journal_start(inode, err);
	if (IS_ERR(handle)) {
		if (page) {
			clear_highpage(page);
			flush_dcache_page(page);
			unlock_page(page);
			page_cache_release(page);
		}
		return;
	}

	if (page)
		ext4_block_truncate_page(handle, page, mapping, inode->i_size);

	mutex_lock(&EXT4_I(inode)->truncate_mutex);
	ext4_ext_invalidate_cache(inode);

	/*
	 * TODO: optimization is possible here.
	 * Probably we need not scan at all,
	 * because page truncation is enough.
	 */
	if (ext4_orphan_add(handle, inode))
		goto out_stop;

	/* we have to know where to truncate from in crash case */
	EXT4_I(inode)->i_disksize = inode->i_size;
	ext4_mark_inode_dirty(handle, inode);

	last_block = (inode->i_size + sb->s_blocksize - 1)
			>> EXT4_BLOCK_SIZE_BITS(sb);
	err = ext4_ext_remove_space(inode, last_block);

	/* In a multi-transaction truncate, we only make the final
	 * transaction synchronous. */
	if (IS_SYNC(inode))
		handle->h_sync = 1;

out_stop:
	/*
	 * If this was a simple ftruncate() and the file will remain alive,
	 * then we need to clear up the orphan record which we created above.
	 * However, if this was a real unlink then we were called by
	 * ext4_delete_inode(), and we allow that function to clean up the
	 * orphan info for us.
	 */
	if (inode->i_nlink)
		ext4_orphan_del(handle, inode);

	mutex_unlock(&EXT4_I(inode)->truncate_mutex);
	ext4_journal_stop(handle);
}
/*
 * ext4_ext_writepage_trans_blocks:
 * calculate max number of blocks we could modify
 * in order to allocate new block for an inode
 */
int ext4_ext_writepage_trans_blocks(struct inode *inode, int num)
{
	int needed;

	needed = ext4_ext_calc_credits_for_insert(inode, NULL);

	/* caller wants to allocate num blocks, but note it includes sb */
	needed = needed * num - (num - 1);

#ifdef CONFIG_QUOTA
	needed += 2 * EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb);
#endif

	return needed;
}
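
/*
 * Illustrative arithmetic (not in the original): for num = 3 and a
 * per-insert estimate of, say, 27 credits, this gives 3 * 27 - 2 = 79,
 * since the superblock credit included in each estimate only needs to
 * be counted once for the whole transaction.
 */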
EXPORT_SYMBOL(ext4_mark_inode_dirty);
EXPORT_SYMBOL(ext4_ext_invalidate_cache);
EXPORT_SYMBOL(ext4_ext_insert_extent);
EXPORT_SYMBOL(ext4_ext_walk_space);
EXPORT_SYMBOL(ext4_ext_find_goal);
EXPORT_SYMBOL(ext4_ext_calc_credits_for_insert);