1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
6 * Copyright (C) 2009 Oracle. All rights reserved.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public
10 * License version 2 as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
18 #include <linux/sort.h>
19 #define MLOG_MASK_PREFIX ML_REFCOUNT
20 #include <cluster/masklog.h>
28 #include "buffer_head_io.h"
29 #include "blockcheck.h"
30 #include "refcounttree.h"
33 #include "extent_map.h"
36 #include <linux/bio.h>
37 #include <linux/blkdev.h>
38 #include <linux/gfp.h>
39 #include <linux/slab.h>
40 #include <linux/writeback.h>
41 #include <linux/pagevec.h>
42 #include <linux/swap.h>
44 struct ocfs2_cow_context {
48 struct ocfs2_extent_tree data_et;
49 struct ocfs2_refcount_tree *ref_tree;
50 struct buffer_head *ref_root_bh;
51 struct ocfs2_alloc_context *meta_ac;
52 struct ocfs2_alloc_context *data_ac;
53 struct ocfs2_cached_dealloc_ctxt dealloc;
54 int (*get_clusters)(struct ocfs2_cow_context *context,
55 u32 v_cluster, u32 *p_cluster,
57 unsigned int *extent_flags);
58 int (*cow_duplicate_clusters)(handle_t *handle,
59 struct ocfs2_cow_context *context,
60 u32 cpos, u32 old_cluster,
61 u32 new_cluster, u32 new_len);
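	/*
	 * These two callbacks keep the CoW engine generic: get_clusters
	 * resolves a virtual cluster to its physical cluster and extent
	 * flags, and cow_duplicate_clusters copies the old clusters into
	 * the newly allocated ones.  Presumably different callers (for
	 * example file data vs. xattr values) supply their own versions.
	 */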
64 static inline struct ocfs2_refcount_tree *
65 cache_info_to_refcount(struct ocfs2_caching_info *ci)
67 return container_of(ci, struct ocfs2_refcount_tree, rf_ci);
70 static int ocfs2_validate_refcount_block(struct super_block *sb,
71 struct buffer_head *bh)
74 struct ocfs2_refcount_block *rb =
75 (struct ocfs2_refcount_block *)bh->b_data;
77 mlog(0, "Validating refcount block %llu\n",
78 (unsigned long long)bh->b_blocknr);
80 BUG_ON(!buffer_uptodate(bh));
83 * If the ecc fails, we return the error but otherwise
84 * leave the filesystem running. We know any error is
85 * local to this block.
87 rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &rb->rf_check);
89 mlog(ML_ERROR, "Checksum failed for refcount block %llu\n",
90 (unsigned long long)bh->b_blocknr);
95 if (!OCFS2_IS_VALID_REFCOUNT_BLOCK(rb)) {
97 "Refcount block #%llu has bad signature %.*s",
98 (unsigned long long)bh->b_blocknr, 7,
103 if (le64_to_cpu(rb->rf_blkno) != bh->b_blocknr) {
105 "Refcount block #%llu has an invalid rf_blkno "
107 (unsigned long long)bh->b_blocknr,
108 (unsigned long long)le64_to_cpu(rb->rf_blkno));
112 if (le32_to_cpu(rb->rf_fs_generation) != OCFS2_SB(sb)->fs_generation) {
114 "Refcount block #%llu has an invalid "
115 "rf_fs_generation of #%u",
116 (unsigned long long)bh->b_blocknr,
117 le32_to_cpu(rb->rf_fs_generation));
124 static int ocfs2_read_refcount_block(struct ocfs2_caching_info *ci,
126 struct buffer_head **bh)
129 struct buffer_head *tmp = *bh;
131 rc = ocfs2_read_block(ci, rb_blkno, &tmp,
132 ocfs2_validate_refcount_block);
134 /* If ocfs2_read_block() got us a new bh, pass it up. */
141 static u64 ocfs2_refcount_cache_owner(struct ocfs2_caching_info *ci)
143 struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
148 static struct super_block *
149 ocfs2_refcount_cache_get_super(struct ocfs2_caching_info *ci)
151 struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
156 static void ocfs2_refcount_cache_lock(struct ocfs2_caching_info *ci)
158 struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
160 spin_lock(&rf->rf_lock);
163 static void ocfs2_refcount_cache_unlock(struct ocfs2_caching_info *ci)
165 struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
167 spin_unlock(&rf->rf_lock);
170 static void ocfs2_refcount_cache_io_lock(struct ocfs2_caching_info *ci)
172 struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
174 mutex_lock(&rf->rf_io_mutex);
177 static void ocfs2_refcount_cache_io_unlock(struct ocfs2_caching_info *ci)
179 struct ocfs2_refcount_tree *rf = cache_info_to_refcount(ci);
181 mutex_unlock(&rf->rf_io_mutex);
184 static const struct ocfs2_caching_operations ocfs2_refcount_caching_ops = {
185 .co_owner = ocfs2_refcount_cache_owner,
186 .co_get_super = ocfs2_refcount_cache_get_super,
187 .co_cache_lock = ocfs2_refcount_cache_lock,
188 .co_cache_unlock = ocfs2_refcount_cache_unlock,
189 .co_io_lock = ocfs2_refcount_cache_io_lock,
190 .co_io_unlock = ocfs2_refcount_cache_io_unlock,
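	/*
	 * Each refcount tree carries its own ocfs2_caching_info, so its
	 * refcount blocks are cached and I/O-serialized per tree (via
	 * rf_lock and rf_io_mutex above) rather than through any inode.
	 */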
193 static struct ocfs2_refcount_tree *
194 ocfs2_find_refcount_tree(struct ocfs2_super *osb, u64 blkno)
196 struct rb_node *n = osb->osb_rf_lock_tree.rb_node;
197 struct ocfs2_refcount_tree *tree = NULL;
200 tree = rb_entry(n, struct ocfs2_refcount_tree, rf_node);
202 if (blkno < tree->rf_blkno)
204 else if (blkno > tree->rf_blkno)
213 /* osb_lock is already locked. */
214 static void ocfs2_insert_refcount_tree(struct ocfs2_super *osb,
215 struct ocfs2_refcount_tree *new)
217 u64 rf_blkno = new->rf_blkno;
218 struct rb_node *parent = NULL;
219 struct rb_node **p = &osb->osb_rf_lock_tree.rb_node;
220 struct ocfs2_refcount_tree *tmp;
225 tmp = rb_entry(parent, struct ocfs2_refcount_tree,
228 if (rf_blkno < tmp->rf_blkno)
230 else if (rf_blkno > tmp->rf_blkno)
233 /* This should never happen! */
234 mlog(ML_ERROR, "Duplicate refcount block %llu found!\n",
235 (unsigned long long)rf_blkno);
240 rb_link_node(&new->rf_node, parent, p);
241 rb_insert_color(&new->rf_node, &osb->osb_rf_lock_tree);
244 static void ocfs2_free_refcount_tree(struct ocfs2_refcount_tree *tree)
246 ocfs2_metadata_cache_exit(&tree->rf_ci);
247 ocfs2_simple_drop_lockres(OCFS2_SB(tree->rf_sb), &tree->rf_lockres);
248 ocfs2_lock_res_free(&tree->rf_lockres);
253 ocfs2_erase_refcount_tree_from_list_no_lock(struct ocfs2_super *osb,
254 struct ocfs2_refcount_tree *tree)
256 rb_erase(&tree->rf_node, &osb->osb_rf_lock_tree);
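	/*
	 * osb_ref_tree_lru caches the most recently used refcount tree
	 * (set in ocfs2_get_refcount_tree()), so drop it if it points at
	 * the tree we are erasing.
	 */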
257 if (osb->osb_ref_tree_lru && osb->osb_ref_tree_lru == tree)
258 osb->osb_ref_tree_lru = NULL;
261 static void ocfs2_erase_refcount_tree_from_list(struct ocfs2_super *osb,
262 struct ocfs2_refcount_tree *tree)
264 spin_lock(&osb->osb_lock);
265 ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
266 spin_unlock(&osb->osb_lock);
269 void ocfs2_kref_remove_refcount_tree(struct kref *kref)
271 struct ocfs2_refcount_tree *tree =
272 container_of(kref, struct ocfs2_refcount_tree, rf_getcnt);
274 ocfs2_free_refcount_tree(tree);
278 ocfs2_refcount_tree_get(struct ocfs2_refcount_tree *tree)
280 kref_get(&tree->rf_getcnt);
284 ocfs2_refcount_tree_put(struct ocfs2_refcount_tree *tree)
286 kref_put(&tree->rf_getcnt, ocfs2_kref_remove_refcount_tree);
289 static inline void ocfs2_init_refcount_tree_ci(struct ocfs2_refcount_tree *new,
290 struct super_block *sb)
292 ocfs2_metadata_cache_init(&new->rf_ci, &ocfs2_refcount_caching_ops);
293 mutex_init(&new->rf_io_mutex);
295 spin_lock_init(&new->rf_lock);
298 static inline void ocfs2_init_refcount_tree_lock(struct ocfs2_super *osb,
299 struct ocfs2_refcount_tree *new,
300 u64 rf_blkno, u32 generation)
302 init_rwsem(&new->rf_sem);
303 ocfs2_refcount_lock_res_init(&new->rf_lockres, osb,
304 rf_blkno, generation);
307 static struct ocfs2_refcount_tree*
308 ocfs2_allocate_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno)
310 struct ocfs2_refcount_tree *new;
312 new = kzalloc(sizeof(struct ocfs2_refcount_tree), GFP_NOFS);
316 new->rf_blkno = rf_blkno;
317 kref_init(&new->rf_getcnt);
318 ocfs2_init_refcount_tree_ci(new, osb->sb);
323 static int ocfs2_get_refcount_tree(struct ocfs2_super *osb, u64 rf_blkno,
324 struct ocfs2_refcount_tree **ret_tree)
327 struct ocfs2_refcount_tree *tree, *new = NULL;
328 struct buffer_head *ref_root_bh = NULL;
329 struct ocfs2_refcount_block *ref_rb;
331 spin_lock(&osb->osb_lock);
332 if (osb->osb_ref_tree_lru &&
333 osb->osb_ref_tree_lru->rf_blkno == rf_blkno)
334 tree = osb->osb_ref_tree_lru;
336 tree = ocfs2_find_refcount_tree(osb, rf_blkno);
340 spin_unlock(&osb->osb_lock);
342 new = ocfs2_allocate_refcount_tree(osb, rf_blkno);
349 * We need the generation to create the refcount tree lock and since
350 * it isn't changed during tree modification, we are safe to read it
351 * here without protection.
352 * We also have to purge the cache after we create the lock since the
353 * refcount block may have stale data. It can only be trusted once
354 * we hold the refcount lock.
356 ret = ocfs2_read_refcount_block(&new->rf_ci, rf_blkno, &ref_root_bh);
359 ocfs2_metadata_cache_exit(&new->rf_ci);
364 ref_rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
365 new->rf_generation = le32_to_cpu(ref_rb->rf_generation);
366 ocfs2_init_refcount_tree_lock(osb, new, rf_blkno,
368 ocfs2_metadata_cache_purge(&new->rf_ci);
370 spin_lock(&osb->osb_lock);
371 tree = ocfs2_find_refcount_tree(osb, rf_blkno);
375 ocfs2_insert_refcount_tree(osb, new);
383 osb->osb_ref_tree_lru = tree;
385 spin_unlock(&osb->osb_lock);
388 ocfs2_free_refcount_tree(new);
394 static int ocfs2_get_refcount_block(struct inode *inode, u64 *ref_blkno)
397 struct buffer_head *di_bh = NULL;
398 struct ocfs2_dinode *di;
400 ret = ocfs2_read_inode_block(inode, &di_bh);
406 BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
408 di = (struct ocfs2_dinode *)di_bh->b_data;
409 *ref_blkno = le64_to_cpu(di->i_refcount_loc);
415 static int __ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
416 struct ocfs2_refcount_tree *tree, int rw)
420 ret = ocfs2_refcount_lock(tree, rw);
427 down_write(&tree->rf_sem);
429 down_read(&tree->rf_sem);
436 * Lock the refcount tree pointed to by ref_blkno and return the tree.
437 * In most cases, we lock the tree and read the refcount block.
438 * So read it here if the caller really needs it.
440 * If the tree has been re-created by another node, free the
441 * old one and re-create it.
443 int ocfs2_lock_refcount_tree(struct ocfs2_super *osb,
444 u64 ref_blkno, int rw,
445 struct ocfs2_refcount_tree **ret_tree,
446 struct buffer_head **ref_bh)
448 int ret, delete_tree = 0;
449 struct ocfs2_refcount_tree *tree = NULL;
450 struct buffer_head *ref_root_bh = NULL;
451 struct ocfs2_refcount_block *rb;
454 ret = ocfs2_get_refcount_tree(osb, ref_blkno, &tree);
460 ocfs2_refcount_tree_get(tree);
462 ret = __ocfs2_lock_refcount_tree(osb, tree, rw);
465 ocfs2_refcount_tree_put(tree);
469 ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
473 ocfs2_unlock_refcount_tree(osb, tree, rw);
474 ocfs2_refcount_tree_put(tree);
478 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
480 * If the refcount block has been freed and re-created, we may need
481 * to recreate the refcount tree also.
483 * Here we just remove the tree from the rb-tree, and the last
484 * kref holder will unlock and delete this refcount_tree.
485 * Then we goto "again" and ocfs2_get_refcount_tree will create
486 * the new refcount tree for us.
488 if (tree->rf_generation != le32_to_cpu(rb->rf_generation)) {
489 if (!tree->rf_removed) {
490 ocfs2_erase_refcount_tree_from_list(osb, tree);
491 tree->rf_removed = 1;
495 ocfs2_unlock_refcount_tree(osb, tree, rw);
497 * We get an extra reference when we create the refcount
498 * tree, so another put will destroy it.
501 ocfs2_refcount_tree_put(tree);
509 *ref_bh = ref_root_bh;
517 int ocfs2_lock_refcount_tree_by_inode(struct inode *inode, int rw,
518 struct ocfs2_refcount_tree **ret_tree,
519 struct buffer_head **ref_bh)
524 ret = ocfs2_get_refcount_block(inode, &ref_blkno);
530 return ocfs2_lock_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno,
531 rw, ret_tree, ref_bh);
534 void ocfs2_unlock_refcount_tree(struct ocfs2_super *osb,
535 struct ocfs2_refcount_tree *tree, int rw)
538 up_write(&tree->rf_sem);
540 up_read(&tree->rf_sem);
542 ocfs2_refcount_unlock(tree, rw);
543 ocfs2_refcount_tree_put(tree);
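/*
 * Free every refcount tree still cached on this super block.  This is
 * likely only safe on teardown paths (e.g. unmount), when no tree can
 * still be in use.
 */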
546 void ocfs2_purge_refcount_trees(struct ocfs2_super *osb)
548 struct rb_node *node;
549 struct ocfs2_refcount_tree *tree;
550 struct rb_root *root = &osb->osb_rf_lock_tree;
552 while ((node = rb_last(root)) != NULL) {
553 tree = rb_entry(node, struct ocfs2_refcount_tree, rf_node);
555 mlog(0, "Purge tree %llu\n",
556 (unsigned long long) tree->rf_blkno);
558 rb_erase(&tree->rf_node, root);
559 ocfs2_free_refcount_tree(tree);
564 * Create a refcount tree for an inode.
565 * We assume the inode is already locked.
567 static int ocfs2_create_refcount_tree(struct inode *inode,
568 struct buffer_head *di_bh)
571 handle_t *handle = NULL;
572 struct ocfs2_alloc_context *meta_ac = NULL;
573 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
574 struct ocfs2_inode_info *oi = OCFS2_I(inode);
575 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
576 struct buffer_head *new_bh = NULL;
577 struct ocfs2_refcount_block *rb;
578 struct ocfs2_refcount_tree *new_tree = NULL, *tree = NULL;
579 u16 suballoc_bit_start;
583 BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);
585 mlog(0, "create tree for inode %lu\n", inode->i_ino);
587 ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
593 handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_CREATE_CREDITS);
594 if (IS_ERR(handle)) {
595 ret = PTR_ERR(handle);
600 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
601 OCFS2_JOURNAL_ACCESS_WRITE);
607 ret = ocfs2_claim_metadata(osb, handle, meta_ac, 1,
608 &suballoc_bit_start, &num_got,
615 new_tree = ocfs2_allocate_refcount_tree(osb, first_blkno);
622 new_bh = sb_getblk(inode->i_sb, first_blkno);
623 ocfs2_set_new_buffer_uptodate(&new_tree->rf_ci, new_bh);
625 ret = ocfs2_journal_access_rb(handle, &new_tree->rf_ci, new_bh,
626 OCFS2_JOURNAL_ACCESS_CREATE);
632 /* Initialize ocfs2_refcount_block. */
633 rb = (struct ocfs2_refcount_block *)new_bh->b_data;
634 memset(rb, 0, inode->i_sb->s_blocksize);
635 strcpy((void *)rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
636 rb->rf_suballoc_slot = cpu_to_le16(osb->slot_num);
637 rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
638 rb->rf_fs_generation = cpu_to_le32(osb->fs_generation);
639 rb->rf_blkno = cpu_to_le64(first_blkno);
640 rb->rf_count = cpu_to_le32(1);
641 rb->rf_records.rl_count =
642 cpu_to_le16(ocfs2_refcount_recs_per_rb(osb->sb));
643 spin_lock(&osb->osb_lock);
644 rb->rf_generation = osb->s_next_generation++;
645 spin_unlock(&osb->osb_lock);
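	/*
	 * rf_generation is what lets a later ocfs2_lock_refcount_tree()
	 * detect that a refcount tree at this block number was freed and
	 * re-created; see the generation check there.
	 */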
647 ocfs2_journal_dirty(handle, new_bh);
649 spin_lock(&oi->ip_lock);
650 oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
651 di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
652 di->i_refcount_loc = cpu_to_le64(first_blkno);
653 spin_unlock(&oi->ip_lock);
655 mlog(0, "created tree for inode %lu, refblock %llu\n",
656 inode->i_ino, (unsigned long long)first_blkno);
658 ocfs2_journal_dirty(handle, di_bh);
661 * We have to init the tree lock here since it will use
662 * the generation number to create it.
664 new_tree->rf_generation = le32_to_cpu(rb->rf_generation);
665 ocfs2_init_refcount_tree_lock(osb, new_tree, first_blkno,
666 new_tree->rf_generation);
668 spin_lock(&osb->osb_lock);
669 tree = ocfs2_find_refcount_tree(osb, first_blkno);
672 * We've just created a new refcount tree in this block. If
673 * we found a refcount tree on the ocfs2_super, it must be
674 * one we just deleted. We free the old tree before
675 * inserting the new tree.
677 BUG_ON(tree && tree->rf_generation == new_tree->rf_generation);
679 ocfs2_erase_refcount_tree_from_list_no_lock(osb, tree);
680 ocfs2_insert_refcount_tree(osb, new_tree);
681 spin_unlock(&osb->osb_lock);
684 ocfs2_refcount_tree_put(tree);
687 ocfs2_commit_trans(osb, handle);
691 ocfs2_metadata_cache_exit(&new_tree->rf_ci);
697 ocfs2_free_alloc_context(meta_ac);
702 static int ocfs2_set_refcount_tree(struct inode *inode,
703 struct buffer_head *di_bh,
707 handle_t *handle = NULL;
708 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
709 struct ocfs2_inode_info *oi = OCFS2_I(inode);
710 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
711 struct buffer_head *ref_root_bh = NULL;
712 struct ocfs2_refcount_block *rb;
713 struct ocfs2_refcount_tree *ref_tree;
715 BUG_ON(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL);
717 ret = ocfs2_lock_refcount_tree(osb, refcount_loc, 1,
718 &ref_tree, &ref_root_bh);
724 handle = ocfs2_start_trans(osb, OCFS2_REFCOUNT_TREE_SET_CREDITS);
725 if (IS_ERR(handle)) {
726 ret = PTR_ERR(handle);
731 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
732 OCFS2_JOURNAL_ACCESS_WRITE);
738 ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, ref_root_bh,
739 OCFS2_JOURNAL_ACCESS_WRITE);
745 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
746 le32_add_cpu(&rb->rf_count, 1);
748 ocfs2_journal_dirty(handle, ref_root_bh);
750 spin_lock(&oi->ip_lock);
751 oi->ip_dyn_features |= OCFS2_HAS_REFCOUNT_FL;
752 di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
753 di->i_refcount_loc = cpu_to_le64(refcount_loc);
754 spin_unlock(&oi->ip_lock);
755 ocfs2_journal_dirty(handle, di_bh);
758 ocfs2_commit_trans(osb, handle);
760 ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
766 int ocfs2_remove_refcount_tree(struct inode *inode, struct buffer_head *di_bh)
768 int ret, delete_tree = 0;
769 handle_t *handle = NULL;
770 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
771 struct ocfs2_inode_info *oi = OCFS2_I(inode);
772 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
773 struct ocfs2_refcount_block *rb;
774 struct inode *alloc_inode = NULL;
775 struct buffer_head *alloc_bh = NULL;
776 struct buffer_head *blk_bh = NULL;
777 struct ocfs2_refcount_tree *ref_tree;
778 int credits = OCFS2_REFCOUNT_TREE_REMOVE_CREDITS;
779 u64 blk = 0, bg_blkno = 0, ref_blkno = le64_to_cpu(di->i_refcount_loc);
782 if (!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL))
786 ret = ocfs2_lock_refcount_tree(osb, ref_blkno, 1, &ref_tree, &blk_bh);
792 rb = (struct ocfs2_refcount_block *)blk_bh->b_data;
795 * If we are the last user, we need to free the block.
796 * So lock the allocator ahead of time.
798 if (le32_to_cpu(rb->rf_count) == 1) {
799 blk = le64_to_cpu(rb->rf_blkno);
800 bit = le16_to_cpu(rb->rf_suballoc_bit);
801 bg_blkno = ocfs2_which_suballoc_group(blk, bit);
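		/*
		 * Remember the suballocator group holding this block so the
		 * bit can be freed via ocfs2_free_suballoc_bits() once the
		 * last reference is dropped below.
		 */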
803 alloc_inode = ocfs2_get_system_file_inode(osb,
804 EXTENT_ALLOC_SYSTEM_INODE,
805 le16_to_cpu(rb->rf_suballoc_slot));
811 mutex_lock(&alloc_inode->i_mutex);
813 ret = ocfs2_inode_lock(alloc_inode, &alloc_bh, 1);
819 credits += OCFS2_SUBALLOC_FREE;
822 handle = ocfs2_start_trans(osb, credits);
823 if (IS_ERR(handle)) {
824 ret = PTR_ERR(handle);
829 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
830 OCFS2_JOURNAL_ACCESS_WRITE);
836 ret = ocfs2_journal_access_rb(handle, &ref_tree->rf_ci, blk_bh,
837 OCFS2_JOURNAL_ACCESS_WRITE);
843 spin_lock(&oi->ip_lock);
844 oi->ip_dyn_features &= ~OCFS2_HAS_REFCOUNT_FL;
845 di->i_dyn_features = cpu_to_le16(oi->ip_dyn_features);
846 di->i_refcount_loc = 0;
847 spin_unlock(&oi->ip_lock);
848 ocfs2_journal_dirty(handle, di_bh);
850 le32_add_cpu(&rb->rf_count, -1);
851 ocfs2_journal_dirty(handle, blk_bh);
855 ocfs2_erase_refcount_tree_from_list(osb, ref_tree);
856 ret = ocfs2_free_suballoc_bits(handle, alloc_inode,
857 alloc_bh, bit, bg_blkno, 1);
863 ocfs2_commit_trans(osb, handle);
866 ocfs2_inode_unlock(alloc_inode, 1);
871 mutex_unlock(&alloc_inode->i_mutex);
875 ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
877 ocfs2_refcount_tree_put(ref_tree);
883 static void ocfs2_find_refcount_rec_in_rl(struct ocfs2_caching_info *ci,
884 struct buffer_head *ref_leaf_bh,
885 u64 cpos, unsigned int len,
886 struct ocfs2_refcount_rec *ret_rec,
890 struct ocfs2_refcount_block *rb =
891 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
892 struct ocfs2_refcount_rec *rec = NULL;
894 for (; i < le16_to_cpu(rb->rf_records.rl_used); i++) {
895 rec = &rb->rf_records.rl_recs[i];
897 if (le64_to_cpu(rec->r_cpos) +
898 le32_to_cpu(rec->r_clusters) <= cpos)
900 else if (le64_to_cpu(rec->r_cpos) > cpos)
903 /* ok, cpos falls in this rec. Just return. */
910 /* We hit a hole here, so fake the rec. */
911 ret_rec->r_cpos = cpu_to_le64(cpos);
912 ret_rec->r_refcount = 0;
913 if (i < le16_to_cpu(rb->rf_records.rl_used) &&
914 le64_to_cpu(rec->r_cpos) < cpos + len)
915 ret_rec->r_clusters =
916 cpu_to_le32(le64_to_cpu(rec->r_cpos) - cpos);
918 ret_rec->r_clusters = cpu_to_le32(len);
926 * Given a cpos and len, try to find the refcount record which contains cpos.
927 * 1. If cpos can be found in one refcount record, return the record.
928 * 2. If cpos can't be found, return a fake record which starts at cpos
929 *    and ends at the smaller of cpos+len and the start of the next record.
930 * This fake record has r_refcount = 0.
932 static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci,
933 struct buffer_head *ref_root_bh,
934 u64 cpos, unsigned int len,
935 struct ocfs2_refcount_rec *ret_rec,
937 struct buffer_head **ret_bh)
939 int ret = 0, i, found;
941 struct ocfs2_extent_list *el;
942 struct ocfs2_extent_rec *tmp, *rec = NULL;
943 struct ocfs2_extent_block *eb;
944 struct buffer_head *eb_bh = NULL, *ref_leaf_bh = NULL;
945 struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
946 struct ocfs2_refcount_block *rb =
947 (struct ocfs2_refcount_block *)ref_root_bh->b_data;
949 if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL)) {
950 ocfs2_find_refcount_rec_in_rl(ci, ref_root_bh, cpos, len,
952 *ret_bh = ref_root_bh;
958 low_cpos = cpos & OCFS2_32BIT_POS_MASK;
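	/*
	 * Refcount cpos values are 64 bits wide, but the extent records of
	 * the refcount b-tree only store a 32-bit e_cpos, so the lookup
	 * below is keyed on the low 32 bits (see the split helpers further
	 * down for the same trick).
	 */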
960 if (el->l_tree_depth) {
961 ret = ocfs2_find_leaf(ci, el, low_cpos, &eb_bh);
967 eb = (struct ocfs2_extent_block *) eb_bh->b_data;
970 if (el->l_tree_depth) {
972 "refcount tree %llu has non zero tree "
973 "depth in leaf btree block %llu\n",
974 (unsigned long long)ocfs2_metadata_cache_owner(ci),
975 (unsigned long long)eb_bh->b_blocknr);
982 for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {
983 rec = &el->l_recs[i];
985 if (le32_to_cpu(rec->e_cpos) <= low_cpos) {
991 /* Adjust len when there is an ocfs2_extent_rec after it. */
992 if (found && i < le16_to_cpu(el->l_next_free_rec) - 1) {
993 tmp = &el->l_recs[i+1];
995 if (le32_to_cpu(tmp->e_cpos) < cpos + len)
996 len = le32_to_cpu(tmp->e_cpos) - cpos;
999 ret = ocfs2_read_refcount_block(ci, le64_to_cpu(rec->e_blkno),
1006 ocfs2_find_refcount_rec_in_rl(ci, ref_leaf_bh, cpos, len,
1008 *ret_bh = ref_leaf_bh;
1014 enum ocfs2_ref_rec_contig {
1015 REF_CONTIG_NONE = 0,
1016 REF_CONTIG_LEFT,
1017 REF_CONTIG_RIGHT,
1018 REF_CONTIG_LEFTRIGHT,
1021 static enum ocfs2_ref_rec_contig
1022 ocfs2_refcount_rec_adjacent(struct ocfs2_refcount_block *rb,
1025 if ((rb->rf_records.rl_recs[index].r_refcount ==
1026 rb->rf_records.rl_recs[index + 1].r_refcount) &&
1027 (le64_to_cpu(rb->rf_records.rl_recs[index].r_cpos) +
1028 le32_to_cpu(rb->rf_records.rl_recs[index].r_clusters) ==
1029 le64_to_cpu(rb->rf_records.rl_recs[index + 1].r_cpos)))
1030 return REF_CONTIG_RIGHT;
1032 return REF_CONTIG_NONE;
1035 static enum ocfs2_ref_rec_contig
1036 ocfs2_refcount_rec_contig(struct ocfs2_refcount_block *rb,
1039 enum ocfs2_ref_rec_contig ret = REF_CONTIG_NONE;
1041 if (index < le16_to_cpu(rb->rf_records.rl_used) - 1)
1042 ret = ocfs2_refcount_rec_adjacent(rb, index);
1045 enum ocfs2_ref_rec_contig tmp;
1047 tmp = ocfs2_refcount_rec_adjacent(rb, index - 1);
1049 if (tmp == REF_CONTIG_RIGHT) {
1050 if (ret == REF_CONTIG_RIGHT)
1051 ret = REF_CONTIG_LEFTRIGHT;
1053 ret = REF_CONTIG_LEFT;
1060 static void ocfs2_rotate_refcount_rec_left(struct ocfs2_refcount_block *rb,
1063 BUG_ON(rb->rf_records.rl_recs[index].r_refcount !=
1064 rb->rf_records.rl_recs[index+1].r_refcount);
1066 le32_add_cpu(&rb->rf_records.rl_recs[index].r_clusters,
1067 le32_to_cpu(rb->rf_records.rl_recs[index+1].r_clusters));
1069 if (index < le16_to_cpu(rb->rf_records.rl_used) - 2)
1070 memmove(&rb->rf_records.rl_recs[index + 1],
1071 &rb->rf_records.rl_recs[index + 2],
1072 sizeof(struct ocfs2_refcount_rec) *
1073 (le16_to_cpu(rb->rf_records.rl_used) - index - 2));
1075 memset(&rb->rf_records.rl_recs[le16_to_cpu(rb->rf_records.rl_used) - 1],
1076 0, sizeof(struct ocfs2_refcount_rec));
1077 le16_add_cpu(&rb->rf_records.rl_used, -1);
1081 * Merge the refcount rec if we are contiguous with the adjacent recs.
1083 static void ocfs2_refcount_rec_merge(struct ocfs2_refcount_block *rb,
1086 enum ocfs2_ref_rec_contig contig =
1087 ocfs2_refcount_rec_contig(rb, index);
1089 if (contig == REF_CONTIG_NONE)
1092 if (contig == REF_CONTIG_LEFT || contig == REF_CONTIG_LEFTRIGHT) {
1097 ocfs2_rotate_refcount_rec_left(rb, index);
1099 if (contig == REF_CONTIG_LEFTRIGHT)
1100 ocfs2_rotate_refcount_rec_left(rb, index);
1104 * Change the refcount indexed by "index" in ref_leaf_bh.
1105 * If refcount reaches 0, remove it.
1107 static int ocfs2_change_refcount_rec(handle_t *handle,
1108 struct ocfs2_caching_info *ci,
1109 struct buffer_head *ref_leaf_bh,
1110 int index, int change)
1113 struct ocfs2_refcount_block *rb =
1114 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1115 struct ocfs2_refcount_list *rl = &rb->rf_records;
1116 struct ocfs2_refcount_rec *rec = &rl->rl_recs[index];
1118 ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1119 OCFS2_JOURNAL_ACCESS_WRITE);
1125 mlog(0, "change index %d, old count %u, change %d\n", index,
1126 le32_to_cpu(rec->r_refcount), change);
1127 le32_add_cpu(&rec->r_refcount, change);
1129 if (!rec->r_refcount) {
1130 if (index != le16_to_cpu(rl->rl_used) - 1) {
1131 memmove(rec, rec + 1,
1132 (le16_to_cpu(rl->rl_used) - index - 1) *
1133 sizeof(struct ocfs2_refcount_rec));
1134 memset(&rl->rl_recs[le16_to_cpu(rl->rl_used) - 1],
1135 0, sizeof(struct ocfs2_refcount_rec));
1138 le16_add_cpu(&rl->rl_used, -1);
1140 ocfs2_refcount_rec_merge(rb, index);
1142 ret = ocfs2_journal_dirty(handle, ref_leaf_bh);
1149 static int ocfs2_expand_inline_ref_root(handle_t *handle,
1150 struct ocfs2_caching_info *ci,
1151 struct buffer_head *ref_root_bh,
1152 struct buffer_head **ref_leaf_bh,
1153 struct ocfs2_alloc_context *meta_ac)
1156 u16 suballoc_bit_start;
1159 struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
1160 struct buffer_head *new_bh = NULL;
1161 struct ocfs2_refcount_block *new_rb;
1162 struct ocfs2_refcount_block *root_rb =
1163 (struct ocfs2_refcount_block *)ref_root_bh->b_data;
1165 ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
1166 OCFS2_JOURNAL_ACCESS_WRITE);
1172 ret = ocfs2_claim_metadata(OCFS2_SB(sb), handle, meta_ac, 1,
1173 &suballoc_bit_start, &num_got,
1180 new_bh = sb_getblk(sb, blkno);
1181 if (new_bh == NULL) {
1186 ocfs2_set_new_buffer_uptodate(ci, new_bh);
1188 ret = ocfs2_journal_access_rb(handle, ci, new_bh,
1189 OCFS2_JOURNAL_ACCESS_CREATE);
1196 * Initialize ocfs2_refcount_block.
1197 * It should contain the same information as the old root,
1198 * so just memcpy it and change the corresponding fields.
1200 memcpy(new_bh->b_data, ref_root_bh->b_data, sb->s_blocksize);
1202 new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
1203 new_rb->rf_suballoc_slot = cpu_to_le16(OCFS2_SB(sb)->slot_num);
1204 new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
1205 new_rb->rf_blkno = cpu_to_le64(blkno);
1206 new_rb->rf_cpos = cpu_to_le32(0);
1207 new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
1208 new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
1209 ocfs2_journal_dirty(handle, new_bh);
1211 /* Now change the root. */
1212 memset(&root_rb->rf_list, 0, sb->s_blocksize -
1213 offsetof(struct ocfs2_refcount_block, rf_list));
1214 root_rb->rf_list.l_count = cpu_to_le16(ocfs2_extent_recs_per_rb(sb));
1215 root_rb->rf_clusters = cpu_to_le32(1);
1216 root_rb->rf_list.l_next_free_rec = cpu_to_le16(1);
1217 root_rb->rf_list.l_recs[0].e_blkno = cpu_to_le64(blkno);
1218 root_rb->rf_list.l_recs[0].e_leaf_clusters = cpu_to_le16(1);
1219 root_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_TREE_FL);
1221 ocfs2_journal_dirty(handle, ref_root_bh);
1223 mlog(0, "new leaf block %llu, used %u\n", (unsigned long long)blkno,
1224 le16_to_cpu(new_rb->rf_records.rl_used));
1226 *ref_leaf_bh = new_bh;
1233 static int ocfs2_refcount_rec_no_intersect(struct ocfs2_refcount_rec *prev,
1234 struct ocfs2_refcount_rec *next)
1236 if (ocfs2_get_ref_rec_low_cpos(prev) + le32_to_cpu(prev->r_clusters) <=
1237 ocfs2_get_ref_rec_low_cpos(next))
1243 static int cmp_refcount_rec_by_low_cpos(const void *a, const void *b)
1245 const struct ocfs2_refcount_rec *l = a, *r = b;
1246 u32 l_cpos = ocfs2_get_ref_rec_low_cpos(l);
1247 u32 r_cpos = ocfs2_get_ref_rec_low_cpos(r);
1249 if (l_cpos > r_cpos)
1251 if (l_cpos < r_cpos)
1256 static int cmp_refcount_rec_by_cpos(const void *a, const void *b)
1258 const struct ocfs2_refcount_rec *l = a, *r = b;
1259 u64 l_cpos = le64_to_cpu(l->r_cpos);
1260 u64 r_cpos = le64_to_cpu(r->r_cpos);
1262 if (l_cpos > r_cpos)
1264 if (l_cpos < r_cpos)
1269 static void swap_refcount_rec(void *a, void *b, int size)
1271 struct ocfs2_refcount_rec *l = a, *r = b, tmp;
1273 tmp = *(struct ocfs2_refcount_rec *)l;
1274 *(struct ocfs2_refcount_rec *)l =
1275 *(struct ocfs2_refcount_rec *)r;
1276 *(struct ocfs2_refcount_rec *)r = tmp;
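/*
 * The helpers above are the compare/swap callbacks for the kernel sort()
 * (<linux/sort.h>); they are used below when a full leaf refcount block
 * has to be divided in ocfs2_divide_leaf_refcount_block().
 */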
1280 * The refcount recs are ordered by their 64-bit cpos,
1281 * but we will use the low 32 bits as the e_cpos in the b-tree,
1282 * so we need to make sure the chosen pos doesn't intersect with others.
1284 * Note: the refcount block is already sorted by the low 32-bit cpos,
1285 * so just try the middle pos first; we will exit as soon as we find
1286 * a good position.
1288 static int ocfs2_find_refcount_split_pos(struct ocfs2_refcount_list *rl,
1289 u32 *split_pos, int *split_index)
1291 int num_used = le16_to_cpu(rl->rl_used);
1292 int delta, middle = num_used / 2;
1294 for (delta = 0; delta < middle; delta++) {
1295 /* Let's check delta earlier than middle */
1296 if (ocfs2_refcount_rec_no_intersect(
1297 &rl->rl_recs[middle - delta - 1],
1298 &rl->rl_recs[middle - delta])) {
1299 *split_index = middle - delta;
1303 /* For even counts, don't walk off the end */
1304 if ((middle + delta + 1) == num_used)
1307 /* Now try delta past middle */
1308 if (ocfs2_refcount_rec_no_intersect(
1309 &rl->rl_recs[middle + delta],
1310 &rl->rl_recs[middle + delta + 1])) {
1311 *split_index = middle + delta + 1;
1316 if (delta >= middle)
1319 *split_pos = ocfs2_get_ref_rec_low_cpos(&rl->rl_recs[*split_index]);
1323 static int ocfs2_divide_leaf_refcount_block(struct buffer_head *ref_leaf_bh,
1324 struct buffer_head *new_bh,
1327 int split_index = 0, num_moved, ret;
1329 struct ocfs2_refcount_block *rb =
1330 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1331 struct ocfs2_refcount_list *rl = &rb->rf_records;
1332 struct ocfs2_refcount_block *new_rb =
1333 (struct ocfs2_refcount_block *)new_bh->b_data;
1334 struct ocfs2_refcount_list *new_rl = &new_rb->rf_records;
1336 mlog(0, "split old leaf refcount block %llu, count = %u, used = %u\n",
1337 (unsigned long long)ref_leaf_bh->b_blocknr,
1338 le16_to_cpu(rl->rl_count), le16_to_cpu(rl->rl_used));
1341 * XXX: Improvement later.
1342 * If we know all the high 32 bits of the cpos are the same, there is no need to sort.
1344 * In order to make the whole process safe, we do:
1345 * 1. sort the entries by their low 32 bit cpos first so that we can
1346 * find the split cpos easily.
1347 * 2. call ocfs2_insert_extent to insert the new refcount block.
1348 * 3. move the refcount rec to the new block.
1349 * 4. sort the entries by their 64 bit cpos.
1350 * 5. dirty the new_rb and rb.
1352 sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
1353 sizeof(struct ocfs2_refcount_rec),
1354 cmp_refcount_rec_by_low_cpos, swap_refcount_rec);
1356 ret = ocfs2_find_refcount_split_pos(rl, &cpos, &split_index);
1362 new_rb->rf_cpos = cpu_to_le32(cpos);
1364 /* move refcount records starting from split_index to the new block. */
1365 num_moved = le16_to_cpu(rl->rl_used) - split_index;
1366 memcpy(new_rl->rl_recs, &rl->rl_recs[split_index],
1367 num_moved * sizeof(struct ocfs2_refcount_rec));
1369 /* ok, remove the entries we just moved over to the other block. */
1370 memset(&rl->rl_recs[split_index], 0,
1371 num_moved * sizeof(struct ocfs2_refcount_rec));
1373 /* change old and new rl_used accordingly. */
1374 le16_add_cpu(&rl->rl_used, -num_moved);
1375 new_rl->rl_used = cpu_to_le16(num_moved);
1377 sort(&rl->rl_recs, le16_to_cpu(rl->rl_used),
1378 sizeof(struct ocfs2_refcount_rec),
1379 cmp_refcount_rec_by_cpos, swap_refcount_rec);
1381 sort(&new_rl->rl_recs, le16_to_cpu(new_rl->rl_used),
1382 sizeof(struct ocfs2_refcount_rec),
1383 cmp_refcount_rec_by_cpos, swap_refcount_rec);
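	/*
	 * At this point the old leaf keeps the records below split_index,
	 * the new leaf holds the ones that were moved, and both lists are
	 * sorted by their full 64-bit cpos again.
	 */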
1389 static int ocfs2_new_leaf_refcount_block(handle_t *handle,
1390 struct ocfs2_caching_info *ci,
1391 struct buffer_head *ref_root_bh,
1392 struct buffer_head *ref_leaf_bh,
1393 struct ocfs2_alloc_context *meta_ac)
1396 u16 suballoc_bit_start;
1397 u32 num_got, new_cpos;
1399 struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
1400 struct ocfs2_refcount_block *root_rb =
1401 (struct ocfs2_refcount_block *)ref_root_bh->b_data;
1402 struct buffer_head *new_bh = NULL;
1403 struct ocfs2_refcount_block *new_rb;
1404 struct ocfs2_extent_tree ref_et;
1406 BUG_ON(!(le32_to_cpu(root_rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL));
1408 ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
1409 OCFS2_JOURNAL_ACCESS_WRITE);
1415 ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1416 OCFS2_JOURNAL_ACCESS_WRITE);
1422 ret = ocfs2_claim_metadata(OCFS2_SB(sb), handle, meta_ac, 1,
1423 &suballoc_bit_start, &num_got,
1430 new_bh = sb_getblk(sb, blkno);
1431 if (new_bh == NULL) {
1436 ocfs2_set_new_buffer_uptodate(ci, new_bh);
1438 ret = ocfs2_journal_access_rb(handle, ci, new_bh,
1439 OCFS2_JOURNAL_ACCESS_CREATE);
1445 /* Initialize ocfs2_refcount_block. */
1446 new_rb = (struct ocfs2_refcount_block *)new_bh->b_data;
1447 memset(new_rb, 0, sb->s_blocksize);
1448 strcpy((void *)new_rb, OCFS2_REFCOUNT_BLOCK_SIGNATURE);
1449 new_rb->rf_suballoc_slot = cpu_to_le16(OCFS2_SB(sb)->slot_num);
1450 new_rb->rf_suballoc_bit = cpu_to_le16(suballoc_bit_start);
1451 new_rb->rf_fs_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
1452 new_rb->rf_blkno = cpu_to_le64(blkno);
1453 new_rb->rf_parent = cpu_to_le64(ref_root_bh->b_blocknr);
1454 new_rb->rf_flags = cpu_to_le32(OCFS2_REFCOUNT_LEAF_FL);
1455 new_rb->rf_records.rl_count =
1456 cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
1457 new_rb->rf_generation = root_rb->rf_generation;
1459 ret = ocfs2_divide_leaf_refcount_block(ref_leaf_bh, new_bh, &new_cpos);
1465 ocfs2_journal_dirty(handle, ref_leaf_bh);
1466 ocfs2_journal_dirty(handle, new_bh);
1468 ocfs2_init_refcount_extent_tree(&ref_et, ci, ref_root_bh);
1470 mlog(0, "insert new leaf block %llu at %u\n",
1471 (unsigned long long)new_bh->b_blocknr, new_cpos);
1473 /* Insert the new leaf block with the specific offset cpos. */
1474 ret = ocfs2_insert_extent(handle, &ref_et, new_cpos, new_bh->b_blocknr,
1484 static int ocfs2_expand_refcount_tree(handle_t *handle,
1485 struct ocfs2_caching_info *ci,
1486 struct buffer_head *ref_root_bh,
1487 struct buffer_head *ref_leaf_bh,
1488 struct ocfs2_alloc_context *meta_ac)
1491 struct buffer_head *expand_bh = NULL;
1493 if (ref_root_bh == ref_leaf_bh) {
1495 * the old root bh hasn't been expanded to a b-tree,
1496 * so expand it first.
1498 ret = ocfs2_expand_inline_ref_root(handle, ci, ref_root_bh,
1499 &expand_bh, meta_ac);
1505 expand_bh = ref_leaf_bh;
1510 /* Now add a new refcount block into the tree. */
1511 ret = ocfs2_new_leaf_refcount_block(handle, ci, ref_root_bh,
1512 expand_bh, meta_ac);
1521 * Adjust the extent rec in the b-tree representing ref_leaf_bh.
1523 * Only called when we have inserted a new refcount rec at index 0,
1524 * which means ocfs2_extent_rec.e_cpos may need to change.
1526 static int ocfs2_adjust_refcount_rec(handle_t *handle,
1527 struct ocfs2_caching_info *ci,
1528 struct buffer_head *ref_root_bh,
1529 struct buffer_head *ref_leaf_bh,
1530 struct ocfs2_refcount_rec *rec)
1533 u32 new_cpos, old_cpos;
1534 struct ocfs2_path *path = NULL;
1535 struct ocfs2_extent_tree et;
1536 struct ocfs2_refcount_block *rb =
1537 (struct ocfs2_refcount_block *)ref_root_bh->b_data;
1538 struct ocfs2_extent_list *el;
1540 if (!(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL))
1543 rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1544 old_cpos = le32_to_cpu(rb->rf_cpos);
1545 new_cpos = le64_to_cpu(rec->r_cpos) & OCFS2_32BIT_POS_MASK;
1546 if (old_cpos <= new_cpos)
1549 ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
1551 path = ocfs2_new_path_from_et(&et);
1558 ret = ocfs2_find_path(ci, path, old_cpos);
1565 * 2 more credits, one for the leaf refcount block, one for
1566 * the extent block that contains the extent rec.
1568 ret = ocfs2_extend_trans(handle, handle->h_buffer_credits + 2);
1574 ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1575 OCFS2_JOURNAL_ACCESS_WRITE);
1581 ret = ocfs2_journal_access_eb(handle, ci, path_leaf_bh(path),
1582 OCFS2_JOURNAL_ACCESS_WRITE);
1588 /* change the leaf extent block first. */
1589 el = path_leaf_el(path);
1591 for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++)
1592 if (le32_to_cpu(el->l_recs[i].e_cpos) == old_cpos)
1595 BUG_ON(i == le16_to_cpu(el->l_next_free_rec));
1597 el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);
1599 /* change the r_cpos in the leaf block. */
1600 rb->rf_cpos = cpu_to_le32(new_cpos);
1602 ocfs2_journal_dirty(handle, path_leaf_bh(path));
1603 ocfs2_journal_dirty(handle, ref_leaf_bh);
1606 ocfs2_free_path(path);
1610 static int ocfs2_insert_refcount_rec(handle_t *handle,
1611 struct ocfs2_caching_info *ci,
1612 struct buffer_head *ref_root_bh,
1613 struct buffer_head *ref_leaf_bh,
1614 struct ocfs2_refcount_rec *rec,
1616 struct ocfs2_alloc_context *meta_ac)
1619 struct ocfs2_refcount_block *rb =
1620 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1621 struct ocfs2_refcount_list *rf_list = &rb->rf_records;
1622 struct buffer_head *new_bh = NULL;
1624 BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
1626 if (rf_list->rl_used == rf_list->rl_count) {
1627 u64 cpos = le64_to_cpu(rec->r_cpos);
1628 u32 len = le32_to_cpu(rec->r_clusters);
1630 ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
1631 ref_leaf_bh, meta_ac);
1637 ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
1638 cpos, len, NULL, &index,
1645 ref_leaf_bh = new_bh;
1646 rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1647 rf_list = &rb->rf_records;
1650 ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1651 OCFS2_JOURNAL_ACCESS_WRITE);
1657 if (index < le16_to_cpu(rf_list->rl_used))
1658 memmove(&rf_list->rl_recs[index + 1],
1659 &rf_list->rl_recs[index],
1660 (le16_to_cpu(rf_list->rl_used) - index) *
1661 sizeof(struct ocfs2_refcount_rec));
1663 mlog(0, "insert refcount record start %llu, len %u, count %u "
1664 "to leaf block %llu at index %d\n",
1665 (unsigned long long)le64_to_cpu(rec->r_cpos),
1666 le32_to_cpu(rec->r_clusters), le32_to_cpu(rec->r_refcount),
1667 (unsigned long long)ref_leaf_bh->b_blocknr, index);
1669 rf_list->rl_recs[index] = *rec;
1671 le16_add_cpu(&rf_list->rl_used, 1);
1673 ocfs2_refcount_rec_merge(rb, index);
1675 ret = ocfs2_journal_dirty(handle, ref_leaf_bh);
1682 ret = ocfs2_adjust_refcount_rec(handle, ci,
1694 * Split the refcount_rec indexed by "index" in ref_leaf_bh.
1695 * This is much simpler than our b-tree code.
1696 * split_rec is the new refcount rec we want to insert.
1697 * If split_rec->r_refcount > 0, we are changing the refcount (in case we
1698 * increase the refcount or decrease a refcount to non-zero).
1699 * If split_rec->r_refcount == 0, we are punching a hole in the current
1700 * refcount rec (in case we decrease a refcount to zero).
1702 static int ocfs2_split_refcount_rec(handle_t *handle,
1703 struct ocfs2_caching_info *ci,
1704 struct buffer_head *ref_root_bh,
1705 struct buffer_head *ref_leaf_bh,
1706 struct ocfs2_refcount_rec *split_rec,
1708 struct ocfs2_alloc_context *meta_ac,
1709 struct ocfs2_cached_dealloc_ctxt *dealloc)
1713 struct ocfs2_refcount_block *rb =
1714 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1715 struct ocfs2_refcount_list *rf_list = &rb->rf_records;
1716 struct ocfs2_refcount_rec *orig_rec = &rf_list->rl_recs[index];
1717 struct ocfs2_refcount_rec *tail_rec = NULL;
1718 struct buffer_head *new_bh = NULL;
1720 BUG_ON(le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL);
1722 mlog(0, "original r_pos %llu, cluster %u, split %llu, cluster %u\n",
1723 le64_to_cpu(orig_rec->r_cpos), le32_to_cpu(orig_rec->r_clusters),
1724 le64_to_cpu(split_rec->r_cpos),
1725 le32_to_cpu(split_rec->r_clusters));
1728 * If we just need to split the header or tail clusters,
1729 * no more recs are needed; just splitting is OK.
1730 * Otherwise we need at least one new rec.
1732 if (!split_rec->r_refcount &&
1733 (split_rec->r_cpos == orig_rec->r_cpos ||
1734 le64_to_cpu(split_rec->r_cpos) +
1735 le32_to_cpu(split_rec->r_clusters) ==
1736 le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
1742 * We need one more rec if we split in the middle and the new rec has
1743 * some refcount in it.
1745 if (split_rec->r_refcount &&
1746 (split_rec->r_cpos != orig_rec->r_cpos &&
1747 le64_to_cpu(split_rec->r_cpos) +
1748 le32_to_cpu(split_rec->r_clusters) !=
1749 le64_to_cpu(orig_rec->r_cpos) + le32_to_cpu(orig_rec->r_clusters)))
1752 /* If the leaf block doesn't have enough records, expand it. */
1753 if (le16_to_cpu(rf_list->rl_used) + recs_need > rf_list->rl_count) {
1754 struct ocfs2_refcount_rec tmp_rec;
1755 u64 cpos = le64_to_cpu(orig_rec->r_cpos);
1756 len = le32_to_cpu(orig_rec->r_clusters);
1757 ret = ocfs2_expand_refcount_tree(handle, ci, ref_root_bh,
1758 ref_leaf_bh, meta_ac);
1765 * We have to re-get it since cpos may now have been moved to
1766 * another leaf block.
1768 ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
1769 cpos, len, &tmp_rec, &index,
1776 ref_leaf_bh = new_bh;
1777 rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1778 rf_list = &rb->rf_records;
1779 orig_rec = &rf_list->rl_recs[index];
1782 ret = ocfs2_journal_access_rb(handle, ci, ref_leaf_bh,
1783 OCFS2_JOURNAL_ACCESS_WRITE);
1790 * We have calculated how many new records we need and stored
1791 * the count in recs_need, so make enough space first by moving the records
1792 * after "index" to the end.
1794 if (index != le16_to_cpu(rf_list->rl_used) - 1)
1795 memmove(&rf_list->rl_recs[index + 1 + recs_need],
1796 &rf_list->rl_recs[index + 1],
1797 (le16_to_cpu(rf_list->rl_used) - index - 1) *
1798 sizeof(struct ocfs2_refcount_rec));
1800 len = (le64_to_cpu(orig_rec->r_cpos) +
1801 le32_to_cpu(orig_rec->r_clusters)) -
1802 (le64_to_cpu(split_rec->r_cpos) +
1803 le32_to_cpu(split_rec->r_clusters));
1806 * If we have "len", then we will split the tail and move it
1807 * to the end of the space we have just made.
1810 tail_rec = &rf_list->rl_recs[index + recs_need];
1812 memcpy(tail_rec, orig_rec, sizeof(struct ocfs2_refcount_rec));
1813 le64_add_cpu(&tail_rec->r_cpos,
1814 le32_to_cpu(tail_rec->r_clusters) - len);
1815 tail_rec->r_clusters = cpu_to_le32(len);
1819 * If the split pos isn't the same as the original one, we need to
1820 * split in the head.
1822 * Note: there is a chance that split_rec.r_refcount == 0,
1823 * recs_need == 0 and len > 0, which means we are just cutting the head
1824 * off orig_rec. In that case orig_rec has already been modified above,
1825 * so the r_cpos check alone is unreliable (hence the tail_rec != orig_rec test).
1827 if (split_rec->r_cpos != orig_rec->r_cpos && tail_rec != orig_rec) {
1828 len = le64_to_cpu(split_rec->r_cpos) -
1829 le64_to_cpu(orig_rec->r_cpos);
1830 orig_rec->r_clusters = cpu_to_le32(len);
1834 le16_add_cpu(&rf_list->rl_used, recs_need);
1836 if (split_rec->r_refcount) {
1837 rf_list->rl_recs[index] = *split_rec;
1838 mlog(0, "insert refcount record start %llu, len %u, count %u "
1839 "to leaf block %llu at index %d\n",
1840 (unsigned long long)le64_to_cpu(split_rec->r_cpos),
1841 le32_to_cpu(split_rec->r_clusters),
1842 le32_to_cpu(split_rec->r_refcount),
1843 (unsigned long long)ref_leaf_bh->b_blocknr, index);
1845 ocfs2_refcount_rec_merge(rb, index);
1848 ret = ocfs2_journal_dirty(handle, ref_leaf_bh);
1857 static int __ocfs2_increase_refcount(handle_t *handle,
1858 struct ocfs2_caching_info *ci,
1859 struct buffer_head *ref_root_bh,
1861 struct ocfs2_alloc_context *meta_ac,
1862 struct ocfs2_cached_dealloc_ctxt *dealloc)
1865 struct buffer_head *ref_leaf_bh = NULL;
1866 struct ocfs2_refcount_rec rec;
1867 unsigned int set_len = 0;
1869 mlog(0, "Tree owner %llu, add refcount start %llu, len %u\n",
1870 (unsigned long long)ocfs2_metadata_cache_owner(ci),
1871 (unsigned long long)cpos, len);
1874 ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
1875 cpos, len, &rec, &index,
1882 set_len = le32_to_cpu(rec.r_clusters);
1885 * Here we may encounter 3 situations:
1887 * 1. If we find an already existing record, and the length
1888 * is the same, cool, we just need to increase the r_refcount
1890 * 2. If we find a hole, just insert it with r_refcount = 1.
1894 * 3. If we are in the middle of one extent record, split it.
1894 if (rec.r_refcount && le64_to_cpu(rec.r_cpos) == cpos &&
1896 mlog(0, "increase refcount rec, start %llu, len %u, "
1897 "count %u\n", (unsigned long long)cpos, set_len,
1898 le32_to_cpu(rec.r_refcount));
1899 ret = ocfs2_change_refcount_rec(handle, ci,
1900 ref_leaf_bh, index, 1);
1905 } else if (!rec.r_refcount) {
1906 rec.r_refcount = cpu_to_le32(1);
1908 mlog(0, "insert refcount rec, start %llu, len %u\n",
1909 (unsigned long long)le64_to_cpu(rec.r_cpos),
1911 ret = ocfs2_insert_refcount_rec(handle, ci, ref_root_bh,
1913 &rec, index, meta_ac);
1919 set_len = min((u64)(cpos + len),
1920 le64_to_cpu(rec.r_cpos) + set_len) - cpos;
1921 rec.r_cpos = cpu_to_le64(cpos);
1922 rec.r_clusters = cpu_to_le32(set_len);
1923 le32_add_cpu(&rec.r_refcount, 1);
1925 mlog(0, "split refcount rec, start %llu, "
1926 "len %u, count %u\n",
1927 (unsigned long long)le64_to_cpu(rec.r_cpos),
1928 set_len, le32_to_cpu(rec.r_refcount));
1929 ret = ocfs2_split_refcount_rec(handle, ci,
1930 ref_root_bh, ref_leaf_bh,
1941 brelse(ref_leaf_bh);
1946 brelse(ref_leaf_bh);
1950 static int ocfs2_remove_refcount_extent(handle_t *handle,
1951 struct ocfs2_caching_info *ci,
1952 struct buffer_head *ref_root_bh,
1953 struct buffer_head *ref_leaf_bh,
1954 struct ocfs2_alloc_context *meta_ac,
1955 struct ocfs2_cached_dealloc_ctxt *dealloc)
1958 struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
1959 struct ocfs2_refcount_block *rb =
1960 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
1961 struct ocfs2_extent_tree et;
1963 BUG_ON(rb->rf_records.rl_used);
1965 ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
1966 ret = ocfs2_remove_extent(handle, &et, le32_to_cpu(rb->rf_cpos),
1967 1, meta_ac, dealloc);
1973 ocfs2_remove_from_cache(ci, ref_leaf_bh);
1976 * add the freed block to the dealloc so that it will be freed
1977 * when we run dealloc.
1979 ret = ocfs2_cache_block_dealloc(dealloc, EXTENT_ALLOC_SYSTEM_INODE,
1980 le16_to_cpu(rb->rf_suballoc_slot),
1981 le64_to_cpu(rb->rf_blkno),
1982 le16_to_cpu(rb->rf_suballoc_bit));
1988 ret = ocfs2_journal_access_rb(handle, ci, ref_root_bh,
1989 OCFS2_JOURNAL_ACCESS_WRITE);
1995 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
1997 le32_add_cpu(&rb->rf_clusters, -1);
2000 * check whether we need to restore the root refcount block if
2001 * there is no leaf extent block at all.
2003 if (!rb->rf_list.l_next_free_rec) {
2004 BUG_ON(rb->rf_clusters);
2006 mlog(0, "reset refcount tree root %llu to be a record block.\n",
2007 (unsigned long long)ref_root_bh->b_blocknr);
2012 memset(&rb->rf_records, 0, sb->s_blocksize -
2013 offsetof(struct ocfs2_refcount_block, rf_records));
2014 rb->rf_records.rl_count =
2015 cpu_to_le16(ocfs2_refcount_recs_per_rb(sb));
2018 ocfs2_journal_dirty(handle, ref_root_bh);
2024 static int ocfs2_decrease_refcount_rec(handle_t *handle,
2025 struct ocfs2_caching_info *ci,
2026 struct buffer_head *ref_root_bh,
2027 struct buffer_head *ref_leaf_bh,
2028 int index, u64 cpos, unsigned int len,
2029 struct ocfs2_alloc_context *meta_ac,
2030 struct ocfs2_cached_dealloc_ctxt *dealloc)
2033 struct ocfs2_refcount_block *rb =
2034 (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
2035 struct ocfs2_refcount_rec *rec = &rb->rf_records.rl_recs[index];
2037 BUG_ON(cpos < le64_to_cpu(rec->r_cpos));
2039 le64_to_cpu(rec->r_cpos) + le32_to_cpu(rec->r_clusters));
2041 if (cpos == le64_to_cpu(rec->r_cpos) &&
2042 len == le32_to_cpu(rec->r_clusters))
2043 ret = ocfs2_change_refcount_rec(handle, ci,
2044 ref_leaf_bh, index, -1);
2046 struct ocfs2_refcount_rec split = *rec;
2047 split.r_cpos = cpu_to_le64(cpos);
2048 split.r_clusters = cpu_to_le32(len);
2050 le32_add_cpu(&split.r_refcount, -1);
2052 mlog(0, "split refcount rec, start %llu, "
2053 "len %u, count %u, original start %llu, len %u\n",
2054 (unsigned long long)le64_to_cpu(split.r_cpos),
2055 len, le32_to_cpu(split.r_refcount),
2056 (unsigned long long)le64_to_cpu(rec->r_cpos),
2057 le32_to_cpu(rec->r_clusters));
2058 ret = ocfs2_split_refcount_rec(handle, ci,
2059 ref_root_bh, ref_leaf_bh,
2069 /* Remove the leaf refcount block if it contains no refcount record. */
2070 if (!rb->rf_records.rl_used && ref_leaf_bh != ref_root_bh) {
2071 ret = ocfs2_remove_refcount_extent(handle, ci, ref_root_bh,
2072 ref_leaf_bh, meta_ac,
2082 static int __ocfs2_decrease_refcount(handle_t *handle,
2083 struct ocfs2_caching_info *ci,
2084 struct buffer_head *ref_root_bh,
2086 struct ocfs2_alloc_context *meta_ac,
2087 struct ocfs2_cached_dealloc_ctxt *dealloc,
2090 int ret = 0, index = 0;
2091 struct ocfs2_refcount_rec rec;
2092 unsigned int r_count = 0, r_len;
2093 struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
2094 struct buffer_head *ref_leaf_bh = NULL;
2096 mlog(0, "Tree owner %llu, decrease refcount start %llu, "
2097 "len %u, delete %u\n",
2098 (unsigned long long)ocfs2_metadata_cache_owner(ci),
2099 (unsigned long long)cpos, len, delete);
2102 ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
2103 cpos, len, &rec, &index,
2110 r_count = le32_to_cpu(rec.r_refcount);
2111 BUG_ON(r_count == 0);
2113 BUG_ON(r_count > 1);
2115 r_len = min((u64)(cpos + len), le64_to_cpu(rec.r_cpos) +
2116 le32_to_cpu(rec.r_clusters)) - cpos;
2118 ret = ocfs2_decrease_refcount_rec(handle, ci, ref_root_bh,
2127 if (le32_to_cpu(rec.r_refcount) == 1 && delete) {
2128 ret = ocfs2_cache_cluster_dealloc(dealloc,
2129 ocfs2_clusters_to_blocks(sb, cpos),
2139 brelse(ref_leaf_bh);
2144 brelse(ref_leaf_bh);
2148 /* Caller must hold refcount tree lock. */
2149 int ocfs2_decrease_refcount(struct inode *inode,
2150 handle_t *handle, u32 cpos, u32 len,
2151 struct ocfs2_alloc_context *meta_ac,
2152 struct ocfs2_cached_dealloc_ctxt *dealloc,
2157 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2158 struct buffer_head *ref_root_bh = NULL;
2159 struct ocfs2_refcount_tree *tree;
2161 BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
2163 ret = ocfs2_get_refcount_block(inode, &ref_blkno);
2169 ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb), ref_blkno, &tree);
2175 ret = ocfs2_read_refcount_block(&tree->rf_ci, tree->rf_blkno,
2182 ret = __ocfs2_decrease_refcount(handle, &tree->rf_ci, ref_root_bh,
2183 cpos, len, meta_ac, dealloc, delete);
2187 brelse(ref_root_bh);
2192 * Mark the already-existing extent at cpos as refcounted for len clusters.
2193 * This adds the refcount extent flag.
2195 * If the existing extent is larger than the request, initiate a
2196 * split. An attempt will be made at merging with adjacent extents.
2198 * The caller is responsible for passing down meta_ac if we'll need it.
2200 static int ocfs2_mark_extent_refcounted(struct inode *inode,
2201 struct ocfs2_extent_tree *et,
2202 handle_t *handle, u32 cpos,
2204 struct ocfs2_alloc_context *meta_ac,
2205 struct ocfs2_cached_dealloc_ctxt *dealloc)
2209 mlog(0, "Inode %lu refcount tree cpos %u, len %u, phys cluster %u\n",
2210 inode->i_ino, cpos, len, phys);
2212 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
2213 ocfs2_error(inode->i_sb, "Inode %lu wants to use refcount "
2214 "tree, but the feature bit is not set in the "
2215 "super block.", inode->i_ino);
2220 ret = ocfs2_change_extent_flag(handle, et, cpos,
2221 len, phys, meta_ac, dealloc,
2222 OCFS2_EXT_REFCOUNTED, 0);
2231 * Given some contiguous physical clusters, calculate what we need
2232 * for modifying their refcount.
2234 static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
2235 struct ocfs2_caching_info *ci,
2236 struct buffer_head *ref_root_bh,
2242 int ret = 0, index, ref_blocks = 0, recs_add = 0;
2243 u64 cpos = start_cpos;
2244 struct ocfs2_refcount_block *rb;
2245 struct ocfs2_refcount_rec rec;
2246 struct buffer_head *ref_leaf_bh = NULL, *prev_bh = NULL;
2249 mlog(0, "start_cpos %llu, clusters %u\n",
2250 (unsigned long long)start_cpos, clusters);
2252 ret = ocfs2_get_refcount_rec(ci, ref_root_bh,
2253 cpos, clusters, &rec,
2254 &index, &ref_leaf_bh);
2260 if (ref_leaf_bh != prev_bh) {
2262 * Now we encounter a new leaf block, so calculate
2263 * whether we need to extend the old leaf.
2266 rb = (struct ocfs2_refcount_block *)
2269 if (le16_to_cpu(rb->rf_records.rl_used) +
2271 le16_to_cpu(rb->rf_records.rl_count))
2278 prev_bh = ref_leaf_bh;
2282 rb = (struct ocfs2_refcount_block *)ref_leaf_bh->b_data;
2284 mlog(0, "recs_add %d,cpos %llu, clusters %u, rec->r_cpos %llu,"
2285 "rec->r_clusters %u, rec->r_refcount %u, index %d\n",
2286 recs_add, (unsigned long long)cpos, clusters,
2287 (unsigned long long)le64_to_cpu(rec.r_cpos),
2288 le32_to_cpu(rec.r_clusters),
2289 le32_to_cpu(rec.r_refcount), index);
2291 len = min((u64)cpos + clusters, le64_to_cpu(rec.r_cpos) +
2292 le32_to_cpu(rec.r_clusters)) - cpos;
2294 * If the refcount rec already exists, cool. We just need
2295 * to check whether there is a split. Otherwise we just need
2296 * to increase the refcount.
2297 * If we will insert one, increase recs_add.
2299 * We record all the records which will be inserted to the
2300 * same refcount block, so that we can tell exactly whether
2301 * we need a new refcount block or not.
2303 if (rec.r_refcount) {
2304 /* Check whether we need a split at the beginning. */
2305 if (cpos == start_cpos &&
2306 cpos != le64_to_cpu(rec.r_cpos))
2309 /* Check whether we need a split in the end. */
2310 if (cpos + clusters < le64_to_cpu(rec.r_cpos) +
2311 le32_to_cpu(rec.r_clusters))
2316 brelse(ref_leaf_bh);
2323 rb = (struct ocfs2_refcount_block *)prev_bh->b_data;
2325 if (le16_to_cpu(rb->rf_records.rl_used) + recs_add >
2326 le16_to_cpu(rb->rf_records.rl_count))
2335 mlog(0, "we need ref_blocks %d\n", ref_blocks);
2336 *meta_add += ref_blocks;
2337 *credits += ref_blocks;
2340 * So we may need ref_blocks to insert into the tree.
2341 * That also means we need to change the b-tree and add that number
2342 * of records since we never merge them.
2343 * We need one more block for expansion since the newly created leaf
2344 * block may also be full and need a split.
2346 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
2347 if (le32_to_cpu(rb->rf_flags) & OCFS2_REFCOUNT_TREE_FL) {
2348 struct ocfs2_extent_tree et;
2350 ocfs2_init_refcount_extent_tree(&et, ci, ref_root_bh);
2351 *meta_add += ocfs2_extend_meta_needed(et.et_root_el);
2352 *credits += ocfs2_calc_extend_credits(sb,
2356 *credits += OCFS2_EXPAND_REFCOUNT_TREE_CREDITS;
2361 brelse(ref_leaf_bh);
2367 * For a refcount tree, we will decrease the refcount of some
2368 * contiguous clusters, so just go through the tree to see how many
2369 * blocks we are going to touch and whether we need to create new blocks.
2371 * Normally the refcount blocks storing these refcounts should be
2372 * contiguous as well, so we can get the number easily.
2373 * As for meta_ac, we will at most split 2 refcount records and add
2374 * 2 more refcount blocks, so just check it in a rough way.
2376 * Caller must hold refcount tree lock.
2378 int ocfs2_prepare_refcount_change_for_del(struct inode *inode,
2379 struct buffer_head *di_bh,
2383 struct ocfs2_alloc_context **meta_ac)
2385 int ret, ref_blocks = 0;
2386 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
2387 struct ocfs2_inode_info *oi = OCFS2_I(inode);
2388 struct buffer_head *ref_root_bh = NULL;
2389 struct ocfs2_refcount_tree *tree;
2390 u64 start_cpos = ocfs2_blocks_to_clusters(inode->i_sb, phys_blkno);
2392 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
2393 ocfs2_error(inode->i_sb, "Inode %lu wants to use refcount "
2394 "tree, but the feature bit is not set in the "
2395 "super block.", inode->i_ino);
2400 BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
2402 ret = ocfs2_get_refcount_tree(OCFS2_SB(inode->i_sb),
2403 le64_to_cpu(di->i_refcount_loc), &tree);
2409 ret = ocfs2_read_refcount_block(&tree->rf_ci,
2410 le64_to_cpu(di->i_refcount_loc),
2417 ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
2420 start_cpos, clusters,
2421 &ref_blocks, credits);
2427 mlog(0, "reserve new metadata %d, credits = %d\n",
2428 ref_blocks, *credits);
2431 ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
2432 ref_blocks, meta_ac);
2438 brelse(ref_root_bh);
2442 #define MAX_CONTIG_BYTES 1048576
2444 static inline unsigned int ocfs2_cow_contig_clusters(struct super_block *sb)
2446 return ocfs2_clusters_for_bytes(sb, MAX_CONTIG_BYTES);
2449 static inline unsigned int ocfs2_cow_contig_mask(struct super_block *sb)
2451 return ~(ocfs2_cow_contig_clusters(sb) - 1);
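/*
 * For example, assuming a 4KB cluster size:
 *
 *	ocfs2_cow_contig_clusters(sb) == 1048576 / 4096 == 256
 *	ocfs2_cow_contig_mask(sb)     == ~(256 - 1)      == ~0xff
 *
 * so CoW regions are carved out on 1MB (256-cluster) boundaries.
 */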
2455 * Given an extent that starts at 'start' and an I/O that starts at 'cpos',
2456 * find an offset (start + (n * contig_clusters)) that is closest to cpos
2457 * while still being less than or equal to it.
2459 * The goal is to break the extent at a multiple of contig_clusters.
2461 static inline unsigned int ocfs2_cow_align_start(struct super_block *sb,
2465 BUG_ON(start > cpos);
2467 return start + ((cpos - start) & ocfs2_cow_contig_mask(sb));
2471 * Given a cluster count of len, pad it out so that it is a multiple
2472 * of contig_clusters.
2474 static inline unsigned int ocfs2_cow_align_length(struct super_block *sb,
2477 unsigned int padded =
2478 (len + (ocfs2_cow_contig_clusters(sb) - 1)) &
2479 ocfs2_cow_contig_mask(sb);
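/*
 * A worked example, again assuming contig_clusters == 256 and the
 * argument order (sb, start, cpos) / (sb, len):
 *
 *	ocfs2_cow_align_start(sb, 10, 300) == 10 + ((300 - 10) & ~0xff) == 266
 *	ocfs2_cow_align_length(sb, 300)    == (300 + 255) & ~0xff       == 512
 */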
2489 * Calculate the start and the number of virtual clusters we need to CoW.
2491 * cpos is the virtual start cluster position where we want to CoW in a
2492 * file and write_len is the cluster length.
2493 * max_cpos is the place where we want to stop CoW intentionally.
2495 * Normally we will start the CoW from the beginning of the extent record containing cpos.
2496 * We try to break up extents on boundaries of MAX_CONTIG_BYTES so that we
2497 * get good I/O from the resulting extent tree.
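/*
 * For example (hypothetical layout, contig_clusters == 256): a single
 * refcounted extent covering clusters [0, 1000) with a 10-cluster write
 * at cpos 300 comes out as cow_start == 256 and cow_len == 256, i.e. we
 * CoW the whole aligned 1MB chunk that contains the write.
 */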
2499 static int ocfs2_refcount_cal_cow_clusters(struct inode *inode,
2500 struct ocfs2_extent_list *el,
2508 int tree_height = le16_to_cpu(el->l_tree_depth), i;
2509 struct buffer_head *eb_bh = NULL;
2510 struct ocfs2_extent_block *eb = NULL;
2511 struct ocfs2_extent_rec *rec;
2512 unsigned int want_clusters, rec_end = 0;
2513 int contig_clusters = ocfs2_cow_contig_clusters(inode->i_sb);
2516 BUG_ON(cpos + write_len > max_cpos);
2518 if (tree_height > 0) {
2519 ret = ocfs2_find_leaf(INODE_CACHE(inode), el, cpos, &eb_bh);
2525 eb = (struct ocfs2_extent_block *) eb_bh->b_data;
2528 if (el->l_tree_depth) {
2529 ocfs2_error(inode->i_sb,
2530 "Inode %lu has non zero tree depth in "
2531 "leaf block %llu\n", inode->i_ino,
2532 (unsigned long long)eb_bh->b_blocknr);
2539 for (i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
2540 rec = &el->l_recs[i];
2542 if (ocfs2_is_empty_extent(rec)) {
2543 mlog_bug_on_msg(i != 0, "Inode %lu has empty record in "
2544 "index %d\n", inode->i_ino, i);
2548 if (le32_to_cpu(rec->e_cpos) +
2549 le16_to_cpu(rec->e_leaf_clusters) <= cpos)
2552 if (*cow_len == 0) {
2554 * We should find a refcounted record in the
2557 BUG_ON(!(rec->e_flags & OCFS2_EXT_REFCOUNTED));
2558 *cow_start = le32_to_cpu(rec->e_cpos);
2562 * If we encounter a hole, a non-refcounted record or
2563 * pass the max_cpos, stop the search.
2565 if ((!(rec->e_flags & OCFS2_EXT_REFCOUNTED)) ||
2566 (*cow_len && rec_end != le32_to_cpu(rec->e_cpos)) ||
2567 (max_cpos <= le32_to_cpu(rec->e_cpos)))
2570 leaf_clusters = le16_to_cpu(rec->e_leaf_clusters);
2571 rec_end = le32_to_cpu(rec->e_cpos) + leaf_clusters;
2572 if (rec_end > max_cpos) {
2574 leaf_clusters = rec_end - le32_to_cpu(rec->e_cpos);
2578 * How many clusters do we actually need from
2579 * this extent? First we see how many we actually
2580 * need to complete the write. If that's smaller
2581 * than contig_clusters, we try for contig_clusters.
2584 want_clusters = write_len;
2586 want_clusters = (cpos + write_len) -
2587 (*cow_start + *cow_len);
2588 if (want_clusters < contig_clusters)
2589 want_clusters = contig_clusters;
2592 * If the write does not cover the whole extent, we
2593 * need to calculate how we're going to split the extent.
2594 * We try to do it on contig_clusters boundaries.
2596 * Any extent smaller than contig_clusters will be
2597 * CoWed in its entirety.
2599 if (leaf_clusters <= contig_clusters)
2600 *cow_len += leaf_clusters;
2601 else if (*cow_len || (*cow_start == cpos)) {
2603 * This extent needs to be CoW'd from its
2604 * beginning, so all we have to do is compute
2605 * how many clusters to grab. We align
2606 * want_clusters to the edge of contig_clusters
2607 * to get better I/O.
2609 want_clusters = ocfs2_cow_align_length(inode->i_sb,
2612 if (leaf_clusters < want_clusters)
2613 *cow_len += leaf_clusters;
2615 *cow_len += want_clusters;
2616 } else if ((*cow_start + contig_clusters) >=
2617 (cpos + write_len)) {
2619 * Breaking off contig_clusters at the front
2620 * of the extent will cover our write. That's
2623 *cow_len = contig_clusters;
2624 } else if ((rec_end - cpos) <= contig_clusters) {
2626 * Breaking off contig_clusters at the tail of
2627 * this extent will cover cpos.
2629 *cow_start = rec_end - contig_clusters;
2630 *cow_len = contig_clusters;
2631 } else if ((rec_end - cpos) <= want_clusters) {
2633 * While we can't fit the entire write in this
2634 * extent, we know that the write goes from cpos
2635 * to the end of the extent. Break that off.
2636 * We try to break it at some multiple of
2637 * contig_clusters from the front of the extent.
2638 * Failing that (i.e., cpos is within
2639 * contig_clusters of the front), we'll CoW the
2642 *cow_start = ocfs2_cow_align_start(inode->i_sb,
2644 *cow_len = rec_end - *cow_start;
2647 * Ok, the entire write lives in the middle of
2648 * this extent. Let's try to slice the extent up
2649 * nicely. Optimally, our CoW region starts at
2650 * m*contig_clusters from the beginning of the
2651 * extent and goes for n*contig_clusters,
2652 * covering the entire write.
2654 *cow_start = ocfs2_cow_align_start(inode->i_sb,
2657 want_clusters = (cpos + write_len) - *cow_start;
2658 want_clusters = ocfs2_cow_align_length(inode->i_sb,
2660 if (*cow_start + want_clusters <= rec_end)
2661 *cow_len = want_clusters;
2663 *cow_len = rec_end - *cow_start;
2666 /* Have we covered our entire write yet? */
2667 if ((*cow_start + *cow_len) >= (cpos + write_len))
2671 * If we reach the end of the extent block and don't get enough
2672 * clusters, continue with the next extent block if possible.
2674 if (i + 1 == le16_to_cpu(el->l_next_free_rec) &&
2675 eb && eb->h_next_leaf_blk) {
2679 ret = ocfs2_read_extent_block(INODE_CACHE(inode),
2680 le64_to_cpu(eb->h_next_leaf_blk),
2687 eb = (struct ocfs2_extent_block *) eb_bh->b_data;
2699 * Prepare meta_ac, data_ac and calculate credits when we want to add
2700 * num_clusters clusters to the data tree "et" and change the refcount of the old
2701 * clusters (starting from p_cluster) in the refcount tree.
2704 * 1. Since we may split the old tree, we will need at most num_clusters + 2
2705 * more new leaf records.
2706 * 2. In some cases we may not need to reserve new clusters (e.g. reflink), so
2707 * just pass data_ac = NULL.
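/*
 * For instance (hypothetical): CoWing 4 clusters out of the middle of
 * one refcounted extent can leave the two untouched ends of the old
 * extent plus up to 4 new extents, which is where the num_clusters + 2
 * bound above comes from.
 */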
2709 static int ocfs2_lock_refcount_allocators(struct super_block *sb,
2710 u32 p_cluster, u32 num_clusters,
2711 struct ocfs2_extent_tree *et,
2712 struct ocfs2_caching_info *ref_ci,
2713 struct buffer_head *ref_root_bh,
2714 struct ocfs2_alloc_context **meta_ac,
2715 struct ocfs2_alloc_context **data_ac,
2718 int ret = 0, meta_add = 0;
2719 int num_free_extents = ocfs2_num_free_extents(OCFS2_SB(sb), et);
2721 if (num_free_extents < 0) {
2722 ret = num_free_extents;
2727 if (num_free_extents < num_clusters + 2)
2729 ocfs2_extend_meta_needed(et->et_root_el);
2731 *credits += ocfs2_calc_extend_credits(sb, et->et_root_el,
2734 ret = ocfs2_calc_refcount_meta_credits(sb, ref_ci, ref_root_bh,
2735 p_cluster, num_clusters,
2736 &meta_add, credits);
2742 mlog(0, "reserve new metadata %d, clusters %u, credits = %d\n",
2743 meta_add, num_clusters, *credits);
2744 ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(sb), meta_add,
2752 ret = ocfs2_reserve_clusters(OCFS2_SB(sb), num_clusters,
2761 ocfs2_free_alloc_context(*meta_ac);
2769 static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh)
2771 BUG_ON(buffer_dirty(bh));
2773 clear_buffer_mapped(bh);
2778 static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
2779 struct ocfs2_cow_context *context,
2780 u32 cpos, u32 old_cluster,
2781 u32 new_cluster, u32 new_len)
2783 int ret = 0, partial;
2784 struct ocfs2_caching_info *ci = context->data_et.et_ci;
2785 struct super_block *sb = ocfs2_metadata_cache_get_super(ci);
2786 u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
2789 unsigned int from, to;
2790 loff_t offset, end, map_end;
2791 struct address_space *mapping = context->inode->i_mapping;
2793 mlog(0, "old_cluster %u, new %u, len %u at offset %u\n", old_cluster,
2794 new_cluster, new_len, cpos);
2796 offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
2797 end = offset + (new_len << OCFS2_SB(sb)->s_clustersize_bits);
2799 while (offset < end) {
2800 page_index = offset >> PAGE_CACHE_SHIFT;
2801 map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
2805 /* from and to are offsets within the page. */
2806 from = offset & (PAGE_CACHE_SIZE - 1);
2807 to = PAGE_CACHE_SIZE;
2808 if (map_end & (PAGE_CACHE_SIZE - 1))
2809 to = map_end & (PAGE_CACHE_SIZE - 1);
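/*
 * For example, with 4KB pages, an offset of 6144 bytes maps to
 * page_index 1 with from == 2048 and to == 4096 (assuming the CoW
 * range extends past that page), so from/to select only the second
 * half of that page.
 */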
2811 page = grab_cache_page(mapping, page_index);
2813 /* This page can't be dirtied before we CoW it out. */
2814 BUG_ON(PageDirty(page));
2816 if (!PageUptodate(page)) {
2817 ret = block_read_full_page(page, ocfs2_get_block);
2825 if (page_has_buffers(page)) {
2826 ret = walk_page_buffers(handle, page_buffers(page),
2828 ocfs2_clear_cow_buffer);
2835 ocfs2_map_and_dirty_page(context->inode,
2837 page, 0, &new_block);
2838 mark_page_accessed(page);
2841 page_cache_release(page);
2851 static int ocfs2_clear_ext_refcount(handle_t *handle,
2852 struct ocfs2_extent_tree *et,
2853 u32 cpos, u32 p_cluster, u32 len,
2854 unsigned int ext_flags,
2855 struct ocfs2_alloc_context *meta_ac,
2856 struct ocfs2_cached_dealloc_ctxt *dealloc)
2859 struct ocfs2_extent_rec replace_rec;
2860 struct ocfs2_path *path = NULL;
2861 struct ocfs2_extent_list *el;
2862 struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
2863 u64 ino = ocfs2_metadata_cache_owner(et->et_ci);
2865 mlog(0, "inode %llu cpos %u, len %u, p_cluster %u, ext_flags %u\n",
2866 (unsigned long long)ino, cpos, len, p_cluster, ext_flags);
2868 memset(&replace_rec, 0, sizeof(replace_rec));
2869 replace_rec.e_cpos = cpu_to_le32(cpos);
2870 replace_rec.e_leaf_clusters = cpu_to_le16(len);
2871 replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(sb,
2873 replace_rec.e_flags = ext_flags;
2874 replace_rec.e_flags &= ~OCFS2_EXT_REFCOUNTED;
2876 path = ocfs2_new_path_from_et(et);
2883 ret = ocfs2_find_path(et->et_ci, path, cpos);
2889 el = path_leaf_el(path);
2891 index = ocfs2_search_extent_list(el, cpos);
2892 if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) {
2894 "Inode %llu has an extent at cpos %u which can no "
2895 "longer be found.\n",
2896 (unsigned long long)ino, cpos);
2901 ret = ocfs2_split_extent(handle, et, path, index,
2902 &replace_rec, meta_ac, dealloc);
2907 ocfs2_free_path(path);
2911 static int ocfs2_replace_clusters(handle_t *handle,
2912 struct ocfs2_cow_context *context,
2915 unsigned int ext_flags)
2918 struct ocfs2_caching_info *ci = context->data_et.et_ci;
2919 u64 ino = ocfs2_metadata_cache_owner(ci);
2921 mlog(0, "inode %llu, cpos %u, old %u, new %u, len %u, ext_flags %u\n",
2922 (unsigned long long)ino, cpos, old, new, len, ext_flags);
2924 /* If the old clusters are unwritten, there is no need to duplicate. */
2925 if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) {
2926 ret = context->cow_duplicate_clusters(handle, context, cpos,
2934 ret = ocfs2_clear_ext_refcount(handle, &context->data_et,
2935 cpos, new, len, ext_flags,
2936 context->meta_ac, &context->dealloc);
2943 static int ocfs2_cow_sync_writeback(struct super_block *sb,
2944 struct ocfs2_cow_context *context,
2945 u32 cpos, u32 num_clusters)
2948 loff_t offset, end, map_end;
2952 if (ocfs2_should_order_data(context->inode))
2955 offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits;
2956 end = offset + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits);
2958 ret = filemap_fdatawrite_range(context->inode->i_mapping,
2965 while (offset < end) {
2966 page_index = offset >> PAGE_CACHE_SHIFT;
2967 map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
2971 page = grab_cache_page(context->inode->i_mapping, page_index);
2974 wait_on_page_writeback(page);
2975 if (PageError(page)) {
2979 mark_page_accessed(page);
2982 page_cache_release(page);
2992 static int ocfs2_di_get_clusters(struct ocfs2_cow_context *context,
2993 u32 v_cluster, u32 *p_cluster,
2995 unsigned int *extent_flags)
2997 return ocfs2_get_clusters(context->inode, v_cluster, p_cluster,
2998 num_clusters, extent_flags);
3001 static int ocfs2_make_clusters_writable(struct super_block *sb,
3002 struct ocfs2_cow_context *context,
3003 u32 cpos, u32 p_cluster,
3004 u32 num_clusters, unsigned int e_flags)
3006 int ret, delete, index, credits = 0;
3007 u32 new_bit, new_len;
3008 unsigned int set_len;
3009 struct ocfs2_super *osb = OCFS2_SB(sb);
3011 struct buffer_head *ref_leaf_bh = NULL;
3012 struct ocfs2_caching_info *ref_ci = &context->ref_tree->rf_ci;
3013 struct ocfs2_refcount_rec rec;
3015 mlog(0, "cpos %u, p_cluster %u, num_clusters %u, e_flags %u\n",
3016 cpos, p_cluster, num_clusters, e_flags);
3018 ret = ocfs2_lock_refcount_allocators(sb, p_cluster, num_clusters,
3021 context->ref_root_bh,
3023 &context->data_ac, &credits);
3029 handle = ocfs2_start_trans(osb, credits);
3030 if (IS_ERR(handle)) {
3031 ret = PTR_ERR(handle);
3036 while (num_clusters) {
3037 ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
3038 p_cluster, num_clusters,
3039 &rec, &index, &ref_leaf_bh);
3045 BUG_ON(!rec.r_refcount);
3046 set_len = min((u64)p_cluster + num_clusters,
3047 le64_to_cpu(rec.r_cpos) +
3048 le32_to_cpu(rec.r_clusters)) - p_cluster;
3051 * There are different situations here.
3052 * 1. If refcount == 1, remove the flag and don't COW.
3053 * 2. If refcount > 1, allocate clusters.
3054 * Here we may not be able to allocate r_len clusters all at once, so continue
3055 * until we reach num_clusters.
3057 if (le32_to_cpu(rec.r_refcount) == 1) {
3059 ret = ocfs2_clear_ext_refcount(handle,
3072 ret = __ocfs2_claim_clusters(osb, handle,
3075 &new_bit, &new_len);
3081 ret = ocfs2_replace_clusters(handle, context,
3082 cpos, p_cluster, new_bit,
3091 ret = __ocfs2_decrease_refcount(handle, ref_ci,
3092 context->ref_root_bh,
3095 &context->dealloc, delete);
3102 p_cluster += set_len;
3103 num_clusters -= set_len;
3104 brelse(ref_leaf_bh);
3109 * Here we should write the new page out first if we are
3110 * in write-back mode.
3112 ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters);
3117 ocfs2_commit_trans(osb, handle);
3120 if (context->data_ac) {
3121 ocfs2_free_alloc_context(context->data_ac);
3122 context->data_ac = NULL;
3124 if (context->meta_ac) {
3125 ocfs2_free_alloc_context(context->meta_ac);
3126 context->meta_ac = NULL;
3128 brelse(ref_leaf_bh);
3133 static int ocfs2_replace_cow(struct ocfs2_cow_context *context)
3136 struct inode *inode = context->inode;
3137 u32 cow_start = context->cow_start, cow_len = context->cow_len;
3138 u32 p_cluster, num_clusters;
3139 unsigned int ext_flags;
3140 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3142 if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) {
3143 ocfs2_error(inode->i_sb, "Inode %lu wants to use refcount "
3144 "tree, but the feature bit is not set in the "
3145 "super block.", inode->i_ino);
3149 ocfs2_init_dealloc_ctxt(&context->dealloc);
3152 ret = context->get_clusters(context, cow_start, &p_cluster,
3153 &num_clusters, &ext_flags);
3159 BUG_ON(!(ext_flags & OCFS2_EXT_REFCOUNTED));
3161 if (cow_len < num_clusters)
3162 num_clusters = cow_len;
3164 ret = ocfs2_make_clusters_writable(inode->i_sb, context,
3165 cow_start, p_cluster,
3166 num_clusters, ext_flags);
3172 cow_len -= num_clusters;
3173 cow_start += num_clusters;
3176 if (ocfs2_dealloc_has_cluster(&context->dealloc)) {
3177 ocfs2_schedule_truncate_log_flush(osb, 1);
3178 ocfs2_run_deallocs(osb, &context->dealloc);
3185 * Starting at cpos, try to CoW write_len clusters. Don't CoW
3186 * past max_cpos. This will stop when it runs into a hole or an
3187 * unrefcounted extent.
3189 static int ocfs2_refcount_cow_hunk(struct inode *inode,
3190 struct buffer_head *di_bh,
3191 u32 cpos, u32 write_len, u32 max_cpos)
3194 u32 cow_start = 0, cow_len = 0;
3195 struct ocfs2_inode_info *oi = OCFS2_I(inode);
3196 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3197 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
3198 struct buffer_head *ref_root_bh = NULL;
3199 struct ocfs2_refcount_tree *ref_tree;
3200 struct ocfs2_cow_context *context = NULL;
3202 BUG_ON(!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL));
3204 ret = ocfs2_refcount_cal_cow_clusters(inode, &di->id2.i_list,
3205 cpos, write_len, max_cpos,
3206 &cow_start, &cow_len);
3212 mlog(0, "CoW inode %lu, cpos %u, write_len %u, cow_start %u, "
3213 "cow_len %u\n", inode->i_ino,
3214 cpos, write_len, cow_start, cow_len);
3216 BUG_ON(cow_len == 0);
3218 context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS);
3225 ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
3226 1, &ref_tree, &ref_root_bh);
3232 context->inode = inode;
3233 context->cow_start = cow_start;
3234 context->cow_len = cow_len;
3235 context->ref_tree = ref_tree;
3236 context->ref_root_bh = ref_root_bh;
3237 context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page;
3238 context->get_clusters = ocfs2_di_get_clusters;
3240 ocfs2_init_dinode_extent_tree(&context->data_et,
3241 INODE_CACHE(inode), di_bh);
3243 ret = ocfs2_replace_cow(context);
3248 * Truncate the extent map here, since no matter whether we hit an
3249 * error during the operation, we should not trust the cached extent map afterwards.
3252 ocfs2_extent_map_trunc(inode, cow_start);
3254 ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
3255 brelse(ref_root_bh);
3262 * CoW any and all clusters between cpos and cpos+write_len.
3263 * Don't CoW past max_cpos. If this returns successfully, all
3264 * clusters between cpos and cpos+write_len are safe to modify.
3266 int ocfs2_refcount_cow(struct inode *inode,
3267 struct buffer_head *di_bh,
3268 u32 cpos, u32 write_len, u32 max_cpos)
3271 u32 p_cluster, num_clusters;
3272 unsigned int ext_flags;
3275 ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
3276 &num_clusters, &ext_flags);
3282 if (write_len < num_clusters)
3283 num_clusters = write_len;
3285 if (ext_flags & OCFS2_EXT_REFCOUNTED) {
3286 ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos,
3287 num_clusters, max_cpos);
3294 write_len -= num_clusters;
3295 cpos += num_clusters;
3302 * Insert a new extent into the refcount tree and mark an extent record
3303 * as refcounted in the dinode tree.
3305 int ocfs2_add_refcount_flag(struct inode *inode,
3306 struct ocfs2_extent_tree *data_et,
3307 struct ocfs2_caching_info *ref_ci,
3308 struct buffer_head *ref_root_bh,
3309 u32 cpos, u32 p_cluster, u32 num_clusters,
3310 struct ocfs2_cached_dealloc_ctxt *dealloc)
3314 int credits = 1, ref_blocks = 0;
3315 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3316 struct ocfs2_alloc_context *meta_ac = NULL;
3318 ret = ocfs2_calc_refcount_meta_credits(inode->i_sb,
3319 ref_ci, ref_root_bh,
3320 p_cluster, num_clusters,
3321 &ref_blocks, &credits);
3327 mlog(0, "reserve new metadata %d, credits = %d\n",
3328 ref_blocks, credits);
3331 ret = ocfs2_reserve_new_metadata_blocks(OCFS2_SB(inode->i_sb),
3332 ref_blocks, &meta_ac);
3339 handle = ocfs2_start_trans(osb, credits);
3340 if (IS_ERR(handle)) {
3341 ret = PTR_ERR(handle);
3346 ret = ocfs2_mark_extent_refcounted(inode, data_et, handle,
3347 cpos, num_clusters, p_cluster,
3354 ret = __ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
3355 p_cluster, num_clusters,
3361 ocfs2_commit_trans(osb, handle);
3364 ocfs2_free_alloc_context(meta_ac);
3368 static int ocfs2_change_ctime(struct inode *inode,
3369 struct buffer_head *di_bh)
3373 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
3375 handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb),
3376 OCFS2_INODE_UPDATE_CREDITS);
3377 if (IS_ERR(handle)) {
3378 ret = PTR_ERR(handle);
3383 ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
3384 OCFS2_JOURNAL_ACCESS_WRITE);
3390 inode->i_ctime = CURRENT_TIME;
3391 di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
3392 di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
3394 ocfs2_journal_dirty(handle, di_bh);
3397 ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
3402 static int ocfs2_attach_refcount_tree(struct inode *inode,
3403 struct buffer_head *di_bh)
3405 int ret, data_changed = 0;
3406 struct buffer_head *ref_root_bh = NULL;
3407 struct ocfs2_inode_info *oi = OCFS2_I(inode);
3408 struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
3409 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3410 struct ocfs2_refcount_tree *ref_tree;
3411 unsigned int ext_flags;
3413 u32 cpos, num_clusters, clusters, p_cluster;
3414 struct ocfs2_cached_dealloc_ctxt dealloc;
3415 struct ocfs2_extent_tree di_et;
3417 ocfs2_init_dealloc_ctxt(&dealloc);
3419 if (!(oi->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL)) {
3420 ret = ocfs2_create_refcount_tree(inode, di_bh);
3427 BUG_ON(!di->i_refcount_loc);
3428 ret = ocfs2_lock_refcount_tree(osb,
3429 le64_to_cpu(di->i_refcount_loc), 1,
3430 &ref_tree, &ref_root_bh);
3436 ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh);
3438 size = i_size_read(inode);
3439 clusters = ocfs2_clusters_for_bytes(inode->i_sb, size);
3442 while (cpos < clusters) {
3443 ret = ocfs2_get_clusters(inode, cpos, &p_cluster,
3444 &num_clusters, &ext_flags);
3446 if (p_cluster && !(ext_flags & OCFS2_EXT_REFCOUNTED)) {
3447 ret = ocfs2_add_refcount_flag(inode, &di_et,
3450 p_cluster, num_clusters,
3459 cpos += num_clusters;
3463 ret = ocfs2_change_ctime(inode, di_bh);
3469 ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
3470 brelse(ref_root_bh);
3472 if (!ret && ocfs2_dealloc_has_cluster(&dealloc)) {
3473 ocfs2_schedule_truncate_log_flush(osb, 1);
3474 ocfs2_run_deallocs(osb, &dealloc);
3478 * Empty the extent map so that we may get the right extent
3479 * record from the disk.
3481 ocfs2_extent_map_trunc(inode, 0);
3486 static int ocfs2_add_refcounted_extent(struct inode *inode,
3487 struct ocfs2_extent_tree *et,
3488 struct ocfs2_caching_info *ref_ci,
3489 struct buffer_head *ref_root_bh,
3490 u32 cpos, u32 p_cluster, u32 num_clusters,
3491 unsigned int ext_flags,
3492 struct ocfs2_cached_dealloc_ctxt *dealloc)
3497 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
3498 struct ocfs2_alloc_context *meta_ac = NULL;
3500 ret = ocfs2_lock_refcount_allocators(inode->i_sb,
3501 p_cluster, num_clusters,
3503 ref_root_bh, &meta_ac,
3510 handle = ocfs2_start_trans(osb, credits);
3511 if (IS_ERR(handle)) {
3512 ret = PTR_ERR(handle);
3517 ret = ocfs2_insert_extent(handle, et, cpos,
3518 cpu_to_le64(ocfs2_clusters_to_blocks(inode->i_sb,
3520 num_clusters, ext_flags, meta_ac);
3526 ret = __ocfs2_increase_refcount(handle, ref_ci, ref_root_bh,
3527 p_cluster, num_clusters,
3533 ocfs2_commit_trans(osb, handle);
3536 ocfs2_free_alloc_context(meta_ac);
3540 static int ocfs2_duplicate_extent_list(struct inode *s_inode,
3541 struct inode *t_inode,
3542 struct buffer_head *t_bh,
3543 struct ocfs2_caching_info *ref_ci,
3544 struct buffer_head *ref_root_bh,
3545 struct ocfs2_cached_dealloc_ctxt *dealloc)
3548 u32 p_cluster, num_clusters, clusters, cpos;
3550 unsigned int ext_flags;
3551 struct ocfs2_extent_tree et;
3553 ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(t_inode), t_bh);
3555 size = i_size_read(s_inode);
3556 clusters = ocfs2_clusters_for_bytes(s_inode->i_sb, size);
3559 while (cpos < clusters) {
3560 ret = ocfs2_get_clusters(s_inode, cpos, &p_cluster,
3561 &num_clusters, &ext_flags);
3564 ret = ocfs2_add_refcounted_extent(t_inode, &et,
3565 ref_ci, ref_root_bh,
3576 cpos += num_clusters;
3584 * Change the new file's attributes to match the source file's.
3586 * reflink creates a snapshot of a file; that means the attributes
3587 * must be identical except for three exceptions - nlink, ino, and ctime.
3589 static int ocfs2_complete_reflink(struct inode *s_inode,
3590 struct buffer_head *s_bh,
3591 struct inode *t_inode,
3592 struct buffer_head *t_bh)
3596 struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
3597 struct ocfs2_dinode *di = (struct ocfs2_dinode *)t_bh->b_data;
3598 loff_t size = i_size_read(s_inode);
3600 handle = ocfs2_start_trans(OCFS2_SB(t_inode->i_sb),
3601 OCFS2_INODE_UPDATE_CREDITS);
3602 if (IS_ERR(handle)) {
3603 ret = PTR_ERR(handle);
3608 ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
3609 OCFS2_JOURNAL_ACCESS_WRITE);
3615 spin_lock(&OCFS2_I(t_inode)->ip_lock);
3616 OCFS2_I(t_inode)->ip_clusters = OCFS2_I(s_inode)->ip_clusters;
3617 OCFS2_I(t_inode)->ip_attr = OCFS2_I(s_inode)->ip_attr;
3618 OCFS2_I(t_inode)->ip_dyn_features = OCFS2_I(s_inode)->ip_dyn_features;
3619 spin_unlock(&OCFS2_I(t_inode)->ip_lock);
3620 i_size_write(t_inode, size);
3622 di->i_xattr_inline_size = s_di->i_xattr_inline_size;
3623 di->i_clusters = s_di->i_clusters;
3624 di->i_size = s_di->i_size;
3625 di->i_dyn_features = s_di->i_dyn_features;
3626 di->i_attr = s_di->i_attr;
3627 di->i_uid = s_di->i_uid;
3628 di->i_gid = s_di->i_gid;
3629 di->i_mode = s_di->i_mode;
3633 * we want mtime to appear identical to the source and update ctime.
3635 t_inode->i_ctime = CURRENT_TIME;
3637 di->i_ctime = cpu_to_le64(t_inode->i_ctime.tv_sec);
3638 di->i_ctime_nsec = cpu_to_le32(t_inode->i_ctime.tv_nsec);
3640 t_inode->i_mtime = s_inode->i_mtime;
3641 di->i_mtime = s_di->i_mtime;
3642 di->i_mtime_nsec = s_di->i_mtime_nsec;
3644 ocfs2_journal_dirty(handle, t_bh);
3647 ocfs2_commit_trans(OCFS2_SB(t_inode->i_sb), handle);
3651 static int ocfs2_create_reflink_node(struct inode *s_inode,
3652 struct buffer_head *s_bh,
3653 struct inode *t_inode,
3654 struct buffer_head *t_bh)
3657 struct buffer_head *ref_root_bh = NULL;
3658 struct ocfs2_cached_dealloc_ctxt dealloc;
3659 struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
3660 struct ocfs2_refcount_block *rb;
3661 struct ocfs2_dinode *di = (struct ocfs2_dinode *)s_bh->b_data;
3662 struct ocfs2_refcount_tree *ref_tree;
3664 ocfs2_init_dealloc_ctxt(&dealloc);
3666 ret = ocfs2_set_refcount_tree(t_inode, t_bh,
3667 le64_to_cpu(di->i_refcount_loc));
3673 ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
3674 1, &ref_tree, &ref_root_bh);
3679 rb = (struct ocfs2_refcount_block *)ref_root_bh->b_data;
3681 ret = ocfs2_duplicate_extent_list(s_inode, t_inode, t_bh,
3682 &ref_tree->rf_ci, ref_root_bh,
3686 goto out_unlock_refcount;
3689 ret = ocfs2_complete_reflink(s_inode, s_bh, t_inode, t_bh);
3693 out_unlock_refcount:
3694 ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
3695 brelse(ref_root_bh);
3697 if (ocfs2_dealloc_has_cluster(&dealloc)) {
3698 ocfs2_schedule_truncate_log_flush(osb, 1);
3699 ocfs2_run_deallocs(osb, &dealloc);