#include "super.h"
#include "uptodate.h"
#include "xattr.h"
+#include "refcounttree.h"
#include "buffer_head_io.h"
static int ocfs2_extend_rotate_transaction(handle_t *handle, int subtree_depth,
int op_credits,
struct ocfs2_path *path)
{
+ int ret;
int credits = (path->p_tree_depth - subtree_depth) * 2 + 1 + op_credits;
- if (handle->h_buffer_credits < credits)
- return ocfs2_extend_trans(handle, credits);
+ if (handle->h_buffer_credits < credits) {
+ ret = ocfs2_extend_trans(handle,
+ credits - handle->h_buffer_credits);
+ if (ret)
+ return ret;
+
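+ /*
+ * ocfs2_extend_trans() may have had to restart the handle, in
+ * which case it is left with only the delta we asked for above.
+ * Re-check and, in that unlikely case, extend by the full credit
+ * count.
+ */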
+ if (unlikely(handle->h_buffer_credits < credits))
+ return ocfs2_extend_trans(handle, credits);
+ }
return 0;
}
*
* The array is assumed to be large enough to hold an entire path (tree depth).
*
- * Upon succesful return from this function:
+ * Upon successful return from this function:
*
* - The 'right_path' array will contain a path to the leaf block
* whose range contains e_cpos.
*
* The caller is responsible for passing down meta_ac if we'll need it.
*/
-static int ocfs2_change_extent_flag(handle_t *handle,
- struct ocfs2_extent_tree *et,
- u32 cpos, u32 len, u32 phys,
- struct ocfs2_alloc_context *meta_ac,
- struct ocfs2_cached_dealloc_ctxt *dealloc,
- int new_flags, int clear_flags)
+int ocfs2_change_extent_flag(handle_t *handle,
+ struct ocfs2_extent_tree *et,
+ u32 cpos, u32 len, u32 phys,
+ struct ocfs2_alloc_context *meta_ac,
+ struct ocfs2_cached_dealloc_ctxt *dealloc,
+ int new_flags, int clear_flags)
{
int ret, index;
struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);
return fl;
}
-static int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
- int type, int slot, u64 blkno,
- unsigned int bit)
+int ocfs2_cache_block_dealloc(struct ocfs2_cached_dealloc_ctxt *ctxt,
+ int type, int slot, u64 blkno,
+ unsigned int bit)
{
int ret;
struct ocfs2_per_slot_free_list *fl;
*/
static int ocfs2_trim_tree(struct inode *inode, struct ocfs2_path *path,
handle_t *handle, struct ocfs2_truncate_context *tc,
- u32 clusters_to_del, u64 *delete_start)
+ u32 clusters_to_del, u64 *delete_start, u8 *flags)
{
int ret, i, index = path->p_tree_depth;
u32 new_edge = 0;
struct ocfs2_extent_rec *rec;
*delete_start = 0;
+ *flags = 0;
while (index >= 0) {
bh = path->p_node[index].bh;
*delete_start = le64_to_cpu(rec->e_blkno)
+ ocfs2_clusters_to_blocks(inode->i_sb,
le16_to_cpu(rec->e_leaf_clusters));
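+ /*
+ * Hand the record's flags back so that the caller can tell
+ * whether the range being removed was refcounted.
+ */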
+ *flags = rec->e_flags;
/*
* If it's now empty, remove this record.
struct buffer_head *fe_bh,
handle_t *handle,
struct ocfs2_truncate_context *tc,
- struct ocfs2_path *path)
+ struct ocfs2_path *path,
+ struct ocfs2_alloc_context *meta_ac)
{
int status;
struct ocfs2_dinode *fe;
struct ocfs2_extent_list *el;
struct buffer_head *last_eb_bh = NULL;
u64 delete_blk = 0;
+ u8 rec_flags;
fe = (struct ocfs2_dinode *) fe_bh->b_data;
inode->i_blocks = ocfs2_inode_sector_count(inode);
status = ocfs2_trim_tree(inode, path, handle, tc,
- clusters_to_del, &delete_blk);
+ clusters_to_del, &delete_blk, &rec_flags);
if (status) {
mlog_errno(status);
goto bail;
}
if (delete_blk) {
- status = ocfs2_truncate_log_append(osb, handle, delete_blk,
- clusters_to_del);
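+ /*
+ * Refcounted clusters may still be in use by another inode,
+ * so we can't just hand them to the truncate log. Drop the
+ * refcount instead; clusters whose count reaches zero get
+ * queued on tc->tc_dealloc for freeing.
+ */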
+ if (rec_flags & OCFS2_EXT_REFCOUNTED)
+ status = ocfs2_decrease_refcount(inode, handle,
+ ocfs2_blocks_to_clusters(osb->sb,
+ delete_blk),
+ clusters_to_del, meta_ac,
+ &tc->tc_dealloc, 1);
+ else
+ status = ocfs2_truncate_log_append(osb, handle,
+ delete_blk,
+ clusters_to_del);
if (status < 0) {
mlog_errno(status);
goto bail;
return 0;
}
-static void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
- unsigned int from, unsigned int to,
- struct page *page, int zero, u64 *phys)
+void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
+ unsigned int from, unsigned int to,
+ struct page *page, int zero, u64 *phys)
{
int ret, partial = 0;
ocfs2_unlock_and_free_pages(pages, numpages);
}
-static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end,
- struct page **pages, int *num)
+int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
+ struct page **pages, int *num)
{
int numpages, ret = 0;
- struct super_block *sb = inode->i_sb;
struct address_space *mapping = inode->i_mapping;
unsigned long index;
loff_t last_page_bytes;
BUG_ON(start > end);
- BUG_ON(start >> OCFS2_SB(sb)->s_clustersize_bits !=
- (end - 1) >> OCFS2_SB(sb)->s_clustersize_bits);
-
numpages = 0;
last_page_bytes = PAGE_ALIGN(end);
index = start >> PAGE_CACHE_SHIFT;
return ret;
}
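+/*
+ * Grab the pages covering the tail of the file. The range must stay
+ * within one cluster; ocfs2_grab_pages() itself no longer assumes
+ * that.
+ */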
+static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end,
+ struct page **pages, int *num)
+{
+ struct super_block *sb = inode->i_sb;
+
+ BUG_ON(start >> OCFS2_SB(sb)->s_clustersize_bits !=
+ (end - 1) >> OCFS2_SB(sb)->s_clustersize_bits);
+
+ return ocfs2_grab_pages(inode, start, end, pages, num);
+}
+
/*
* Zero the area past i_size but still within an allocated
* cluster. This avoids exposing nonzero data on subsequent file
* extends.
{
int status, i, credits, tl_sem = 0;
u32 clusters_to_del, new_highest_cpos, range;
+ u64 blkno = 0;
struct ocfs2_extent_list *el;
handle_t *handle = NULL;
struct inode *tl_inode = osb->osb_tl_inode;
struct ocfs2_path *path = NULL;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)fe_bh->b_data;
+ struct ocfs2_alloc_context *meta_ac = NULL;
+ struct ocfs2_refcount_tree *ref_tree = NULL;
mlog_entry_void();
goto bail;
}
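+ /*
+ * Credits are recomputed on every pass; preparing a refcount
+ * change below may add to them before the usual tree-truncate
+ * credits are added in.
+ */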
+ credits = 0;
+
/*
* Truncate always works against the rightmost tree branch.
*/
clusters_to_del = 0;
} else if (le32_to_cpu(el->l_recs[i].e_cpos) >= new_highest_cpos) {
clusters_to_del = ocfs2_rec_clusters(el, &el->l_recs[i]);
+ blkno = le64_to_cpu(el->l_recs[i].e_blkno);
} else if (range > new_highest_cpos) {
clusters_to_del = (ocfs2_rec_clusters(el, &el->l_recs[i]) +
le32_to_cpu(el->l_recs[i].e_cpos)) -
new_highest_cpos;
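+ /*
+ * Only the tail of this record is being removed, so point
+ * blkno at the first block that actually goes away.
+ */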
+ blkno = le64_to_cpu(el->l_recs[i].e_blkno) +
+ ocfs2_clusters_to_blocks(inode->i_sb,
+ ocfs2_rec_clusters(el, &el->l_recs[i]) -
+ clusters_to_del);
} else {
status = 0;
goto bail;
mlog(0, "clusters_to_del = %u in this pass, tail blk=%llu\n",
clusters_to_del, (unsigned long long)path_leaf_bh(path)->b_blocknr);
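+ /*
+ * A refcounted extent can't be freed directly. Take the
+ * refcount tree lock and reserve the metadata blocks and
+ * journal credits that the refcount change will need before
+ * starting the transaction.
+ */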
+ if (el->l_recs[i].e_flags & OCFS2_EXT_REFCOUNTED && clusters_to_del) {
+ BUG_ON(!(OCFS2_I(inode)->ip_dyn_features &
+ OCFS2_HAS_REFCOUNT_FL));
+
+ status = ocfs2_lock_refcount_tree(osb,
+ le64_to_cpu(di->i_refcount_loc),
+ 1, &ref_tree, NULL);
+ if (status) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ status = ocfs2_prepare_refcount_change_for_del(inode, fe_bh,
+ blkno,
+ clusters_to_del,
+ &credits,
+ &meta_ac);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+ }
+
mutex_lock(&tl_inode->i_mutex);
tl_sem = 1;
/* ocfs2_truncate_log_needs_flush guarantees us at least one
}
}
- credits = ocfs2_calc_tree_trunc_credits(osb->sb, clusters_to_del,
+ credits += ocfs2_calc_tree_trunc_credits(osb->sb, clusters_to_del,
(struct ocfs2_dinode *)fe_bh->b_data,
el);
handle = ocfs2_start_trans(osb, credits);
}
status = ocfs2_do_truncate(osb, clusters_to_del, inode, fe_bh, handle,
- tc, path);
+ tc, path, meta_ac);
if (status < 0) {
mlog_errno(status);
goto bail;
ocfs2_reinit_path(path, 1);
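+ /*
+ * Drop the per-pass refcount state here; the next pass will
+ * relock and re-reserve if it hits another refcounted extent.
+ */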
+ if (meta_ac) {
+ ocfs2_free_alloc_context(meta_ac);
+ meta_ac = NULL;
+ }
+
+ if (ref_tree) {
+ ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
+ ref_tree = NULL;
+ }
+
/*
* The check above will catch the case where we've truncated
* away all allocation.
if (handle)
ocfs2_commit_trans(osb, handle);
+ if (meta_ac)
+ ocfs2_free_alloc_context(meta_ac);
+
+ if (ref_tree)
+ ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
+
ocfs2_run_deallocs(osb, &tc->tc_dealloc);
ocfs2_free_path(path);