1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
6 * Copyright (C) 2008 Oracle. All rights reserved.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public
19 * License along with this program; if not, write to the
20 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
21 * Boston, MA 02111-1307, USA.
24 #define MLOG_MASK_PREFIX ML_XATTR
25 #include <cluster/masklog.h>
36 #include "buffer_head_io.h"
/*
 * ocfs2_xattr_extend_allocation()
 *
 * Grow the cluster allocation backing an xattr value b-tree (xv) by
 * clusters_to_add clusters, inside a journal transaction that is
 * restarted/extended as needed when the journal runs low on credits.
 *
 * NOTE(review): this chunk is gap-sampled -- several original lines are
 * missing (e.g. the clusters_to_add parameter declaration, the error
 * labels and the closing brace), so the comments below describe only
 * what the visible lines establish.
 */
38 static int ocfs2_xattr_extend_allocation(struct inode *inode,
40 struct buffer_head *xattr_bh,
41 struct ocfs2_xattr_value_root *xv)
46 handle_t *handle = NULL;
47 struct ocfs2_alloc_context *data_ac = NULL;
48 struct ocfs2_alloc_context *meta_ac = NULL;
/* Set by ocfs2_add_clusters_in_btree(): tells us whether/how to restart. */
49 enum ocfs2_alloc_restarted why;
50 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
51 struct ocfs2_extent_list *root_el = &xv->xr_list;
/* Remember the current cluster count; new clusters append after it. */
52 u32 prev_clusters, logical_start = le32_to_cpu(xv->xr_clusters);
54 mlog(0, "(clusters_to_add for xattr= %u)\n", clusters_to_add);
/*
 * Reserve data and metadata allocators up front, before starting the
 * transaction, so the allocation cannot deadlock against the journal.
 */
58 status = ocfs2_lock_allocators(inode, xattr_bh, root_el,
59 clusters_to_add, 0, &data_ac,
60 &meta_ac, OCFS2_XATTR_VALUE_EXTENT, xv);
/* Size the transaction for the full extend, then start it. */
66 credits = ocfs2_calc_extend_credits(osb->sb, root_el, clusters_to_add);
67 handle = ocfs2_start_trans(osb, credits);
/* ocfs2_start_trans() returns ERR_PTR on failure -- presumably guarded
 * by an IS_ERR(handle) check on a line not visible in this chunk. */
69 status = PTR_ERR(handle);
/* Re-entered after ocfs2_extend_trans() when more credits were needed. */
75 restarted_transaction:
/* Get write access to the xattr block before modifying xv. */
76 status = ocfs2_journal_access(handle, inode, xattr_bh,
77 OCFS2_JOURNAL_ACCESS_WRITE);
83 prev_clusters = le32_to_cpu(xv->xr_clusters);
/* Do the actual allocation; may return -EAGAIN with why == RESTART_*. */
84 status = ocfs2_add_clusters_in_btree(osb,
95 OCFS2_XATTR_VALUE_EXTENT,
/* -EAGAIN means "restart me", not a hard failure; -ENOSPC is expected
 * enough that it is presumably not logged as an error. */
97 if ((status < 0) && (status != -EAGAIN)) {
98 if (status != -ENOSPC)
103 status = ocfs2_journal_dirty(handle, xattr_bh);
/* Account for whatever this pass actually managed to allocate. */
109 clusters_to_add -= le32_to_cpu(xv->xr_clusters) - prev_clusters;
111 if (why != RESTART_NONE && clusters_to_add) {
/* RESTART_META: out of metadata reservation -- presumably falls out to
 * re-run the whole function (see restart_func below). */
112 if (why == RESTART_META) {
113 mlog(0, "restarting function.\n");
116 BUG_ON(why != RESTART_TRANS);
/* RESTART_TRANS: same reservation, but the journal handle needs more
 * credits for the remaining clusters. */
118 mlog(0, "restarting transaction.\n");
119 /* TODO: This can be more intelligent. */
120 credits = ocfs2_calc_extend_credits(osb->sb,
123 status = ocfs2_extend_trans(handle, credits);
125 /* handle still has to be committed at
131 goto restarted_transaction;
/* Cleanup path: commit the transaction and release the allocators.
 * (The surrounding labels/NULL checks are not visible in this chunk.) */
137 ocfs2_commit_trans(osb, handle);
141 ocfs2_free_alloc_context(data_ac);
145 ocfs2_free_alloc_context(meta_ac);
/* If a pass succeeded but flagged restart_func, loop the whole routine
 * to pick up a fresh metadata reservation. */
148 if ((!status) && restart_func) {
/*
 * __ocfs2_remove_xattr_range()
 *
 * Remove one contiguous run of `len` clusters (logical cpos, physical
 * phys_cpos) from an xattr value tree, logging the freed clusters to
 * the truncate log so they are released after journal commit.
 * Caller presumably holds the inode locked; freed metadata is collected
 * in *dealloc for the caller to run later.
 *
 * NOTE(review): gap-sampled chunk -- error-handling labels and the
 * closing brace are not visible here.
 */
156 static int __ocfs2_remove_xattr_range(struct inode *inode,
157 struct buffer_head *root_bh,
158 struct ocfs2_xattr_value_root *xv,
159 u32 cpos, u32 phys_cpos, u32 len,
160 struct ocfs2_cached_dealloc_ctxt *dealloc)
163 u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
164 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
165 struct inode *tl_inode = osb->osb_tl_inode;
167 struct ocfs2_alloc_context *meta_ac = NULL;
/* Removing an extent may split tree nodes: reserve 1 metadata block. */
169 ret = ocfs2_lock_allocators(inode, root_bh, &xv->xr_list,
170 0, 1, NULL, &meta_ac,
171 OCFS2_XATTR_VALUE_EXTENT, xv);
/* The truncate log inode mutex serializes truncate-log updates and
 * must be taken before the transaction starts. */
177 mutex_lock(&tl_inode->i_mutex);
/* Flush the truncate log first if it is full, so our append below
 * cannot fail for lack of space. */
179 if (ocfs2_truncate_log_needs_flush(osb)) {
180 ret = __ocfs2_flush_truncate_log(osb);
187 handle = ocfs2_start_trans(osb, OCFS2_REMOVE_EXTENT_CREDITS);
188 if (IS_ERR(handle)) {
189 ret = PTR_ERR(handle);
194 ret = ocfs2_journal_access(handle, inode, root_bh,
195 OCFS2_JOURNAL_ACCESS_WRITE);
/* Unlink the extent record from the value tree. */
201 ret = ocfs2_remove_extent(inode, root_bh, cpos, len, handle, meta_ac,
202 dealloc, OCFS2_XATTR_VALUE_EXTENT, xv);
/* Shrink the on-disk (little-endian) cluster count by len. */
208 le32_add_cpu(&xv->xr_clusters, -len);
210 ret = ocfs2_journal_dirty(handle, root_bh);
/* Defer the actual free of the physical clusters to the truncate log,
 * so they are only reusable once this transaction commits. */
216 ret = ocfs2_truncate_log_append(osb, handle, phys_blkno, len);
/* Cleanup: commit, drop the truncate-log mutex, release reservation.
 * (The labels and NULL check live on lines not visible here.) */
221 ocfs2_commit_trans(osb, handle);
223 mutex_unlock(&tl_inode->i_mutex);
226 ocfs2_free_alloc_context(meta_ac);
/*
 * ocfs2_xattr_shrink_size()
 *
 * Shrink an xattr value tree from old_clusters down to new_clusters by
 * repeatedly looking up the trailing extents and removing them with
 * __ocfs2_remove_xattr_range(), then kicking off a truncate-log flush
 * and running the collected metadata deallocations.
 *
 * NOTE(review): gap-sampled chunk -- the loop construct around the
 * lookup/remove sequence and the error paths are not fully visible;
 * the flow below is inferred from the surviving lines.
 */
231 static int ocfs2_xattr_shrink_size(struct inode *inode,
234 struct buffer_head *root_bh,
235 struct ocfs2_xattr_value_root *xv)
238 u32 trunc_len, cpos, phys_cpos, alloc_size;
240 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
/* Collects metadata blocks freed by extent removal; run at the end. */
241 struct ocfs2_cached_dealloc_ctxt dealloc;
243 ocfs2_init_dealloc_ctxt(&dealloc);
/* Nothing to do if we are not actually shrinking. */
245 if (old_clusters <= new_clusters)
249 trunc_len = old_clusters - new_clusters;
/* Map the logical cluster at cpos to its physical start and the size
 * of the contiguous extent containing it. */
251 ret = ocfs2_xattr_get_clusters(inode, cpos, &phys_cpos,
252 &alloc_size, &xv->xr_list);
/* Clamp to the amount still left to truncate. */
258 if (alloc_size > trunc_len)
259 alloc_size = trunc_len;
261 ret = __ocfs2_remove_xattr_range(inode, root_bh, xv, cpos,
262 phys_cpos, alloc_size,
/* Drop any cached buffers covering the freed clusters. */
269 block = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos);
270 ocfs2_remove_xattr_clusters_from_cache(inode, block,
/* Presumably loops until trunc_len reaches zero -- loop header is on a
 * line not visible in this chunk. */
273 trunc_len -= alloc_size;
/* Schedule an async truncate-log flush and free collected metadata. */
277 ocfs2_schedule_truncate_log_flush(osb, 1);
278 ocfs2_run_deallocs(osb, &dealloc);
/*
 * ocfs2_xattr_value_truncate()
 *
 * Resize the externally-stored xattr value to `len` bytes: extend the
 * allocation when the byte length needs more clusters than currently
 * held, shrink it when it needs fewer, and do nothing when the cluster
 * count is unchanged.
 *
 * NOTE(review): the tail of this function (argument lists of both
 * calls and the closing brace) lies past the end of this chunk.
 */
283 static int ocfs2_xattr_value_truncate(struct inode *inode,
284 struct buffer_head *root_bh,
285 struct ocfs2_xattr_value_root *xv,
/* Round the byte length up to whole clusters for comparison. */
289 u32 new_clusters = ocfs2_clusters_for_bytes(inode->i_sb, len);
290 u32 old_clusters = le32_to_cpu(xv->xr_clusters);
/* Same cluster footprint: nothing to allocate or free. */
292 if (new_clusters == old_clusters)
295 if (new_clusters > old_clusters)
296 ret = ocfs2_xattr_extend_allocation(inode,
297 new_clusters - old_clusters,
300 ret = ocfs2_xattr_shrink_size(inode,
301 old_clusters, new_clusters,