/*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
+ * Copyright (c) 2000-2006 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
-#include "xfs_dir.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
-#include "xfs_dir_leaf.h"
#include "xfs_itable.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_rtalloc.h"
-#include "xfs_refcache.h"
#include "xfs_trans_space.h"
#include "xfs_log_priv.h"
-#include "xfs_mac.h"
-
-
-/*
- * The maximum pathlen is 1024 bytes. Since the minimum file system
- * blocksize is 512 bytes, we can get a max of 2 extents back from
- * bmapi.
- */
-#define SYMLINK_MAPS 2
+#include "xfs_filestream.h"
+#include "xfs_vnodeops.h"
-/*
- * For xfs, we check that the file isn't too big to be opened by this kernel.
- * No other open action is required for regular files. Devices are handled
- * through the specfs file system, pipes through fifofs. Device and
- * fifo vnodes are "wrapped" by specfs and fifofs vnodes, respectively,
- * when a new vnode is first looked up or created.
- */
-STATIC int
+int
xfs_open(
- bhv_desc_t *bdp,
- cred_t *credp)
+ xfs_inode_t *ip)
{
int mode;
- vnode_t *vp;
- xfs_inode_t *ip;
-
- vp = BHV_TO_VNODE(bdp);
- ip = XFS_BHVTOI(bdp);
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
return XFS_ERROR(EIO);
* If it's a directory with any blocks, read-ahead block 0
* as we're almost certain to have the next operation be a read there.
*/
- if (VN_ISDIR(vp) && ip->i_d.di_nextents > 0) {
+ if (S_ISDIR(ip->i_d.di_mode) && ip->i_d.di_nextents > 0) {
mode = xfs_ilock_map_shared(ip);
if (ip->i_d.di_nextents > 0)
(void)xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK);
return 0;
}
-
-/*
- * xfs_getattr
- */
-STATIC int
-xfs_getattr(
- bhv_desc_t *bdp,
- vattr_t *vap,
- int flags,
- cred_t *credp)
-{
- xfs_inode_t *ip;
- xfs_mount_t *mp;
- vnode_t *vp;
-
- vp = BHV_TO_VNODE(bdp);
- vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
-
- ip = XFS_BHVTOI(bdp);
- mp = ip->i_mount;
-
- if (XFS_FORCED_SHUTDOWN(mp))
- return XFS_ERROR(EIO);
-
- if (!(flags & ATTR_LAZY))
- xfs_ilock(ip, XFS_ILOCK_SHARED);
-
- vap->va_size = ip->i_d.di_size;
- if (vap->va_mask == XFS_AT_SIZE)
- goto all_done;
-
- vap->va_nblocks =
- XFS_FSB_TO_BB(mp, ip->i_d.di_nblocks + ip->i_delayed_blks);
- vap->va_nodeid = ip->i_ino;
-#if XFS_BIG_INUMS
- vap->va_nodeid += mp->m_inoadd;
-#endif
- vap->va_nlink = ip->i_d.di_nlink;
-
- /*
- * Quick exit for non-stat callers
- */
- if ((vap->va_mask &
- ~(XFS_AT_SIZE|XFS_AT_FSID|XFS_AT_NODEID|
- XFS_AT_NLINK|XFS_AT_BLKSIZE)) == 0)
- goto all_done;
-
- /*
- * Copy from in-core inode.
- */
- vap->va_mode = ip->i_d.di_mode;
- vap->va_uid = ip->i_d.di_uid;
- vap->va_gid = ip->i_d.di_gid;
- vap->va_projid = ip->i_d.di_projid;
-
- /*
- * Check vnode type block/char vs. everything else.
- */
- switch (ip->i_d.di_mode & S_IFMT) {
- case S_IFBLK:
- case S_IFCHR:
- vap->va_rdev = ip->i_df.if_u2.if_rdev;
- vap->va_blocksize = BLKDEV_IOSIZE;
- break;
- default:
- vap->va_rdev = 0;
-
- if (!(ip->i_d.di_flags & XFS_DIFLAG_REALTIME)) {
- vap->va_blocksize = xfs_preferred_iosize(mp);
- } else {
-
- /*
- * If the file blocks are being allocated from a
- * realtime partition, then return the inode's
- * realtime extent size or the realtime volume's
- * extent size.
- */
- vap->va_blocksize = ip->i_d.di_extsize ?
- (ip->i_d.di_extsize << mp->m_sb.sb_blocklog) :
- (mp->m_sb.sb_rextsize << mp->m_sb.sb_blocklog);
- }
- break;
- }
-
- /* atime is only kept uptodate in the Linux inode */
- vap->va_atime = vp->v_inode.i_atime;
- vap->va_mtime.tv_sec = ip->i_d.di_mtime.t_sec;
- vap->va_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec;
- vap->va_ctime.tv_sec = ip->i_d.di_ctime.t_sec;
- vap->va_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec;
-
- /*
- * Exit for stat callers. See if any of the rest of the fields
- * to be filled in are needed.
- */
- if ((vap->va_mask &
- (XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|
- XFS_AT_GENCOUNT|XFS_AT_VCODE)) == 0)
- goto all_done;
-
- /*
- * Convert di_flags to xflags.
- */
- vap->va_xflags = xfs_ip2xflags(ip);
-
- /*
- * Exit for inode revalidate. See if any of the rest of
- * the fields to be filled in are needed.
- */
- if ((vap->va_mask &
- (XFS_AT_EXTSIZE|XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|
- XFS_AT_GENCOUNT|XFS_AT_VCODE)) == 0)
- goto all_done;
-
- vap->va_extsize = ip->i_d.di_extsize << mp->m_sb.sb_blocklog;
- vap->va_nextents =
- (ip->i_df.if_flags & XFS_IFEXTENTS) ?
- ip->i_df.if_bytes / sizeof(xfs_bmbt_rec_t) :
- ip->i_d.di_nextents;
- if (ip->i_afp)
- vap->va_anextents =
- (ip->i_afp->if_flags & XFS_IFEXTENTS) ?
- ip->i_afp->if_bytes / sizeof(xfs_bmbt_rec_t) :
- ip->i_d.di_anextents;
- else
- vap->va_anextents = 0;
- vap->va_gen = ip->i_d.di_gen;
-
- all_done:
- if (!(flags & ATTR_LAZY))
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
- return 0;
-}
-
-
/*
* xfs_setattr
*/
int
xfs_setattr(
- bhv_desc_t *bdp,
- vattr_t *vap,
+ xfs_inode_t *ip,
+ bhv_vattr_t *vap,
int flags,
cred_t *credp)
{
- xfs_inode_t *ip;
+ xfs_mount_t *mp = ip->i_mount;
xfs_trans_t *tp;
- xfs_mount_t *mp;
int mask;
int code;
uint lock_flags;
uid_t uid=0, iuid=0;
gid_t gid=0, igid=0;
int timeflags = 0;
- vnode_t *vp;
xfs_prid_t projid=0, iprojid=0;
- int mandlock_before, mandlock_after;
struct xfs_dquot *udqp, *gdqp, *olddquot1, *olddquot2;
int file_owner;
int need_iolock = 1;
- vp = BHV_TO_VNODE(bdp);
- vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
+ xfs_itrace_entry(ip);
- if (vp->v_vfsp->vfs_flag & VFS_RDONLY)
+ if (mp->m_flags & XFS_MOUNT_RDONLY)
return XFS_ERROR(EROFS);
/*
return XFS_ERROR(EINVAL);
}
- ip = XFS_BHVTOI(bdp);
- mp = ip->i_mount;
-
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
code = XFS_QM_DQVOPALLOC(mp, ip, uid, gid, projid, qflags,
&udqp, &gdqp);
if (code)
- return (code);
+ return code;
}
/*
*/
tp = NULL;
lock_flags = XFS_ILOCK_EXCL;
- ASSERT(flags & ATTR_NOLOCK ? flags & ATTR_DMI : 1);
if (flags & ATTR_NOLOCK)
need_iolock = 0;
if (!(mask & XFS_AT_SIZE)) {
}
}
} else {
- if (DM_EVENT_ENABLED (vp->v_vfsp, ip, DM_EVENT_TRUNCATE) &&
+ if (DM_EVENT_ENABLED(ip, DM_EVENT_TRUNCATE) &&
!(flags & ATTR_DMI)) {
int dmflags = AT_DELAY_FLAG(flags) | DM_SEM_FLAG_WR;
- code = XFS_SEND_DATA(mp, DM_EVENT_TRUNCATE, vp,
+ code = XFS_SEND_DATA(mp, DM_EVENT_TRUNCATE, ip,
vap->va_size, 0, dmflags, NULL);
if (code) {
lock_flags = 0;
m |= S_ISGID;
#if 0
/* Linux allows this, Irix doesn't. */
- if ((vap->va_mode & S_ISVTX) && !VN_ISDIR(vp))
+ if ((vap->va_mode & S_ISVTX) && !S_ISDIR(ip->i_d.di_mode))
m |= S_ISVTX;
#endif
if (m && !capable(CAP_FSETID))
if (mask & XFS_AT_SIZE) {
/* Short circuit the truncate case for zero length files */
if ((vap->va_size == 0) &&
- (ip->i_d.di_size == 0) && (ip->i_d.di_nextents == 0)) {
+ (ip->i_size == 0) && (ip->i_d.di_nextents == 0)) {
xfs_iunlock(ip, XFS_ILOCK_EXCL);
lock_flags &= ~XFS_ILOCK_EXCL;
if (mask & XFS_AT_CTIME)
goto error_return;
}
- if (VN_ISDIR(vp)) {
+ if (S_ISDIR(ip->i_d.di_mode)) {
code = XFS_ERROR(EISDIR);
goto error_return;
- } else if (!VN_ISREG(vp)) {
+ } else if (!S_ISREG(ip->i_d.di_mode)) {
code = XFS_ERROR(EINVAL);
goto error_return;
}
*/
if ((ip->i_d.di_nextents || ip->i_delayed_blks) &&
(mask & XFS_AT_XFLAGS) &&
- (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) !=
+ (XFS_IS_REALTIME_INODE(ip)) !=
(vap->va_xflags & XFS_XFLAG_REALTIME)) {
code = XFS_ERROR(EINVAL); /* EFBIG? */
goto error_return;
if ((mask & XFS_AT_EXTSIZE) && vap->va_extsize != 0) {
xfs_extlen_t size;
- if ((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ||
+ if (XFS_IS_REALTIME_INODE(ip) ||
((mask & XFS_AT_XFLAGS) &&
(vap->va_xflags & XFS_XFLAG_REALTIME))) {
size = mp->m_sb.sb_rextsize <<
*/
if (mask & XFS_AT_SIZE) {
code = 0;
- if ((vap->va_size > ip->i_d.di_size) &&
+ if ((vap->va_size > ip->i_size) &&
(flags & ATTR_NOSIZETOK) == 0) {
code = xfs_igrow_start(ip, vap->va_size, credp);
}
xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
+ /*
+ * We are going to log the inode size change in this
+ * transaction so any previous writes that are beyond the on
+ * disk EOF and the new EOF that have not been written out need
+ * to be written here. If we do not write the data out, we
+ * expose ourselves to the null files problem.
+ *
+		 * Only flush from the on disk size to the smaller of the in
+		 * memory file size or the new size, as that's the only range
+		 * we really care about here; this avoids waiting for writeout
+		 * of other data outside that range.
+ */
+ if (!code &&
+ (ip->i_size != ip->i_d.di_size) &&
+ (vap->va_size > ip->i_d.di_size)) {
+ code = xfs_flush_pages(ip,
+ ip->i_d.di_size, vap->va_size,
+ XFS_B_ASYNC, FI_NONE);
+ }
+
+ /* wait for all I/O to complete */
+ vn_iowait(ip);
+
if (!code)
code = xfs_itruncate_data(ip, vap->va_size);
if (code) {
xfs_trans_ihold(tp, ip);
}
- /* determine whether mandatory locking mode changes */
- mandlock_before = MANDLOCK(vp, ip->i_d.di_mode);
-
/*
* Truncate file. Must have write permission and not be a directory.
*/
if (mask & XFS_AT_SIZE) {
- if (vap->va_size > ip->i_d.di_size) {
+ /*
+ * Only change the c/mtime if we are changing the size
+ * or we are explicitly asked to change it. This handles
+ * the semantic difference between truncate() and ftruncate()
+ * as implemented in the VFS.
+ */
+ if (vap->va_size != ip->i_size || (mask & XFS_AT_CTIME))
+ timeflags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
+
+ if (vap->va_size > ip->i_size) {
xfs_igrow_finish(tp, ip, vap->va_size,
!(flags & ATTR_DMI));
- } else if ((vap->va_size <= ip->i_d.di_size) ||
+ } else if ((vap->va_size <= ip->i_size) ||
((vap->va_size == 0) && ip->i_d.di_nextents)) {
/*
* signal a sync transaction unless
((ip->i_d.di_nlink != 0 ||
!(mp->m_flags & XFS_MOUNT_WSYNC))
? 1 : 0));
- if (code) {
+ if (code)
goto abort_return;
- }
+ /*
+ * Truncated "down", so we're removing references
+ * to old data here - if we now delay flushing for
+ * a long time, we expose ourselves unduly to the
+ * notorious NULL files problem. So, we mark this
+ * vnode and flush it when the file is closed, and
+ * do not wait the usual (long) time for writeout.
+ */
+ xfs_iflags_set(ip, XFS_ITRUNCATED);
}
- /*
- * Have to do this even if the file's size doesn't change.
- */
- timeflags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
}
/*
di_flags |= XFS_DIFLAG_NODUMP;
if (vap->va_xflags & XFS_XFLAG_PROJINHERIT)
di_flags |= XFS_DIFLAG_PROJINHERIT;
+ if (vap->va_xflags & XFS_XFLAG_NODEFRAG)
+ di_flags |= XFS_DIFLAG_NODEFRAG;
+ if (vap->va_xflags & XFS_XFLAG_FILESTREAM)
+ di_flags |= XFS_DIFLAG_FILESTREAM;
if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
if (vap->va_xflags & XFS_XFLAG_RTINHERIT)
di_flags |= XFS_DIFLAG_RTINHERIT;
if (vap->va_xflags & XFS_XFLAG_EXTSZINHERIT)
di_flags |= XFS_DIFLAG_EXTSZINHERIT;
} else if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) {
- if (vap->va_xflags & XFS_XFLAG_REALTIME) {
+ if (vap->va_xflags & XFS_XFLAG_REALTIME)
di_flags |= XFS_DIFLAG_REALTIME;
- ip->i_iocore.io_flags |= XFS_IOCORE_RT;
- } else {
- ip->i_iocore.io_flags &= ~XFS_IOCORE_RT;
- }
if (vap->va_xflags & XFS_XFLAG_EXTSIZE)
di_flags |= XFS_DIFLAG_EXTSIZE;
}
* If this is a synchronous mount, make sure that the
* transaction goes to disk before returning to the user.
* This is slightly sub-optimal in that truncates require
- * two sync transactions instead of one for wsync filesytems.
+ * two sync transactions instead of one for wsync filesystems.
* One for the truncate and one for the timestamps since we
* don't want to change the timestamps unless we're sure the
* truncate worked. Truncates are less than 1% of the laddis
if (mp->m_flags & XFS_MOUNT_WSYNC)
xfs_trans_set_sync(tp);
- code = xfs_trans_commit(tp, commit_flags, NULL);
- }
-
- /*
- * If the (regular) file's mandatory locking mode changed, then
- * notify the vnode. We do this under the inode lock to prevent
- * racing calls to vop_vnode_change.
- */
- mandlock_after = MANDLOCK(vp, ip->i_d.di_mode);
- if (mandlock_before != mandlock_after) {
- VOP_VNODE_CHANGE(vp, VCHANGE_FLAGS_ENF_LOCKING,
- mandlock_after);
+ code = xfs_trans_commit(tp, commit_flags);
}
xfs_iunlock(ip, lock_flags);
return code;
}
- if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_ATTRIBUTE) &&
+ if (DM_EVENT_ENABLED(ip, DM_EVENT_ATTRIBUTE) &&
!(flags & ATTR_DMI)) {
- (void) XFS_SEND_NAMESP(mp, DM_EVENT_ATTRIBUTE, vp, DM_RIGHT_NULL,
+ (void) XFS_SEND_NAMESP(mp, DM_EVENT_ATTRIBUTE, ip, DM_RIGHT_NULL,
NULL, DM_RIGHT_NULL, NULL, NULL,
0, 0, AT_DELAY_FLAG(flags));
}
return code;
}
-
/*
- * xfs_access
- * Null conversion from vnode mode bits to inode mode bits, as in efs.
+ * The maximum pathlen is 1024 bytes. Since the minimum file system
+ * blocksize is 512 bytes, we can get a max of 2 extents back from
+ * bmapi.
*/
+#define SYMLINK_MAPS 2
+
STATIC int
-xfs_access(
- bhv_desc_t *bdp,
- int mode,
- cred_t *credp)
+xfs_readlink_bmap(
+ xfs_inode_t *ip,
+ char *link)
{
- xfs_inode_t *ip;
- int error;
+ xfs_mount_t *mp = ip->i_mount;
+ int pathlen = ip->i_d.di_size;
+ int nmaps = SYMLINK_MAPS;
+ xfs_bmbt_irec_t mval[SYMLINK_MAPS];
+ xfs_daddr_t d;
+ int byte_cnt;
+ int n;
+ xfs_buf_t *bp;
+ int error = 0;
- vn_trace_entry(BHV_TO_VNODE(bdp), __FUNCTION__,
- (inst_t *)__return_address);
+ error = xfs_bmapi(NULL, ip, 0, XFS_B_TO_FSB(mp, pathlen), 0, NULL, 0,
+ mval, &nmaps, NULL, NULL);
+ if (error)
+ goto out;
- ip = XFS_BHVTOI(bdp);
- xfs_ilock(ip, XFS_ILOCK_SHARED);
- error = xfs_iaccess(ip, mode, credp);
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
+ for (n = 0; n < nmaps; n++) {
+ d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
+ byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
+
+ bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), 0);
+ error = XFS_BUF_GETERROR(bp);
+ if (error) {
+ xfs_ioerror_alert("xfs_readlink",
+ ip->i_mount, bp, XFS_BUF_ADDR(bp));
+ xfs_buf_relse(bp);
+ goto out;
+ }
+ if (pathlen < byte_cnt)
+ byte_cnt = pathlen;
+ pathlen -= byte_cnt;
+
+ memcpy(link, XFS_BUF_PTR(bp), byte_cnt);
+ xfs_buf_relse(bp);
+ }
+
+ link[ip->i_d.di_size] = '\0';
+ error = 0;
+
+ out:
return error;
}
-
-/*
- * xfs_readlink
- *
- */
-STATIC int
+int
xfs_readlink(
- bhv_desc_t *bdp,
- uio_t *uiop,
- int ioflags,
- cred_t *credp)
+ xfs_inode_t *ip,
+ char *link)
{
- xfs_inode_t *ip;
- int count;
- xfs_off_t offset;
+ xfs_mount_t *mp = ip->i_mount;
int pathlen;
- vnode_t *vp;
int error = 0;
- xfs_mount_t *mp;
- int nmaps;
- xfs_bmbt_irec_t mval[SYMLINK_MAPS];
- xfs_daddr_t d;
- int byte_cnt;
- int n;
- xfs_buf_t *bp;
-
- vp = BHV_TO_VNODE(bdp);
- vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
- ip = XFS_BHVTOI(bdp);
- mp = ip->i_mount;
+ xfs_itrace_entry(ip);
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
xfs_ilock(ip, XFS_ILOCK_SHARED);
ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);
+ ASSERT(ip->i_d.di_size <= MAXPATHLEN);
- offset = uiop->uio_offset;
- count = uiop->uio_resid;
-
- if (offset < 0) {
- error = XFS_ERROR(EINVAL);
- goto error_return;
- }
- if (count <= 0) {
- error = 0;
- goto error_return;
- }
-
- /*
- * See if the symlink is stored inline.
- */
- pathlen = (int)ip->i_d.di_size;
+ pathlen = ip->i_d.di_size;
+ if (!pathlen)
+ goto out;
if (ip->i_df.if_flags & XFS_IFINLINE) {
- error = uio_read(ip->i_df.if_u1.if_data, pathlen, uiop);
- }
- else {
- /*
- * Symlink not inline. Call bmap to get it in.
- */
- nmaps = SYMLINK_MAPS;
-
- error = xfs_bmapi(NULL, ip, 0, XFS_B_TO_FSB(mp, pathlen),
- 0, NULL, 0, mval, &nmaps, NULL);
-
- if (error) {
- goto error_return;
- }
-
- for (n = 0; n < nmaps; n++) {
- d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
- byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
- bp = xfs_buf_read(mp->m_ddev_targp, d,
- BTOBB(byte_cnt), 0);
- error = XFS_BUF_GETERROR(bp);
- if (error) {
- xfs_ioerror_alert("xfs_readlink",
- ip->i_mount, bp, XFS_BUF_ADDR(bp));
- xfs_buf_relse(bp);
- goto error_return;
- }
- if (pathlen < byte_cnt)
- byte_cnt = pathlen;
- pathlen -= byte_cnt;
-
- error = uio_read(XFS_BUF_PTR(bp), byte_cnt, uiop);
- xfs_buf_relse (bp);
- }
-
+ memcpy(link, ip->i_df.if_u1.if_data, pathlen);
+ link[pathlen] = '\0';
+ } else {
+ error = xfs_readlink_bmap(ip, link);
}
-
-error_return:
-
+ out:
xfs_iunlock(ip, XFS_ILOCK_SHARED);
-
return error;
}
-
/*
* xfs_fsync
*
- * This is called to sync the inode and its data out to disk.
- * We need to hold the I/O lock while flushing the data, and
- * the inode lock while flushing the inode. The inode lock CANNOT
- * be held while flushing the data, so acquire after we're done
- * with that.
+ * This is called to sync the inode and its data out to disk. We need to hold
+ * the I/O lock while flushing the data, and the inode lock while flushing the
+ * inode.  The inode lock CANNOT be held while flushing the data, so acquire
+ * it after we're done with that.
*/
-STATIC int
+int
xfs_fsync(
- bhv_desc_t *bdp,
- int flag,
- cred_t *credp,
- xfs_off_t start,
- xfs_off_t stop)
+ xfs_inode_t *ip)
{
- xfs_inode_t *ip;
xfs_trans_t *tp;
int error;
int log_flushed = 0, changed = 1;
- vn_trace_entry(BHV_TO_VNODE(bdp),
- __FUNCTION__, (inst_t *)__return_address);
-
- ip = XFS_BHVTOI(bdp);
-
- ASSERT(start >= 0 && stop >= -1);
+ xfs_itrace_entry(ip);
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
return XFS_ERROR(EIO);
+ /* capture size updates in I/O completion before writing the inode. */
+ error = filemap_fdatawait(vn_to_inode(XFS_ITOV(ip))->i_mapping);
+ if (error)
+ return XFS_ERROR(error);
+
/*
- * We always need to make sure that the required inode state
- * is safe on disk. The vnode might be clean but because
- * of committed transactions that haven't hit the disk yet.
- * Likewise, there could be unflushed non-transactional
- * changes to the inode core that have to go to disk.
+ * We always need to make sure that the required inode state is safe on
+ * disk. The vnode might be clean but we still might need to force the
+ * log because of committed transactions that haven't hit the disk yet.
+ * Likewise, there could be unflushed non-transactional changes to the
+ * inode core that have to go to disk and this requires us to issue
+ * a synchronous transaction to capture these changes correctly.
*
- * The following code depends on one assumption: that
- * any transaction that changes an inode logs the core
- * because it has to change some field in the inode core
- * (typically nextents or nblocks). That assumption
- * implies that any transactions against an inode will
- * catch any non-transactional updates. If inode-altering
- * transactions exist that violate this assumption, the
- * code breaks. Right now, it figures that if the involved
- * update_* field is clear and the inode is unpinned, the
- * inode is clean. Either it's been flushed or it's been
- * committed and the commit has hit the disk unpinning the inode.
- * (Note that xfs_inode_item_format() called at commit clears
- * the update_* fields.)
+ * This code relies on the assumption that if the update_* fields
+ * of the inode are clear and the inode is unpinned then it is clean
+ * and no action is required.
*/
xfs_ilock(ip, XFS_ILOCK_SHARED);
- /* If we are flushing data then we care about update_size
- * being set, otherwise we care about update_core
- */
- if ((flag & FSYNC_DATA) ?
- (ip->i_update_size == 0) :
- (ip->i_update_core == 0)) {
+ if (!(ip->i_update_size || ip->i_update_core)) {
/*
- * Timestamps/size haven't changed since last inode
- * flush or inode transaction commit. That means
- * either nothing got written or a transaction
- * committed which caught the updates. If the
- * latter happened and the transaction hasn't
- * hit the disk yet, the inode will be still
- * be pinned. If it is, force the log.
+ * Timestamps/size haven't changed since last inode flush or
+ * inode transaction commit. That means either nothing got
+ * written or a transaction committed which caught the updates.
+ * If the latter happened and the transaction hasn't hit the
+		 * disk yet, the inode will still be pinned.  If it is,
+ * force the log.
*/
xfs_iunlock(ip, XFS_ILOCK_SHARED);
if (xfs_ipincount(ip)) {
- _xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
- XFS_LOG_FORCE |
- ((flag & FSYNC_WAIT)
- ? XFS_LOG_SYNC : 0),
+ error = _xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
+ XFS_LOG_FORCE | XFS_LOG_SYNC,
&log_flushed);
} else {
/*
- * If the inode is not pinned and nothing
- * has changed we don't need to flush the
- * cache.
+ * If the inode is not pinned and nothing has changed
+ * we don't need to flush the cache.
*/
changed = 0;
}
- error = 0;
} else {
/*
- * Kick off a transaction to log the inode
- * core to get the updates. Make it
- * sync if FSYNC_WAIT is passed in (which
- * is done by everybody but specfs). The
- * sync transaction will also force the log.
+ * Kick off a transaction to log the inode core to get the
+ * updates. The sync transaction will also force the log.
*/
xfs_iunlock(ip, XFS_ILOCK_SHARED);
tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS);
- if ((error = xfs_trans_reserve(tp, 0,
- XFS_FSYNC_TS_LOG_RES(ip->i_mount),
- 0, 0, 0))) {
+ error = xfs_trans_reserve(tp, 0,
+ XFS_FSYNC_TS_LOG_RES(ip->i_mount), 0, 0, 0);
+ if (error) {
xfs_trans_cancel(tp, 0);
return error;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
/*
- * Note - it's possible that we might have pushed
- * ourselves out of the way during trans_reserve
- * which would flush the inode. But there's no
- * guarantee that the inode buffer has actually
- * gone out yet (it's delwri). Plus the buffer
- * could be pinned anyway if it's part of an
- * inode in another recent transaction. So we
- * play it safe and fire off the transaction anyway.
+ * Note - it's possible that we might have pushed ourselves out
+ * of the way during trans_reserve which would flush the inode.
+ * But there's no guarantee that the inode buffer has actually
+ * gone out yet (it's delwri). Plus the buffer could be pinned
+ * anyway if it's part of an inode in another recent
+ * transaction. So we play it safe and fire off the
+ * transaction anyway.
*/
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- if (flag & FSYNC_WAIT)
- xfs_trans_set_sync(tp);
- error = _xfs_trans_commit(tp, 0, NULL, &log_flushed);
+ xfs_trans_set_sync(tp);
+ error = _xfs_trans_commit(tp, 0, &log_flushed);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
/*
* If this inode is on the RT dev we need to flush that
- * cache aswell.
+ * cache as well.
*/
- if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME)
+ if (XFS_IS_REALTIME_INODE(ip))
xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp);
}
}
/*
- * This is called by xfs_inactive to free any blocks beyond eof,
- * when the link count isn't zero.
+ * This is called by xfs_inactive to free any blocks beyond eof
+ * when the link count isn't zero and by xfs_dm_punch_hole() when
+ * punching a hole to EOF.
*/
-STATIC int
-xfs_inactive_free_eofblocks(
+int
+xfs_free_eofblocks(
xfs_mount_t *mp,
- xfs_inode_t *ip)
+ xfs_inode_t *ip,
+ int flags)
{
xfs_trans_t *tp;
int error;
xfs_filblks_t map_len;
int nimaps;
xfs_bmbt_irec_t imap;
+ int use_iolock = (flags & XFS_FREE_EOF_LOCK);
/*
* Figure out if there are any blocks beyond the end
* of the file. If not, then there is nothing to do.
*/
- end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)ip->i_d.di_size));
+ end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)ip->i_size));
last_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
map_len = last_fsb - end_fsb;
if (map_len <= 0)
- return (0);
+ return 0;
nimaps = 1;
xfs_ilock(ip, XFS_ILOCK_SHARED);
error = xfs_bmapi(NULL, ip, end_fsb, map_len, 0,
- NULL, 0, &imap, &nimaps, NULL);
+ NULL, 0, &imap, &nimaps, NULL, NULL);
xfs_iunlock(ip, XFS_ILOCK_SHARED);
if (!error && (nimaps != 0) &&
* Attach the dquots to the inode up front.
*/
if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
- return (error);
+ return error;
/*
* There are blocks after the end of file.
* cache and we can't
* do that within a transaction.
*/
- xfs_ilock(ip, XFS_IOLOCK_EXCL);
- xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE,
- ip->i_d.di_size);
+ if (use_iolock)
+ xfs_ilock(ip, XFS_IOLOCK_EXCL);
+ error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE,
+ ip->i_size);
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ if (use_iolock)
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+ return error;
+ }
error = xfs_trans_reserve(tp, 0,
XFS_ITRUNCATE_LOG_RES(mp),
ASSERT(XFS_FORCED_SHUTDOWN(mp));
xfs_trans_cancel(tp, 0);
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- return (error);
+ return error;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
error = xfs_itruncate_finish(&tp, ip,
- ip->i_d.di_size,
+ ip->i_size,
XFS_DATA_FORK,
0);
/*
XFS_TRANS_ABORT));
} else {
error = xfs_trans_commit(tp,
- XFS_TRANS_RELEASE_LOG_RES,
- NULL);
+ XFS_TRANS_RELEASE_LOG_RES);
}
- xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
+ xfs_iunlock(ip, (use_iolock ? (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)
+ : XFS_ILOCK_EXCL));
}
- return (error);
+ return error;
}
/*
*/
done = 0;
XFS_BMAP_INIT(&free_list, &first_block);
- nmaps = sizeof(mval) / sizeof(mval[0]);
+ nmaps = ARRAY_SIZE(mval);
if ((error = xfs_bmapi(tp, ip, 0, XFS_B_TO_FSB(mp, size),
XFS_BMAPI_METADATA, &first_block, 0, mval, &nmaps,
- &free_list)))
+ &free_list, NULL)))
goto error0;
/*
* Invalidate the block(s).
* Unmap the dead block(s) to the free_list.
*/
if ((error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps,
- &first_block, &free_list, &done)))
+ &first_block, &free_list, NULL, &done)))
goto error1;
ASSERT(done);
/*
* Commit the first transaction. This logs the EFI and the inode.
*/
- if ((error = xfs_bmap_finish(&tp, &free_list, first_block, &committed)))
+ if ((error = xfs_bmap_finish(&tp, &free_list, &committed)))
goto error1;
/*
* The transaction must have been committed, since there were
*/
ntp = xfs_trans_dup(tp);
/*
- * Commit the transaction containing extent freeing and EFD's.
+ * Commit the transaction containing extent freeing and EFDs.
* If we get an error on the commit here or on the reserve below,
* we need to unlock the inode since the new transaction doesn't
* have the inode attached.
*/
- error = xfs_trans_commit(tp, 0, NULL);
+ error = xfs_trans_commit(tp, 0);
tp = ntp;
if (error) {
ASSERT(XFS_FORCED_SHUTDOWN(mp));
if (error) {
xfs_trans_cancel(*tpp, 0);
*tpp = NULL;
- return (error);
+ return error;
}
xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
XFS_DATA_FORK);
ASSERT(ip->i_df.if_bytes == 0);
}
- return (0);
+ return 0;
}
-/*
- *
- */
STATIC int
xfs_inactive_attrs(
xfs_inode_t *ip,
int error;
xfs_mount_t *mp;
- ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE));
+ ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
tp = *tpp;
mp = ip->i_mount;
ASSERT(ip->i_d.di_forkoff != 0);
- xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ if (error)
+ goto error_unlock;
error = xfs_attr_inactive(ip);
- if (error) {
- *tpp = NULL;
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- return (error); /* goto out*/
- }
+ if (error)
+ goto error_unlock;
tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
error = xfs_trans_reserve(tp, 0,
XFS_IFREE_LOG_RES(mp),
0, XFS_TRANS_PERM_LOG_RES,
XFS_INACTIVE_LOG_COUNT);
- if (error) {
- ASSERT(XFS_FORCED_SHUTDOWN(mp));
- xfs_trans_cancel(tp, 0);
- *tpp = NULL;
- xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- return (error);
- }
+ if (error)
+ goto error_cancel;
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
ASSERT(ip->i_d.di_anextents == 0);
*tpp = tp;
- return (0);
+ return 0;
+
+error_cancel:
+ ASSERT(XFS_FORCED_SHUTDOWN(mp));
+ xfs_trans_cancel(tp, 0);
+error_unlock:
+ *tpp = NULL;
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+ return error;
}
-STATIC int
+int
xfs_release(
- bhv_desc_t *bdp)
+ xfs_inode_t *ip)
{
- xfs_inode_t *ip;
- vnode_t *vp;
- xfs_mount_t *mp;
+ bhv_vnode_t *vp = XFS_ITOV(ip);
+ xfs_mount_t *mp = ip->i_mount;
int error;
- vp = BHV_TO_VNODE(bdp);
- ip = XFS_BHVTOI(bdp);
-
- if (!VN_ISREG(vp) || (ip->i_d.di_mode == 0)) {
+ if (!S_ISREG(ip->i_d.di_mode) || (ip->i_d.di_mode == 0))
return 0;
- }
/* If this is a read-only mount, don't do this (would generate I/O) */
- if (vp->v_vfsp->vfs_flag & VFS_RDONLY)
+ if (mp->m_flags & XFS_MOUNT_RDONLY)
return 0;
-#ifdef HAVE_REFCACHE
- /* If we are in the NFS reference cache then don't do this now */
- if (ip->i_refcache)
- return 0;
-#endif
+ if (!XFS_FORCED_SHUTDOWN(mp)) {
+ int truncated;
- mp = ip->i_mount;
+ /*
+ * If we are using filestreams, and we have an unlinked
+ * file that we are processing the last close on, then nothing
+ * will be able to reopen and write to this file. Purge this
+ * inode from the filestreams cache so that it doesn't delay
+ * teardown of the inode.
+ */
+ if ((ip->i_d.di_nlink == 0) && xfs_inode_is_filestream(ip))
+ xfs_filestream_deassociate(ip);
+
+ /*
+ * If we previously truncated this file and removed old data
+ * in the process, we want to initiate "early" writeout on
+ * the last close. This is an attempt to combat the notorious
+		 * NULL files problem which is particularly noticeable from a
+ * truncate down, buffered (re-)write (delalloc), followed by
+ * a crash. What we are effectively doing here is
+ * significantly reducing the time window where we'd otherwise
+ * be exposed to that problem.
+ */
+ truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
+ if (truncated && VN_DIRTY(vp) && ip->i_delayed_blks > 0)
+ xfs_flush_pages(ip, 0, -1, XFS_B_ASYNC, FI_NONE);
+ }
if (ip->i_d.di_nlink != 0) {
if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
- ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0 ||
+ ((ip->i_size > 0) || (VN_CACHED(vp) > 0 ||
ip->i_delayed_blks > 0)) &&
(ip->i_df.if_flags & XFS_IFEXTENTS)) &&
(!(ip->i_d.di_flags &
(XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
- if ((error = xfs_inactive_free_eofblocks(mp, ip)))
- return (error);
- /* Update linux inode block count after free above */
- LINVFS_GET_IP(vp)->i_blocks = XFS_FSB_TO_BB(mp,
- ip->i_d.di_nblocks + ip->i_delayed_blks);
+ error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK);
+ if (error)
+ return error;
}
}
* now be truncated. Also, we clear all of the read-ahead state
* kept for the inode here since the file is now closed.
*/
-STATIC int
+int
xfs_inactive(
- bhv_desc_t *bdp,
- cred_t *credp)
+ xfs_inode_t *ip)
{
- xfs_inode_t *ip;
- vnode_t *vp;
- xfs_bmap_free_t free_list;
+ bhv_vnode_t *vp = XFS_ITOV(ip);
+ xfs_bmap_free_t free_list;
xfs_fsblock_t first_block;
int committed;
xfs_trans_t *tp;
int error;
int truncate;
- vp = BHV_TO_VNODE(bdp);
- vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
-
- ip = XFS_BHVTOI(bdp);
+ xfs_itrace_entry(ip);
/*
* If the inode is already free, then there can be nothing
* only one with a reference to the inode.
*/
truncate = ((ip->i_d.di_nlink == 0) &&
- ((ip->i_d.di_size != 0) || (ip->i_d.di_nextents > 0) ||
- (ip->i_delayed_blks > 0)) &&
+ ((ip->i_d.di_size != 0) || (ip->i_size != 0) ||
+ (ip->i_d.di_nextents > 0) || (ip->i_delayed_blks > 0)) &&
((ip->i_d.di_mode & S_IFMT) == S_IFREG));
mp = ip->i_mount;
- if (ip->i_d.di_nlink == 0 &&
- DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_DESTROY)) {
- (void) XFS_SEND_DESTROY(mp, vp, DM_RIGHT_NULL);
- }
+ if (ip->i_d.di_nlink == 0 && DM_EVENT_ENABLED(ip, DM_EVENT_DESTROY))
+ XFS_SEND_DESTROY(mp, ip, DM_RIGHT_NULL);
error = 0;
/* If this is a read-only mount, don't do this (would generate I/O) */
- if (vp->v_vfsp->vfs_flag & VFS_RDONLY)
+ if (mp->m_flags & XFS_MOUNT_RDONLY)
goto out;
if (ip->i_d.di_nlink != 0) {
if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
- ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0 ||
+ ((ip->i_size > 0) || (VN_CACHED(vp) > 0 ||
ip->i_delayed_blks > 0)) &&
(ip->i_df.if_flags & XFS_IFEXTENTS) &&
(!(ip->i_d.di_flags &
(XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) ||
(ip->i_delayed_blks != 0)))) {
- if ((error = xfs_inactive_free_eofblocks(mp, ip)))
- return (VN_INACTIVE_CACHE);
- /* Update linux inode block count after free above */
- LINVFS_GET_IP(vp)->i_blocks = XFS_FSB_TO_BB(mp,
- ip->i_d.di_nblocks + ip->i_delayed_blks);
+ error = xfs_free_eofblocks(mp, ip, XFS_FREE_EOF_LOCK);
+ if (error)
+ return VN_INACTIVE_CACHE;
}
goto out;
}
ASSERT(ip->i_d.di_nlink == 0);
if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
- return (VN_INACTIVE_CACHE);
+ return VN_INACTIVE_CACHE;
tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
if (truncate) {
*/
xfs_ilock(ip, XFS_IOLOCK_EXCL);
- xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, 0);
+ error = xfs_itruncate_start(ip, XFS_ITRUNC_DEFINITE, 0);
+ if (error) {
+ xfs_trans_cancel(tp, 0);
+ xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+ return VN_INACTIVE_CACHE;
+ }
error = xfs_trans_reserve(tp, 0,
XFS_ITRUNCATE_LOG_RES(mp),
ASSERT(XFS_FORCED_SHUTDOWN(mp));
xfs_trans_cancel(tp, 0);
xfs_iunlock(ip, XFS_IOLOCK_EXCL);
- return (VN_INACTIVE_CACHE);
+ return VN_INACTIVE_CACHE;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_cancel(tp,
XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
- return (VN_INACTIVE_CACHE);
+ return VN_INACTIVE_CACHE;
}
} else if ((ip->i_d.di_mode & S_IFMT) == S_IFLNK) {
if (error) {
ASSERT(tp == NULL);
- return (VN_INACTIVE_CACHE);
+ return VN_INACTIVE_CACHE;
}
xfs_trans_ijoin(tp, ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
if (error) {
ASSERT(XFS_FORCED_SHUTDOWN(mp));
xfs_trans_cancel(tp, 0);
- return (VN_INACTIVE_CACHE);
+ return VN_INACTIVE_CACHE;
}
xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
* cancelled, and the inode is unlocked. Just get out.
*/
if (error)
- return (VN_INACTIVE_CACHE);
+ return VN_INACTIVE_CACHE;
} else if (ip->i_afp) {
xfs_idestroy_fork(ip, XFS_ATTR_FORK);
}
cmn_err(CE_NOTE,
"xfs_inactive: xfs_ifree() returned an error = %d on %s",
error, mp->m_fsname);
- xfs_force_shutdown(mp, XFS_METADATA_IO_ERROR);
+ xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
}
xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
} else {
XFS_TRANS_MOD_DQUOT_BYINO(mp, tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
/*
- * Just ignore errors at this point. There is
- * nothing we can do except to try to keep going.
+ * Just ignore errors at this point. There is nothing we can
+ * do except to try to keep going. Make sure it's not a silent
+ * error.
*/
- (void) xfs_bmap_finish(&tp, &free_list, first_block,
- &committed);
- (void) xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+ error = xfs_bmap_finish(&tp, &free_list, &committed);
+ if (error)
+ xfs_fs_cmn_err(CE_NOTE, mp, "xfs_inactive: "
+ "xfs_bmap_finish() returned error %d", error);
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+ if (error)
+ xfs_fs_cmn_err(CE_NOTE, mp, "xfs_inactive: "
+ "xfs_trans_commit() returned error %d", error);
}
/*
* Release the dquots held by inode, if any.
}
-/*
- * xfs_lookup
- */
-STATIC int
+int
xfs_lookup(
- bhv_desc_t *dir_bdp,
- vname_t *dentry,
- vnode_t **vpp,
- int flags,
- vnode_t *rdir,
- cred_t *credp)
+ xfs_inode_t *dp,
+ struct xfs_name *name,
+ xfs_inode_t **ipp)
{
- xfs_inode_t *dp, *ip;
- xfs_ino_t e_inum;
+ xfs_ino_t inum;
int error;
uint lock_mode;
- vnode_t *dir_vp;
-
- dir_vp = BHV_TO_VNODE(dir_bdp);
- vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address);
- dp = XFS_BHVTOI(dir_bdp);
+ xfs_itrace_entry(dp);
if (XFS_FORCED_SHUTDOWN(dp->i_mount))
return XFS_ERROR(EIO);
lock_mode = xfs_ilock_map_shared(dp);
- error = xfs_dir_lookup_int(dir_bdp, lock_mode, dentry, &e_inum, &ip);
- if (!error) {
- *vpp = XFS_ITOV(ip);
- ITRACE(ip);
- }
+ error = xfs_dir_lookup(NULL, dp, name, &inum);
xfs_iunlock_map_shared(dp, lock_mode);
+
+ if (error)
+ goto out;
+
+ error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp, 0);
+ if (error)
+ goto out;
+
+ xfs_itrace_ref(*ipp);
+ return 0;
+
+ out:
+ *ipp = NULL;
return error;
}
-
-/*
- * xfs_create (create a new file).
- */
-STATIC int
+int
xfs_create(
- bhv_desc_t *dir_bdp,
- vname_t *dentry,
- vattr_t *vap,
- vnode_t **vpp,
+ xfs_inode_t *dp,
+ struct xfs_name *name,
+ mode_t mode,
+ xfs_dev_t rdev,
+ xfs_inode_t **ipp,
cred_t *credp)
{
- char *name = VNAME(dentry);
- vnode_t *dir_vp;
- xfs_inode_t *dp, *ip;
- vnode_t *vp=NULL;
+ xfs_mount_t *mp = dp->i_mount;
+ xfs_inode_t *ip;
xfs_trans_t *tp;
- xfs_mount_t *mp;
- xfs_dev_t rdev;
- int error;
+ int error;
xfs_bmap_free_t free_list;
xfs_fsblock_t first_block;
- boolean_t dp_joined_to_trans;
+ boolean_t unlock_dp_on_error = B_FALSE;
int dm_event_sent = 0;
uint cancel_flags;
int committed;
xfs_prid_t prid;
struct xfs_dquot *udqp, *gdqp;
uint resblks;
- int dm_di_mode;
- int namelen;
-
- ASSERT(!*vpp);
- dir_vp = BHV_TO_VNODE(dir_bdp);
- vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address);
- dp = XFS_BHVTOI(dir_bdp);
- mp = dp->i_mount;
-
- dm_di_mode = vap->va_mode;
- namelen = VNAMELEN(dentry);
+ ASSERT(!*ipp);
+ xfs_itrace_entry(dp);
- if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_CREATE)) {
+ if (DM_EVENT_ENABLED(dp, DM_EVENT_CREATE)) {
error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE,
- dir_vp, DM_RIGHT_NULL, NULL,
- DM_RIGHT_NULL, name, NULL,
- dm_di_mode, 0, 0);
+ dp, DM_RIGHT_NULL, NULL,
+ DM_RIGHT_NULL, name->name, NULL,
+ mode, 0, 0);
if (error)
return error;
udqp = gdqp = NULL;
if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
prid = dp->i_d.di_projid;
- else if (vap->va_mask & XFS_AT_PROJID)
- prid = (xfs_prid_t)vap->va_projid;
else
prid = (xfs_prid_t)dfltprid;
goto std_return;
ip = NULL;
- dp_joined_to_trans = B_FALSE;
tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
- resblks = XFS_CREATE_SPACE_RES(mp, namelen);
+ resblks = XFS_CREATE_SPACE_RES(mp, name->len);
/*
* Initially assume that the file does not exist and
* reserve the resources for that case. If that is not
}
if (error) {
cancel_flags = 0;
- dp = NULL;
goto error_return;
}
- xfs_ilock(dp, XFS_ILOCK_EXCL);
+ xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
+ unlock_dp_on_error = B_TRUE;
XFS_BMAP_INIT(&free_list, &first_block);
if (error)
goto error_return;
- if (resblks == 0 &&
- (error = XFS_DIR_CANENTER(mp, tp, dp, name, namelen)))
+ error = xfs_dir_canenter(tp, dp, name, resblks);
+ if (error)
goto error_return;
- rdev = (vap->va_mask & XFS_AT_RDEV) ? vap->va_rdev : 0;
- error = xfs_dir_ialloc(&tp, dp, vap->va_mode, 1,
+ error = xfs_dir_ialloc(&tp, dp, mode, 1,
rdev, credp, prid, resblks > 0,
&ip, &committed);
if (error) {
goto error_return;
goto abort_return;
}
- ITRACE(ip);
+ xfs_itrace_ref(ip);
/*
* At this point, we've gotten a newly allocated inode.
* It is locked (and joined to the transaction).
*/
- ASSERT(ismrlocked (&ip->i_lock, MR_UPDATE));
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
/*
- * Now we join the directory inode to the transaction.
- * We do not do it earlier because xfs_dir_ialloc
- * might commit the previous transaction (and release
- * all the locks).
+ * Now we join the directory inode to the transaction. We do not do it
+ * earlier because xfs_dir_ialloc might commit the previous transaction
+ * (and release all the locks). An error from here on will result in
+ * the transaction cancel unlocking dp so don't do it explicitly in the
+ * error path.
*/
-
- VN_HOLD(dir_vp);
+ IHOLD(dp);
xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
- dp_joined_to_trans = B_TRUE;
+ unlock_dp_on_error = B_FALSE;
- error = XFS_DIR_CREATENAME(mp, tp, dp, name, namelen, ip->i_ino,
- &first_block, &free_list,
- resblks ? resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
+ error = xfs_dir_createname(tp, dp, name, ip->i_ino,
+ &first_block, &free_list, resblks ?
+ resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
if (error) {
ASSERT(error != ENOSPC);
goto abort_return;
* vnode to the caller, we bump the vnode ref count now.
*/
IHOLD(ip);
- vp = XFS_ITOV(ip);
- error = xfs_bmap_finish(&tp, &free_list, first_block, &committed);
+ error = xfs_bmap_finish(&tp, &free_list, &committed);
if (error) {
xfs_bmap_cancel(&free_list);
goto abort_rele;
}
- error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
if (error) {
IRELE(ip);
tp = NULL;
XFS_QM_DQRELE(mp, udqp);
XFS_QM_DQRELE(mp, gdqp);
- /*
- * Propogate the fact that the vnode changed after the
- * xfs_inode locks have been released.
- */
- VOP_VNODE_CHANGE(vp, VCHANGE_FLAGS_TRUNCATED, 3);
-
- *vpp = vp;
+ *ipp = ip;
/* Fallthrough to std_return with error = 0 */
std_return:
- if ( (*vpp || (error != 0 && dm_event_sent != 0)) &&
- DM_EVENT_ENABLED(dir_vp->v_vfsp, XFS_BHVTOI(dir_bdp),
- DM_EVENT_POSTCREATE)) {
+ if ((*ipp || (error != 0 && dm_event_sent != 0)) &&
+ DM_EVENT_ENABLED(dp, DM_EVENT_POSTCREATE)) {
(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE,
- dir_vp, DM_RIGHT_NULL,
- *vpp ? vp:NULL,
- DM_RIGHT_NULL, name, NULL,
- dm_di_mode, error, 0);
+ dp, DM_RIGHT_NULL,
+ *ipp ? ip : NULL,
+ DM_RIGHT_NULL, name->name, NULL,
+ mode, error, 0);
}
return error;
abort_return:
cancel_flags |= XFS_TRANS_ABORT;
/* FALLTHROUGH */
- error_return:
+ error_return:
if (tp != NULL)
xfs_trans_cancel(tp, cancel_flags);
- if (!dp_joined_to_trans && (dp != NULL))
- xfs_iunlock(dp, XFS_ILOCK_EXCL);
XFS_QM_DQRELE(mp, udqp);
XFS_QM_DQRELE(mp, gdqp);
+ if (unlock_dp_on_error)
+ xfs_iunlock(dp, XFS_ILOCK_EXCL);
+
goto std_return;
abort_rele:
STATIC int
xfs_lock_dir_and_entry(
xfs_inode_t *dp,
- vname_t *dentry,
xfs_inode_t *ip) /* inode of entry 'name' */
{
int attempts;
attempts = 0;
again:
- xfs_ilock(dp, XFS_ILOCK_EXCL);
+ xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
e_inum = ip->i_ino;
- ITRACE(ip);
+ xfs_itrace_ref(ip);
/*
* We want to lock in increasing inum. Since we've already
ips[0] = ip;
ips[1] = dp;
- xfs_lock_inodes(ips, 2, 0, XFS_ILOCK_EXCL);
+ xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL);
}
/* else e_inum == dp->i_ino */
/* This can happen if we're asked to lock /x/..
#endif
/*
+ * Bump the subclass so xfs_lock_inodes() acquires each lock with
+ * a different subclass value.
+ */
+static inline int
+xfs_lock_inumorder(int lock_mode, int subclass)
+{
+ if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
+ lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_IOLOCK_SHIFT;
+ if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))
+ lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_ILOCK_SHIFT;
+
+ return lock_mode;
+}
+
+/*
* The following routine will lock n inodes in exclusive mode.
* We assume the caller calls us with the inodes in i_ino order.
*
xfs_lock_inodes(
xfs_inode_t **ips,
int inodes,
- int first_locked,
uint lock_mode)
{
int attempts = 0, i, j, try_lock;
ASSERT(ips && (inodes >= 2)); /* we need at least two */
- if (first_locked) {
- try_lock = 1;
- i = 1;
- } else {
- try_lock = 0;
- i = 0;
- }
+ try_lock = 0;
+ i = 0;
again:
for (; i < inodes; i++) {
* that is in the AIL.
*/
ASSERT(i != 0);
- if (!xfs_ilock_nowait(ips[i], lock_mode)) {
+ if (!xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i))) {
attempts++;
/*
goto again;
}
} else {
- xfs_ilock(ips[i], lock_mode);
+ xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
}
}
#define REMOVE_DEBUG_TRACE(x)
#endif /* ! DEBUG */
-
-/*
- * xfs_remove
- *
- */
-STATIC int
+int
xfs_remove(
- bhv_desc_t *dir_bdp,
- vname_t *dentry,
- cred_t *credp)
+ xfs_inode_t *dp,
+ struct xfs_name *name,
+ xfs_inode_t *ip)
{
- vnode_t *dir_vp;
- char *name = VNAME(dentry);
- xfs_inode_t *dp, *ip;
+ xfs_mount_t *mp = dp->i_mount;
xfs_trans_t *tp = NULL;
- xfs_mount_t *mp;
int error = 0;
xfs_bmap_free_t free_list;
xfs_fsblock_t first_block;
int cancel_flags;
int committed;
- int dm_di_mode = 0;
int link_zero;
uint resblks;
- int namelen;
- dir_vp = BHV_TO_VNODE(dir_bdp);
- vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address);
-
- dp = XFS_BHVTOI(dir_bdp);
- mp = dp->i_mount;
+ xfs_itrace_entry(dp);
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
- namelen = VNAMELEN(dentry);
-
- if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_REMOVE)) {
- error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE, dir_vp,
- DM_RIGHT_NULL, NULL, DM_RIGHT_NULL,
- name, NULL, 0, 0, 0);
+ if (DM_EVENT_ENABLED(dp, DM_EVENT_REMOVE)) {
+ error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE, dp, DM_RIGHT_NULL,
+ NULL, DM_RIGHT_NULL, name->name, NULL,
+ ip->i_d.di_mode, 0, 0);
if (error)
return error;
}
- /* From this point on, return through std_return */
- ip = NULL;
-
- /*
- * We need to get a reference to ip before we get our log
- * reservation. The reason for this is that we cannot call
- * xfs_iget for an inode for which we do not have a reference
- * once we've acquired a log reservation. This is because the
- * inode we are trying to get might be in xfs_inactive going
- * for a log reservation. Since we'll have to wait for the
- * inactive code to complete before returning from xfs_iget,
- * we need to make sure that we don't have log space reserved
- * when we call xfs_iget. Instead we get an unlocked referece
- * to the inode before getting our log reservation.
- */
- error = xfs_get_dir_entry(dentry, &ip);
- if (error) {
- REMOVE_DEBUG_TRACE(__LINE__);
- goto std_return;
- }
-
- dm_di_mode = ip->i_d.di_mode;
-
- vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address);
-
- ITRACE(ip);
+ xfs_itrace_entry(ip);
+ xfs_itrace_ref(ip);
error = XFS_QM_DQATTACH(mp, dp, 0);
- if (!error && dp != ip)
+ if (!error)
error = XFS_QM_DQATTACH(mp, ip, 0);
if (error) {
REMOVE_DEBUG_TRACE(__LINE__);
- IRELE(ip);
goto std_return;
}
ASSERT(error != ENOSPC);
REMOVE_DEBUG_TRACE(__LINE__);
xfs_trans_cancel(tp, 0);
- IRELE(ip);
return error;
}
- error = xfs_lock_dir_and_entry(dp, dentry, ip);
+ error = xfs_lock_dir_and_entry(dp, ip);
if (error) {
REMOVE_DEBUG_TRACE(__LINE__);
xfs_trans_cancel(tp, cancel_flags);
- IRELE(ip);
goto std_return;
}
* At this point, we've gotten both the directory and the entry
* inodes locked.
*/
+ IHOLD(ip);
xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
- if (dp != ip) {
- /*
- * Increment vnode ref count only in this case since
- * there's an extra vnode reference in the case where
- * dp == ip.
- */
- IHOLD(dp);
- xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- }
+
+ IHOLD(dp);
+ xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
/*
* Entry must exist since we did a lookup in xfs_lock_dir_and_entry.
*/
XFS_BMAP_INIT(&free_list, &first_block);
- error = XFS_DIR_REMOVENAME(mp, tp, dp, name, namelen, ip->i_ino,
- &first_block, &free_list, 0);
+ error = xfs_dir_removename(tp, dp, name, ip->i_ino,
+ &first_block, &free_list, resblks);
if (error) {
ASSERT(error != ENOENT);
REMOVE_DEBUG_TRACE(__LINE__);
link_zero = (ip)->i_d.di_nlink==0;
/*
- * Take an extra ref on the inode so that it doesn't
- * go to xfs_inactive() from within the commit.
- */
- IHOLD(ip);
-
- /*
* If this is a synchronous mount, make sure that the
* remove transaction goes to disk before returning to
* the user.
xfs_trans_set_sync(tp);
}
- error = xfs_bmap_finish(&tp, &free_list, first_block, &committed);
+ error = xfs_bmap_finish(&tp, &free_list, &committed);
if (error) {
REMOVE_DEBUG_TRACE(__LINE__);
goto error_rele;
}
- error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
- if (error) {
- IRELE(ip);
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+ if (error)
goto std_return;
- }
-
- /*
- * Before we drop our extra reference to the inode, purge it
- * from the refcache if it is there. By waiting until afterwards
- * to do the IRELE, we ensure that we won't go inactive in the
- * xfs_refcache_purge_ip routine (although that would be OK).
- */
- xfs_refcache_purge_ip(ip);
-
- vn_trace_exit(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address);
/*
- * Let interposed file systems know about removed links.
+ * If we are using filestreams, kill the stream association.
+ * If the file is still open it may get a new one but that
+ * will get killed on last close in xfs_close() so we don't
+ * have to worry about that.
*/
- VOP_LINK_REMOVED(XFS_ITOV(ip), dir_vp, link_zero);
+ if (link_zero && xfs_inode_is_filestream(ip))
+ xfs_filestream_deassociate(ip);
- IRELE(ip);
+ xfs_itrace_exit(ip);
/* Fall through to std_return with error = 0 */
std_return:
- if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp,
- DM_EVENT_POSTREMOVE)) {
+ if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTREMOVE)) {
(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE,
- dir_vp, DM_RIGHT_NULL,
+ dp, DM_RIGHT_NULL,
NULL, DM_RIGHT_NULL,
- name, NULL, dm_di_mode, error, 0);
+ name->name, NULL, ip->i_d.di_mode, error, 0);
}
return error;
* transaction which can easily deadlock with the current one.
*/
xfs_bmap_cancel(&free_list);
- cancel_flags |= XFS_TRANS_ABORT;
- xfs_trans_cancel(tp, cancel_flags);
-
- /*
- * Before we drop our extra reference to the inode, purge it
- * from the refcache if it is there. By waiting until afterwards
- * to do the IRELE, we ensure that we won't go inactive in the
- * xfs_refcache_purge_ip routine (although that would be OK).
- */
- xfs_refcache_purge_ip(ip);
-
- IRELE(ip);
+ cancel_flags |= XFS_TRANS_ABORT;
+ xfs_trans_cancel(tp, cancel_flags);
goto std_return;
}
-
-/*
- * xfs_link
- *
- */
-STATIC int
+int
xfs_link(
- bhv_desc_t *target_dir_bdp,
- vnode_t *src_vp,
- vname_t *dentry,
- cred_t *credp)
+ xfs_inode_t *tdp,
+ xfs_inode_t *sip,
+ struct xfs_name *target_name)
{
- xfs_inode_t *tdp, *sip;
+ xfs_mount_t *mp = tdp->i_mount;
xfs_trans_t *tp;
- xfs_mount_t *mp;
xfs_inode_t *ips[2];
int error;
xfs_bmap_free_t free_list;
xfs_fsblock_t first_block;
int cancel_flags;
int committed;
- vnode_t *target_dir_vp;
int resblks;
- char *target_name = VNAME(dentry);
- int target_namelen;
- target_dir_vp = BHV_TO_VNODE(target_dir_bdp);
- vn_trace_entry(target_dir_vp, __FUNCTION__, (inst_t *)__return_address);
- vn_trace_entry(src_vp, __FUNCTION__, (inst_t *)__return_address);
+ xfs_itrace_entry(tdp);
+ xfs_itrace_entry(sip);
- target_namelen = VNAMELEN(dentry);
- if (VN_ISDIR(src_vp))
- return XFS_ERROR(EPERM);
+ ASSERT(!S_ISDIR(sip->i_d.di_mode));
- sip = xfs_vtoi(src_vp);
- tdp = XFS_BHVTOI(target_dir_bdp);
- mp = tdp->i_mount;
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
- if (DM_EVENT_ENABLED(src_vp->v_vfsp, tdp, DM_EVENT_LINK)) {
+ if (DM_EVENT_ENABLED(tdp, DM_EVENT_LINK)) {
error = XFS_SEND_NAMESP(mp, DM_EVENT_LINK,
- target_dir_vp, DM_RIGHT_NULL,
- src_vp, DM_RIGHT_NULL,
- target_name, NULL, 0, 0, 0);
+ tdp, DM_RIGHT_NULL,
+ sip, DM_RIGHT_NULL,
+ target_name->name, NULL, 0, 0, 0);
if (error)
return error;
}
tp = xfs_trans_alloc(mp, XFS_TRANS_LINK);
cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
- resblks = XFS_LINK_SPACE_RES(mp, target_namelen);
+ resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
error = xfs_trans_reserve(tp, resblks, XFS_LINK_LOG_RES(mp), 0,
XFS_TRANS_PERM_LOG_RES, XFS_LINK_LOG_COUNT);
if (error == ENOSPC) {
ips[1] = sip;
}
- xfs_lock_inodes(ips, 2, 0, XFS_ILOCK_EXCL);
+ xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL);
/*
* Increment vnode ref counts since xfs_trans_commit &
* xfs_trans_cancel will both unlock the inodes and
* decrement the associated ref counts.
*/
- VN_HOLD(src_vp);
- VN_HOLD(target_dir_vp);
+ IHOLD(sip);
+ IHOLD(tdp);
xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
*/
if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
(tdp->i_d.di_projid != sip->i_d.di_projid))) {
- error = XFS_ERROR(EPERM);
+ error = XFS_ERROR(EXDEV);
goto error_return;
}
- if (resblks == 0 &&
- (error = XFS_DIR_CANENTER(mp, tp, tdp, target_name,
- target_namelen)))
+ error = xfs_dir_canenter(tp, tdp, target_name, resblks);
+ if (error)
goto error_return;
XFS_BMAP_INIT(&free_list, &first_block);
- error = XFS_DIR_CREATENAME(mp, tp, tdp, target_name, target_namelen,
- sip->i_ino, &first_block, &free_list,
- resblks);
+ error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
+ &first_block, &free_list, resblks);
if (error)
goto abort_return;
xfs_ichgtime(tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
error = xfs_bumplink(tp, sip);
- if (error) {
+ if (error)
goto abort_return;
- }
/*
* If this is a synchronous mount, make sure that the
xfs_trans_set_sync(tp);
}
- error = xfs_bmap_finish (&tp, &free_list, first_block, &committed);
+ error = xfs_bmap_finish (&tp, &free_list, &committed);
if (error) {
xfs_bmap_cancel(&free_list);
goto abort_return;
}
- error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
- if (error) {
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
+ if (error)
goto std_return;
- }
/* Fall through to std_return with error = 0. */
std_return:
- if (DM_EVENT_ENABLED(src_vp->v_vfsp, sip,
- DM_EVENT_POSTLINK)) {
+ if (DM_EVENT_ENABLED(sip, DM_EVENT_POSTLINK)) {
(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTLINK,
- target_dir_vp, DM_RIGHT_NULL,
- src_vp, DM_RIGHT_NULL,
- target_name, NULL, 0, error, 0);
+ tdp, DM_RIGHT_NULL,
+ sip, DM_RIGHT_NULL,
+ target_name->name, NULL, 0, error, 0);
}
return error;
abort_return:
cancel_flags |= XFS_TRANS_ABORT;
/* FALLTHROUGH */
+
error_return:
xfs_trans_cancel(tp, cancel_flags);
-
goto std_return;
}
-/*
- * xfs_mkdir
- *
- */
-STATIC int
+
+
+int
xfs_mkdir(
- bhv_desc_t *dir_bdp,
- vname_t *dentry,
- vattr_t *vap,
- vnode_t **vpp,
+ xfs_inode_t *dp,
+ struct xfs_name *dir_name,
+ mode_t mode,
+ xfs_inode_t **ipp,
cred_t *credp)
{
- char *dir_name = VNAME(dentry);
- xfs_inode_t *dp;
+ xfs_mount_t *mp = dp->i_mount;
xfs_inode_t *cdp; /* inode of created dir */
- vnode_t *cvp; /* vnode of created dir */
xfs_trans_t *tp;
- xfs_mount_t *mp;
int cancel_flags;
int error;
int committed;
xfs_bmap_free_t free_list;
xfs_fsblock_t first_block;
- vnode_t *dir_vp;
- boolean_t dp_joined_to_trans;
+ boolean_t unlock_dp_on_error = B_FALSE;
boolean_t created = B_FALSE;
int dm_event_sent = 0;
xfs_prid_t prid;
struct xfs_dquot *udqp, *gdqp;
uint resblks;
- int dm_di_mode;
- int dir_namelen;
-
- dir_vp = BHV_TO_VNODE(dir_bdp);
- dp = XFS_BHVTOI(dir_bdp);
- mp = dp->i_mount;
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
- dir_namelen = VNAMELEN(dentry);
-
tp = NULL;
- dp_joined_to_trans = B_FALSE;
- dm_di_mode = vap->va_mode;
- if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_CREATE)) {
+ if (DM_EVENT_ENABLED(dp, DM_EVENT_CREATE)) {
error = XFS_SEND_NAMESP(mp, DM_EVENT_CREATE,
- dir_vp, DM_RIGHT_NULL, NULL,
- DM_RIGHT_NULL, dir_name, NULL,
- dm_di_mode, 0, 0);
+ dp, DM_RIGHT_NULL, NULL,
+ DM_RIGHT_NULL, dir_name->name, NULL,
+ mode, 0, 0);
if (error)
return error;
dm_event_sent = 1;
/* Return through std_return after this point. */
- vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address);
+ xfs_itrace_entry(dp);
mp = dp->i_mount;
udqp = gdqp = NULL;
if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
prid = dp->i_d.di_projid;
- else if (vap->va_mask & XFS_AT_PROJID)
- prid = (xfs_prid_t)vap->va_projid;
else
prid = (xfs_prid_t)dfltprid;
tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR);
cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
- resblks = XFS_MKDIR_SPACE_RES(mp, dir_namelen);
+ resblks = XFS_MKDIR_SPACE_RES(mp, dir_name->len);
error = xfs_trans_reserve(tp, resblks, XFS_MKDIR_LOG_RES(mp), 0,
XFS_TRANS_PERM_LOG_RES, XFS_MKDIR_LOG_COUNT);
if (error == ENOSPC) {
}
if (error) {
cancel_flags = 0;
- dp = NULL;
goto error_return;
}
- xfs_ilock(dp, XFS_ILOCK_EXCL);
+ xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
+ unlock_dp_on_error = B_TRUE;
/*
* Check for directory link count overflow.
if (error)
goto error_return;
- if (resblks == 0 &&
- (error = XFS_DIR_CANENTER(mp, tp, dp, dir_name, dir_namelen)))
+ error = xfs_dir_canenter(tp, dp, dir_name, resblks);
+ if (error)
goto error_return;
/*
* create the directory inode.
*/
- error = xfs_dir_ialloc(&tp, dp, vap->va_mode, 2,
+ error = xfs_dir_ialloc(&tp, dp, mode, 2,
0, credp, prid, resblks > 0,
&cdp, NULL);
if (error) {
goto error_return;
goto abort_return;
}
- ITRACE(cdp);
+ xfs_itrace_ref(cdp);
/*
* Now we add the directory inode to the transaction.
* We waited until now since xfs_dir_ialloc might start
* a new transaction. Had we joined the transaction
- * earlier, the locks might have gotten released.
+ * earlier, the locks might have gotten released. An error
+ * from here on will result in the transaction cancel
+ * unlocking dp so don't do it explicitly in the error path.
*/
- VN_HOLD(dir_vp);
+ IHOLD(dp);
xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
- dp_joined_to_trans = B_TRUE;
+ unlock_dp_on_error = B_FALSE;
XFS_BMAP_INIT(&free_list, &first_block);
- error = XFS_DIR_CREATENAME(mp, tp, dp, dir_name, dir_namelen,
- cdp->i_ino, &first_block, &free_list,
- resblks ? resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
+ error = xfs_dir_createname(tp, dp, dir_name, cdp->i_ino,
+ &first_block, &free_list, resblks ?
+ resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
if (error) {
ASSERT(error != ENOSPC);
goto error1;
*/
dp->i_gen++;
- error = XFS_DIR_INIT(mp, tp, cdp, dp);
- if (error) {
+ error = xfs_dir_init(tp, cdp, dp);
+ if (error)
goto error2;
- }
cdp->i_gen = 1;
error = xfs_bumplink(tp, dp);
- if (error) {
+ if (error)
goto error2;
- }
-
- cvp = XFS_ITOV(cdp);
created = B_TRUE;
- *vpp = cvp;
+ *ipp = cdp;
IHOLD(cdp);
/*
xfs_trans_set_sync(tp);
}
- error = xfs_bmap_finish(&tp, &free_list, first_block, &committed);
+ error = xfs_bmap_finish(&tp, &free_list, &committed);
if (error) {
IRELE(cdp);
goto error2;
}
- error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
XFS_QM_DQRELE(mp, udqp);
XFS_QM_DQRELE(mp, gdqp);
if (error) {
* xfs_trans_commit. */
std_return:
- if ( (created || (error != 0 && dm_event_sent != 0)) &&
- DM_EVENT_ENABLED(dir_vp->v_vfsp, XFS_BHVTOI(dir_bdp),
- DM_EVENT_POSTCREATE)) {
+ if ((created || (error != 0 && dm_event_sent != 0)) &&
+ DM_EVENT_ENABLED(dp, DM_EVENT_POSTCREATE)) {
(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTCREATE,
- dir_vp, DM_RIGHT_NULL,
- created ? XFS_ITOV(cdp):NULL,
+ dp, DM_RIGHT_NULL,
+ created ? cdp : NULL,
DM_RIGHT_NULL,
- dir_name, NULL,
- dm_di_mode, error, 0);
+ dir_name->name, NULL,
+ mode, error, 0);
}
return error;
XFS_QM_DQRELE(mp, udqp);
XFS_QM_DQRELE(mp, gdqp);
- if (!dp_joined_to_trans && (dp != NULL)) {
+ if (unlock_dp_on_error)
xfs_iunlock(dp, XFS_ILOCK_EXCL);
- }
goto std_return;
}
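
The dp_joined_to_trans boolean is replaced above by unlock_dp_on_error with the opposite sense: the error path unlocks the parent directory only while the caller still owns the lock; once the inode is joined to the transaction, the transaction cancel owns the unlock. A minimal, self-contained sketch of that cleanup-ownership pattern follows; the types and helpers are invented for illustration and are not the XFS APIs.

#include <stdbool.h>
#include <stdio.h>

struct dir { bool locked; };            /* stand-in for the parent inode    */
struct txn { struct dir *joined_dir; }; /* stand-in for an xfs transaction  */

static void dir_lock(struct dir *d)   { d->locked = true;  }
static void dir_unlock(struct dir *d) { d->locked = false; }

/* Cancelling the transaction unlocks whatever was joined to it. */
static void txn_cancel(struct txn *t)
{
	if (t->joined_dir)
		dir_unlock(t->joined_dir);
}

static int do_mkdir_like_op(struct dir *dp, struct txn *tp, bool fail_early,
			    bool fail_late)
{
	bool unlock_dp_on_error = false;
	int error = 0;

	dir_lock(dp);
	unlock_dp_on_error = true;	/* we own the unlock for now        */

	if (fail_early) {		/* e.g. quota or canenter check     */
		error = -1;
		goto error_return;
	}

	tp->joined_dir = dp;		/* "xfs_trans_ijoin": hand over ... */
	unlock_dp_on_error = false;	/* ... ownership of the unlock      */

	if (fail_late) {		/* e.g. directory entry creation    */
		error = -2;
		goto error_return;
	}
	return 0;

 error_return:
	txn_cancel(tp);			/* unlocks dp only if it was joined */
	if (unlock_dp_on_error)
		dir_unlock(dp);		/* otherwise we still own it        */
	return error;
}

int main(void)
{
	struct dir d = { false };
	struct txn t = { NULL };

	do_mkdir_like_op(&d, &t, true, false);
	printf("early failure: dir locked? %d\n", d.locked);	/* 0 */

	do_mkdir_like_op(&d, &t, false, true);
	printf("late failure:  dir locked? %d\n", d.locked);	/* 0 */
	return 0;
}
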
-
-/*
- * xfs_rmdir
- *
- */
-STATIC int
+int
xfs_rmdir(
- bhv_desc_t *dir_bdp,
- vname_t *dentry,
- cred_t *credp)
+ xfs_inode_t *dp,
+ struct xfs_name *name,
+ xfs_inode_t *cdp)
{
- char *name = VNAME(dentry);
- xfs_inode_t *dp;
- xfs_inode_t *cdp; /* child directory */
+ xfs_mount_t *mp = dp->i_mount;
xfs_trans_t *tp;
- xfs_mount_t *mp;
int error;
xfs_bmap_free_t free_list;
xfs_fsblock_t first_block;
int cancel_flags;
int committed;
- vnode_t *dir_vp;
- int dm_di_mode = 0;
int last_cdp_link;
- int namelen;
uint resblks;
- dir_vp = BHV_TO_VNODE(dir_bdp);
- dp = XFS_BHVTOI(dir_bdp);
- mp = dp->i_mount;
-
- vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address);
+ xfs_itrace_entry(dp);
- if (XFS_FORCED_SHUTDOWN(XFS_BHVTOI(dir_bdp)->i_mount))
+ if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
- namelen = VNAMELEN(dentry);
- if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_REMOVE)) {
+ if (DM_EVENT_ENABLED(dp, DM_EVENT_REMOVE)) {
error = XFS_SEND_NAMESP(mp, DM_EVENT_REMOVE,
- dir_vp, DM_RIGHT_NULL,
- NULL, DM_RIGHT_NULL,
- name, NULL, 0, 0, 0);
+ dp, DM_RIGHT_NULL,
+ NULL, DM_RIGHT_NULL, name->name,
+ NULL, cdp->i_d.di_mode, 0, 0);
if (error)
return XFS_ERROR(error);
}
- /* Return through std_return after this point. */
-
- cdp = NULL;
-
- /*
- * We need to get a reference to cdp before we get our log
- * reservation. The reason for this is that we cannot call
- * xfs_iget for an inode for which we do not have a reference
- * once we've acquired a log reservation. This is because the
- * inode we are trying to get might be in xfs_inactive going
- * for a log reservation. Since we'll have to wait for the
- * inactive code to complete before returning from xfs_iget,
- * we need to make sure that we don't have log space reserved
- * when we call xfs_iget. Instead we get an unlocked referece
- * to the inode before getting our log reservation.
- */
- error = xfs_get_dir_entry(dentry, &cdp);
- if (error) {
- REMOVE_DEBUG_TRACE(__LINE__);
- goto std_return;
- }
- mp = dp->i_mount;
- dm_di_mode = cdp->i_d.di_mode;
-
/*
* Get the dquots for the inodes.
*/
error = XFS_QM_DQATTACH(mp, dp, 0);
- if (!error && dp != cdp)
+ if (!error)
error = XFS_QM_DQATTACH(mp, cdp, 0);
if (error) {
- IRELE(cdp);
REMOVE_DEBUG_TRACE(__LINE__);
goto std_return;
}
if (error) {
ASSERT(error != ENOSPC);
cancel_flags = 0;
- IRELE(cdp);
goto error_return;
}
XFS_BMAP_INIT(&free_list, &first_block);
* that the directory entry for the child directory inode has
* not changed while we were obtaining a log reservation.
*/
- error = xfs_lock_dir_and_entry(dp, dentry, cdp);
+ error = xfs_lock_dir_and_entry(dp, cdp);
if (error) {
xfs_trans_cancel(tp, cancel_flags);
- IRELE(cdp);
goto std_return;
}
+ IHOLD(dp);
xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
- if (dp != cdp) {
- /*
- * Only increment the parent directory vnode count if
- * we didn't bump it in looking up cdp. The only time
- * we don't bump it is when we're looking up ".".
- */
- VN_HOLD(dir_vp);
- }
- ITRACE(cdp);
+ IHOLD(cdp);
xfs_trans_ijoin(tp, cdp, XFS_ILOCK_EXCL);
ASSERT(cdp->i_d.di_nlink >= 2);
error = XFS_ERROR(ENOTEMPTY);
goto error_return;
}
- if (!XFS_DIR_ISEMPTY(mp, cdp)) {
+ if (!xfs_dir_isempty(cdp)) {
error = XFS_ERROR(ENOTEMPTY);
goto error_return;
}
- error = XFS_DIR_REMOVENAME(mp, tp, dp, name, namelen, cdp->i_ino,
- &first_block, &free_list, resblks);
- if (error) {
+ error = xfs_dir_removename(tp, dp, name, cdp->i_ino,
+ &first_block, &free_list, resblks);
+ if (error)
goto error1;
- }
xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
last_cdp_link = (cdp)->i_d.di_nlink==0;
/*
- * Take an extra ref on the child vnode so that it
- * does not go to xfs_inactive() from within the commit.
- */
- IHOLD(cdp);
-
- /*
* If this is a synchronous mount, make sure that the
* rmdir transaction goes to disk before returning to
* the user.
xfs_trans_set_sync(tp);
}
- error = xfs_bmap_finish (&tp, &free_list, first_block, &committed);
+	error = xfs_bmap_finish(&tp, &free_list, &committed);
if (error) {
xfs_bmap_cancel(&free_list);
xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES |
XFS_TRANS_ABORT));
- IRELE(cdp);
goto std_return;
}
- error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
if (error) {
- IRELE(cdp);
goto std_return;
}
- /*
- * Let interposed file systems know about removed links.
- */
- VOP_LINK_REMOVED(XFS_ITOV(cdp), dir_vp, last_cdp_link);
-
- IRELE(cdp);
-
/* Fall through to std_return with error = 0 or the errno
* from xfs_trans_commit. */
-std_return:
- if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_POSTREMOVE)) {
+ std_return:
+ if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTREMOVE)) {
(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTREMOVE,
- dir_vp, DM_RIGHT_NULL,
+ dp, DM_RIGHT_NULL,
NULL, DM_RIGHT_NULL,
- name, NULL, dm_di_mode,
+ name->name, NULL, cdp->i_d.di_mode,
error, 0);
}
return error;
error1:
xfs_bmap_cancel(&free_list);
cancel_flags |= XFS_TRANS_ABORT;
+ /* FALLTHROUGH */
+
error_return:
xfs_trans_cancel(tp, cancel_flags);
goto std_return;
}
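
Both error labels in xfs_rmdir funnel into a single xfs_trans_cancel() call; error1 first ORs XFS_TRANS_ABORT into cancel_flags because by that point the transaction is dirty. A small stand-alone sketch of that single-exit cancel pattern (the flag values and helper names below are illustrative only):

#include <stdio.h>

#define CANCEL_RELEASE_LOG_RES	0x1	/* illustrative flag values */
#define CANCEL_ABORT		0x2

static void trans_cancel(int flags)
{
	printf("cancel: release=%d abort=%d\n",
	       !!(flags & CANCEL_RELEASE_LOG_RES), !!(flags & CANCEL_ABORT));
}

static int remove_like_op(int fail_before_dirty, int fail_after_dirty)
{
	int cancel_flags = CANCEL_RELEASE_LOG_RES;
	int error = 0;

	if (fail_before_dirty) {	/* reservation-time failure: nothing  */
		error = -1;		/* is dirty yet, plain cancel is fine */
		goto error_return;
	}

	/* ... directory entry removed, transaction now dirty ... */

	if (fail_after_dirty) {
		error = -2;
		goto error1;
	}
	return 0;

 error1:
	cancel_flags |= CANCEL_ABORT;	/* dirty transaction must be aborted */
	/* FALLTHROUGH */
 error_return:
	trans_cancel(cancel_flags);
	return error;
}

int main(void)
{
	remove_like_op(1, 0);	/* cancel: release=1 abort=0 */
	remove_like_op(0, 1);	/* cancel: release=1 abort=1 */
	return 0;
}
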
-
-/*
- * xfs_readdir
- *
- * Read dp's entries starting at uiop->uio_offset and translate them into
- * bufsize bytes worth of struct dirents starting at bufbase.
- */
-STATIC int
-xfs_readdir(
- bhv_desc_t *dir_bdp,
- uio_t *uiop,
- cred_t *credp,
- int *eofp)
-{
- xfs_inode_t *dp;
- xfs_trans_t *tp = NULL;
- int error = 0;
- uint lock_mode;
-
- vn_trace_entry(BHV_TO_VNODE(dir_bdp), __FUNCTION__,
- (inst_t *)__return_address);
- dp = XFS_BHVTOI(dir_bdp);
-
- if (XFS_FORCED_SHUTDOWN(dp->i_mount)) {
- return XFS_ERROR(EIO);
- }
-
- lock_mode = xfs_ilock_map_shared(dp);
- error = XFS_DIR_GETDENTS(dp->i_mount, tp, dp, uiop, eofp);
- xfs_iunlock_map_shared(dp, lock_mode);
- return error;
-}
-
-
-/*
- * xfs_symlink
- *
- */
-STATIC int
+int
xfs_symlink(
- bhv_desc_t *dir_bdp,
- vname_t *dentry,
- vattr_t *vap,
- char *target_path,
- vnode_t **vpp,
+ xfs_inode_t *dp,
+ struct xfs_name *link_name,
+ const char *target_path,
+ mode_t mode,
+ xfs_inode_t **ipp,
cred_t *credp)
{
+ xfs_mount_t *mp = dp->i_mount;
xfs_trans_t *tp;
- xfs_mount_t *mp;
- xfs_inode_t *dp;
xfs_inode_t *ip;
int error;
int pathlen;
xfs_bmap_free_t free_list;
xfs_fsblock_t first_block;
- boolean_t dp_joined_to_trans;
- vnode_t *dir_vp;
+ boolean_t unlock_dp_on_error = B_FALSE;
uint cancel_flags;
int committed;
xfs_fileoff_t first_fsb;
int nmaps;
xfs_bmbt_irec_t mval[SYMLINK_MAPS];
xfs_daddr_t d;
- char *cur_chunk;
+ const char *cur_chunk;
int byte_cnt;
int n;
xfs_buf_t *bp;
xfs_prid_t prid;
struct xfs_dquot *udqp, *gdqp;
uint resblks;
- char *link_name = VNAME(dentry);
- int link_namelen;
- *vpp = NULL;
- dir_vp = BHV_TO_VNODE(dir_bdp);
- dp = XFS_BHVTOI(dir_bdp);
- dp_joined_to_trans = B_FALSE;
+ *ipp = NULL;
error = 0;
ip = NULL;
tp = NULL;
- vn_trace_entry(dir_vp, __FUNCTION__, (inst_t *)__return_address);
-
- mp = dp->i_mount;
+ xfs_itrace_entry(dp);
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
- link_namelen = VNAMELEN(dentry);
-
/*
* Check component lengths of the target path name.
*/
pathlen = strlen(target_path);
if (pathlen >= MAXPATHLEN) /* total string too long */
return XFS_ERROR(ENAMETOOLONG);
- if (pathlen >= MAXNAMELEN) { /* is any component too long? */
- int len, total;
- char *path;
-
- for(total = 0, path = target_path; total < pathlen;) {
- /*
- * Skip any slashes.
- */
- while(*path == '/') {
- total++;
- path++;
- }
- /*
- * Count up to the next slash or end of path.
- * Error out if the component is bigger than MAXNAMELEN.
- */
- for(len = 0; *path != '/' && total < pathlen;total++, path++) {
- if (++len >= MAXNAMELEN) {
- error = ENAMETOOLONG;
- return error;
- }
- }
- }
- }
-
- if (DM_EVENT_ENABLED(dir_vp->v_vfsp, dp, DM_EVENT_SYMLINK)) {
- error = XFS_SEND_NAMESP(mp, DM_EVENT_SYMLINK, dir_vp,
+ if (DM_EVENT_ENABLED(dp, DM_EVENT_SYMLINK)) {
+ error = XFS_SEND_NAMESP(mp, DM_EVENT_SYMLINK, dp,
DM_RIGHT_NULL, NULL, DM_RIGHT_NULL,
- link_name, target_path, 0, 0, 0);
+ link_name->name, target_path, 0, 0, 0);
if (error)
return error;
}
udqp = gdqp = NULL;
if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
prid = dp->i_d.di_projid;
- else if (vap->va_mask & XFS_AT_PROJID)
- prid = (xfs_prid_t)vap->va_projid;
else
prid = (xfs_prid_t)dfltprid;
fs_blocks = 0;
else
fs_blocks = XFS_B_TO_FSB(mp, pathlen);
- resblks = XFS_SYMLINK_SPACE_RES(mp, link_namelen, fs_blocks);
+ resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks);
error = xfs_trans_reserve(tp, resblks, XFS_SYMLINK_LOG_RES(mp), 0,
XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT);
if (error == ENOSPC && fs_blocks == 0) {
}
if (error) {
cancel_flags = 0;
- dp = NULL;
goto error_return;
}
- xfs_ilock(dp, XFS_ILOCK_EXCL);
+ xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
+ unlock_dp_on_error = B_TRUE;
/*
* Check whether the directory allows new symlinks or not.
/*
* Check for ability to enter directory entry, if no space reserved.
*/
- if (resblks == 0 &&
- (error = XFS_DIR_CANENTER(mp, tp, dp, link_name, link_namelen)))
+ error = xfs_dir_canenter(tp, dp, link_name, resblks);
+ if (error)
goto error_return;
/*
* Initialize the bmap freelist prior to calling either
/*
* Allocate an inode for the symlink.
*/
- error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (vap->va_mode&~S_IFMT),
+ error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT),
1, 0, credp, prid, resblks > 0, &ip, NULL);
if (error) {
if (error == ENOSPC)
goto error_return;
goto error1;
}
- ITRACE(ip);
+ xfs_itrace_ref(ip);
- VN_HOLD(dir_vp);
+ /*
+ * An error after we've joined dp to the transaction will result in the
+ * transaction cancel unlocking dp so don't do it explicitly in the
+ * error path.
+ */
+ IHOLD(dp);
xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
- dp_joined_to_trans = B_TRUE;
+ unlock_dp_on_error = B_FALSE;
/*
* Also attach the dquot(s) to it, if applicable.
error = xfs_bmapi(tp, ip, first_fsb, fs_blocks,
XFS_BMAPI_WRITE | XFS_BMAPI_METADATA,
&first_block, resblks, mval, &nmaps,
- &free_list);
+ &free_list, NULL);
if (error) {
goto error1;
}
/*
* Create the directory entry for the symlink.
*/
- error = XFS_DIR_CREATENAME(mp, tp, dp, link_name, link_namelen,
- ip->i_ino, &first_block, &free_list, resblks);
- if (error) {
+ error = xfs_dir_createname(tp, dp, link_name, ip->i_ino,
+ &first_block, &free_list, resblks);
+ if (error)
goto error1;
- }
xfs_ichgtime(dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
*/
IHOLD(ip);
- error = xfs_bmap_finish(&tp, &free_list, first_block, &committed);
+ error = xfs_bmap_finish(&tp, &free_list, &committed);
if (error) {
goto error2;
}
- error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
XFS_QM_DQRELE(mp, udqp);
XFS_QM_DQRELE(mp, gdqp);
/* Fall through to std_return with error = 0 or errno from
* xfs_trans_commit */
std_return:
- if (DM_EVENT_ENABLED(dir_vp->v_vfsp, XFS_BHVTOI(dir_bdp),
- DM_EVENT_POSTSYMLINK)) {
+ if (DM_EVENT_ENABLED(dp, DM_EVENT_POSTSYMLINK)) {
(void) XFS_SEND_NAMESP(mp, DM_EVENT_POSTSYMLINK,
- dir_vp, DM_RIGHT_NULL,
- error ? NULL : XFS_ITOV(ip),
- DM_RIGHT_NULL, link_name, target_path,
- 0, error, 0);
+ dp, DM_RIGHT_NULL,
+ error ? NULL : ip,
+ DM_RIGHT_NULL, link_name->name,
+ target_path, 0, error, 0);
}
- if (!error) {
- vnode_t *vp;
-
- ASSERT(ip);
- vp = XFS_ITOV(ip);
- *vpp = vp;
- }
+ if (!error)
+ *ipp = ip;
return error;
error2:
XFS_QM_DQRELE(mp, udqp);
XFS_QM_DQRELE(mp, gdqp);
- if (!dp_joined_to_trans && (dp != NULL)) {
+ if (unlock_dp_on_error)
xfs_iunlock(dp, XFS_ILOCK_EXCL);
- }
goto std_return;
}
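
Throughout these hunks the VNAME()/VNAMELEN() pair on a vname_t dentry is replaced by a single struct xfs_name carrying the name pointer and its length (dir_name->name, dir_name->len, link_name->len). A self-contained sketch of that calling convention, using an invented name_desc type rather than the real struct xfs_name:

#include <stdio.h>
#include <string.h>

/* Illustrative analogue of a name-plus-length descriptor. */
struct name_desc {
	const char	*name;
	int		len;
};

static struct name_desc name_desc_init(const char *s)
{
	struct name_desc nd = { .name = s, .len = (int)strlen(s) };
	return nd;
}

/* Callee takes one descriptor instead of separate (char *, int) arguments. */
static int create_entry(const struct name_desc *nd)
{
	printf("creating \"%.*s\" (len %d)\n", nd->len, nd->name, nd->len);
	return 0;
}

int main(void)
{
	struct name_desc nd = name_desc_init("newdir");
	return create_entry(&nd);
}
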
-
-/*
- * xfs_fid2
- *
- * A fid routine that takes a pointer to a previously allocated
- * fid structure (like xfs_fast_fid) but uses a 64 bit inode number.
- */
-STATIC int
-xfs_fid2(
- bhv_desc_t *bdp,
- fid_t *fidp)
-{
- xfs_inode_t *ip;
- xfs_fid2_t *xfid;
-
- vn_trace_entry(BHV_TO_VNODE(bdp), __FUNCTION__,
- (inst_t *)__return_address);
- ASSERT(sizeof(fid_t) >= sizeof(xfs_fid2_t));
-
- xfid = (xfs_fid2_t *)fidp;
- ip = XFS_BHVTOI(bdp);
- xfid->fid_len = sizeof(xfs_fid2_t) - sizeof(xfid->fid_len);
- xfid->fid_pad = 0;
- /*
- * use memcpy because the inode is a long long and there's no
- * assurance that xfid->fid_ino is properly aligned.
- */
- memcpy(&xfid->fid_ino, &ip->i_ino, sizeof(xfid->fid_ino));
- xfid->fid_gen = ip->i_d.di_gen;
-
- return 0;
-}
-
-
-/*
- * xfs_rwlock
- */
int
-xfs_rwlock(
- bhv_desc_t *bdp,
- vrwlock_t locktype)
-{
- xfs_inode_t *ip;
- vnode_t *vp;
-
- vp = BHV_TO_VNODE(bdp);
- if (VN_ISDIR(vp))
- return 1;
- ip = XFS_BHVTOI(bdp);
- if (locktype == VRWLOCK_WRITE) {
- xfs_ilock(ip, XFS_IOLOCK_EXCL);
- } else if (locktype == VRWLOCK_TRY_READ) {
- return (xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED));
- } else if (locktype == VRWLOCK_TRY_WRITE) {
- return (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL));
- } else {
- ASSERT((locktype == VRWLOCK_READ) ||
- (locktype == VRWLOCK_WRITE_DIRECT));
- xfs_ilock(ip, XFS_IOLOCK_SHARED);
- }
-
- return 1;
-}
-
-
-/*
- * xfs_rwunlock
- */
-void
-xfs_rwunlock(
- bhv_desc_t *bdp,
- vrwlock_t locktype)
-{
- xfs_inode_t *ip;
- vnode_t *vp;
-
- vp = BHV_TO_VNODE(bdp);
- if (VN_ISDIR(vp))
- return;
- ip = XFS_BHVTOI(bdp);
- if (locktype == VRWLOCK_WRITE) {
- /*
- * In the write case, we may have added a new entry to
- * the reference cache. This might store a pointer to
- * an inode to be released in this inode. If it is there,
- * clear the pointer and release the inode after unlocking
- * this one.
- */
- xfs_refcache_iunlock(ip, XFS_IOLOCK_EXCL);
- } else {
- ASSERT((locktype == VRWLOCK_READ) ||
- (locktype == VRWLOCK_WRITE_DIRECT));
- xfs_iunlock(ip, XFS_IOLOCK_SHARED);
- }
- return;
-}
-
-STATIC int
xfs_inode_flush(
- bhv_desc_t *bdp,
+ xfs_inode_t *ip,
int flags)
{
- xfs_inode_t *ip;
- xfs_mount_t *mp;
- xfs_inode_log_item_t *iip;
+ xfs_mount_t *mp = ip->i_mount;
int error = 0;
- ip = XFS_BHVTOI(bdp);
- mp = ip->i_mount;
- iip = ip->i_itemp;
-
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
* Bypass inodes which have already been cleaned by
* the inode flush clustering code inside xfs_iflush
*/
- if ((ip->i_update_core == 0) &&
- ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL)))
+ if (xfs_inode_clean(ip))
return 0;
- if (flags & FLUSH_LOG) {
- if (iip && iip->ili_last_lsn) {
- xlog_t *log = mp->m_log;
- xfs_lsn_t sync_lsn;
- int s, log_flags = XFS_LOG_FORCE;
-
- s = GRANT_LOCK(log);
- sync_lsn = log->l_last_sync_lsn;
- GRANT_UNLOCK(log, s);
-
- if ((XFS_LSN_CMP(iip->ili_last_lsn, sync_lsn) <= 0))
- return 0;
-
- if (flags & FLUSH_SYNC)
- log_flags |= XFS_LOG_SYNC;
- return xfs_log_force(mp, iip->ili_last_lsn, log_flags);
- }
- }
-
/*
* We make this non-blocking if the inode is contended,
* return EAGAIN to indicate to the caller that they
* blocking on inodes inside another operation right
* now, they get caught later by xfs_sync.
*/
- if (flags & FLUSH_INODE) {
- int flush_flags;
-
- if (xfs_ipincount(ip))
- return EAGAIN;
-
- if (flags & FLUSH_SYNC) {
- xfs_ilock(ip, XFS_ILOCK_SHARED);
- xfs_iflock(ip);
- } else if (xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
- if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip)) {
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
- return EAGAIN;
- }
- } else {
+ if (flags & FLUSH_SYNC) {
+ xfs_ilock(ip, XFS_ILOCK_SHARED);
+ xfs_iflock(ip);
+ } else if (xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
+ if (xfs_ipincount(ip) || !xfs_iflock_nowait(ip)) {
+ xfs_iunlock(ip, XFS_ILOCK_SHARED);
return EAGAIN;
}
-
- if (flags & FLUSH_SYNC)
- flush_flags = XFS_IFLUSH_SYNC;
- else
- flush_flags = XFS_IFLUSH_ASYNC;
-
- error = xfs_iflush(ip, flush_flags);
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
+ } else {
+ return EAGAIN;
}
+ error = xfs_iflush(ip, (flags & FLUSH_SYNC) ? XFS_IFLUSH_SYNC
+ : XFS_IFLUSH_ASYNC_NOBLOCK);
+ xfs_iunlock(ip, XFS_ILOCK_SHARED);
+
return error;
}
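
The rewritten xfs_inode_flush keeps only one decision: a synchronous flush may block on the inode lock, while anything else takes the locks opportunistically and returns EAGAIN when the inode is locked, pinned, or already being flushed. A stand-alone sketch of that trylock-or-EAGAIN shape using a pthread mutex (the flag and helper names are invented):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define FLUSH_SYNC	0x1	/* illustrative flag */

static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER;

static int flush_inode(int flags)
{
	if (flags & FLUSH_SYNC) {
		/* Synchronous callers are allowed to wait for the lock. */
		pthread_mutex_lock(&inode_lock);
	} else if (pthread_mutex_trylock(&inode_lock) != 0) {
		/* Contended and we may not block: let the caller retry later. */
		return EAGAIN;
	}

	/* ... write the inode back here ... */

	pthread_mutex_unlock(&inode_lock);
	return 0;
}

int main(void)
{
	printf("async flush: %d\n", flush_inode(0));
	printf("sync  flush: %d\n", flush_inode(FLUSH_SYNC));
	return 0;
}
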
int
-xfs_set_dmattrs (
- bhv_desc_t *bdp,
+xfs_set_dmattrs(
+ xfs_inode_t *ip,
u_int evmask,
- u_int16_t state,
- cred_t *credp)
+ u_int16_t state)
{
- xfs_inode_t *ip;
+ xfs_mount_t *mp = ip->i_mount;
xfs_trans_t *tp;
- xfs_mount_t *mp;
int error;
if (!capable(CAP_SYS_ADMIN))
return XFS_ERROR(EPERM);
- ip = XFS_BHVTOI(bdp);
- mp = ip->i_mount;
-
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
xfs_ilock(ip, XFS_ILOCK_EXCL);
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
- ip->i_iocore.io_dmevmask = ip->i_d.di_dmevmask = evmask;
- ip->i_iocore.io_dmstate = ip->i_d.di_dmstate = state;
+ ip->i_d.di_dmevmask = evmask;
+ ip->i_d.di_dmstate = state;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
IHOLD(ip);
- error = xfs_trans_commit(tp, 0, NULL);
+ error = xfs_trans_commit(tp, 0);
return error;
}
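
xfs_set_dmattrs is the smallest example of the transaction pattern the rest of this file follows: allocate and reserve, lock and join the inode, modify and log it, then commit (xfs_trans_commit now takes only two arguments). A stripped-down, stand-alone sketch of that sequence with stub helpers, not the real XFS calls:

#include <stdio.h>

struct inode { unsigned evmask, state; int locked; };
struct trans { int reserved; };

static int  trans_reserve(struct trans *tp) { tp->reserved = 1; return 0; }
static void trans_cancel(struct trans *tp)  { tp->reserved = 0; }
static int  trans_commit(struct trans *tp)  { tp->reserved = 0; return 0; }

static void ilock(struct inode *ip)   { ip->locked = 1; }
static void iunlock(struct inode *ip) { ip->locked = 0; }

static int set_dmattrs_like(struct inode *ip, unsigned evmask, unsigned state)
{
	struct trans tp = { 0 };
	int error;

	error = trans_reserve(&tp);	/* reserve log space first ...      */
	if (error) {
		trans_cancel(&tp);
		return error;
	}

	ilock(ip);			/* ... then take the inode lock     */

	ip->evmask = evmask;		/* modify the in-core fields; XFS   */
	ip->state  = state;		/* would log the inode core here    */

	error = trans_commit(&tp);	/* commit the change                */
	iunlock(ip);			/* drop the lock (XFS hands this    */
	return error;			/* off to the joined transaction)   */
}

int main(void)
{
	struct inode ino = { 0, 0, 0 };
	int error = set_dmattrs_like(&ino, 0xff, 1);

	printf("error=%d evmask=%#x state=%u\n", error, ino.evmask, ino.state);
	return 0;
}
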
-
-/*
- * xfs_reclaim
- */
-STATIC int
+int
xfs_reclaim(
- bhv_desc_t *bdp)
+ xfs_inode_t *ip)
{
- xfs_inode_t *ip;
- vnode_t *vp;
+ bhv_vnode_t *vp = XFS_ITOV(ip);
- vp = BHV_TO_VNODE(bdp);
- ip = XFS_BHVTOI(bdp);
-
- vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
+ xfs_itrace_entry(ip);
ASSERT(!VN_MAPPED(vp));
return 0;
}
- vn_iowait(vp);
+ vn_iowait(ip);
ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
*/
xfs_synchronize_atime(ip);
- /* If we have nothing to flush with this inode then complete the
- * teardown now, otherwise break the link between the xfs inode
- * and the linux inode and clean up the xfs inode later. This
- * avoids flushing the inode to disk during the delete operation
- * itself.
+ /*
+ * If we have nothing to flush with this inode then complete the
+ * teardown now, otherwise break the link between the xfs inode and the
+ * linux inode and clean up the xfs inode later. This avoids flushing
+ * the inode to disk during the delete operation itself.
+ *
+ * When breaking the link, we need to set the XFS_IRECLAIMABLE flag
+ * first to ensure that xfs_iunpin() will never see an xfs inode
+ * that has a linux inode being reclaimed. Synchronisation is provided
+ * by the i_flags_lock.
*/
if (!ip->i_update_core && (ip->i_itemp == NULL)) {
xfs_ilock(ip, XFS_ILOCK_EXCL);
} else {
xfs_mount_t *mp = ip->i_mount;
- /* Protect sync from us */
+ /* Protect sync and unpin from us */
XFS_MOUNT_ILOCK(mp);
- vn_bhv_remove(VN_BHV_HEAD(vp), XFS_ITOBHV(ip));
+ spin_lock(&ip->i_flags_lock);
+ __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
+ vn_to_inode(vp)->i_private = NULL;
+ ip->i_vnode = NULL;
+ spin_unlock(&ip->i_flags_lock);
list_add_tail(&ip->i_reclaim, &mp->m_del_inodes);
- ip->i_flags |= XFS_IRECLAIMABLE;
XFS_MOUNT_IUNLOCK(mp);
}
return 0;
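
The reclaim path now marks the inode XFS_IRECLAIMABLE and severs the link to the Linux inode inside one critical section under i_flags_lock, so a concurrent unpin can never observe the half-torn-down state. A minimal sketch of that publish-under-one-lock ordering (types and the lock are stand-ins, not the XFS ones):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_inode {
	pthread_mutex_t	flags_lock;	/* stand-in for i_flags_lock      */
	bool		reclaimable;	/* stand-in for XFS_IRECLAIMABLE  */
	void		*vfs_link;	/* stand-in for ip->i_vnode       */
};

/* Tear down: set the flag and clear the link atomically w.r.t. readers. */
static void start_reclaim(struct fake_inode *ip)
{
	pthread_mutex_lock(&ip->flags_lock);
	ip->reclaimable = true;
	ip->vfs_link = NULL;
	pthread_mutex_unlock(&ip->flags_lock);
}

/* Reader (the "unpin" side): under the same lock it either sees the link
 * still valid, or sees reclaimable set and knows not to touch the link. */
static bool link_still_usable(struct fake_inode *ip)
{
	bool usable;

	pthread_mutex_lock(&ip->flags_lock);
	usable = !ip->reclaimable && ip->vfs_link != NULL;
	pthread_mutex_unlock(&ip->flags_lock);
	return usable;
}

int main(void)
{
	int dummy;
	struct fake_inode ino;

	pthread_mutex_init(&ino.flags_lock, NULL);
	ino.reclaimable = false;
	ino.vfs_link = &dummy;

	printf("before reclaim: usable=%d\n", link_still_usable(&ino));
	start_reclaim(&ino);
	printf("after  reclaim: usable=%d\n", link_still_usable(&ino));
	return 0;
}
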
int locked,
int sync_mode)
{
- xfs_ihash_t *ih = ip->i_hash;
- vnode_t *vp = XFS_ITOV_NULL(ip);
+ xfs_perag_t *pag = xfs_get_perag(ip->i_mount, ip->i_ino);
+ bhv_vnode_t *vp = XFS_ITOV_NULL(ip);
int error;
if (vp && VN_BAD(vp))
* Once we have the XFS_IRECLAIM flag set it will not touch
* us.
*/
- write_lock(&ih->ih_lock);
- if ((ip->i_flags & XFS_IRECLAIM) ||
- (!(ip->i_flags & XFS_IRECLAIMABLE) && vp == NULL)) {
- write_unlock(&ih->ih_lock);
+ write_lock(&pag->pag_ici_lock);
+ spin_lock(&ip->i_flags_lock);
+ if (__xfs_iflags_test(ip, XFS_IRECLAIM) ||
+ (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) && vp == NULL)) {
+ spin_unlock(&ip->i_flags_lock);
+ write_unlock(&pag->pag_ici_lock);
if (locked) {
xfs_ifunlock(ip);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
- return(1);
+ return 1;
}
- ip->i_flags |= XFS_IRECLAIM;
- write_unlock(&ih->ih_lock);
+ __xfs_iflags_set(ip, XFS_IRECLAIM);
+ spin_unlock(&ip->i_flags_lock);
+ write_unlock(&pag->pag_ici_lock);
+ xfs_put_perag(ip->i_mount, pag);
/*
* If the inode is still dirty, then flush it out. If the inode
* We get the flush lock regardless, though, just to make sure
* we don't free it while it is being flushed.
*/
- if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
- if (!locked) {
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_iflock(ip);
- }
+ if (!locked) {
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_iflock(ip);
+ }
+ if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
if (ip->i_update_core ||
((ip->i_itemp != NULL) &&
(ip->i_itemp->ili_format.ilf_fields != 0))) {
ASSERT(ip->i_update_core == 0);
ASSERT(ip->i_itemp == NULL ||
ip->i_itemp->ili_format.ilf_fields == 0);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- } else if (locked) {
- /*
- * We are not interested in doing an iflush if we're
- * in the process of shutting down the filesystem forcibly.
- * So, just reclaim the inode.
- */
- xfs_ifunlock(ip);
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
+ xfs_ifunlock(ip);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
reclaim:
xfs_ireclaim(ip);
return 0;
int committed;
int error;
- vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address);
+ xfs_itrace_entry(ip);
if (XFS_FORCED_SHUTDOWN(mp))
return XFS_ERROR(EIO);
- rt = XFS_IS_REALTIME_INODE(ip);
- if (unlikely(rt)) {
- if (!(extsz = ip->i_d.di_extsize))
- extsz = mp->m_sb.sb_rextsize;
- } else {
- extsz = ip->i_d.di_extsize;
- }
-
if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
return error;
if (len <= 0)
return XFS_ERROR(EINVAL);
+ rt = XFS_IS_REALTIME_INODE(ip);
+ extsz = xfs_get_extsz_hint(ip);
+
count = len;
- error = 0;
imapp = &imaps[0];
nimaps = 1;
bmapi_flag = XFS_BMAPI_WRITE | (alloc_type ? XFS_BMAPI_PREALLOC : 0);
allocatesize_fsb = XFS_B_TO_FSB(mp, count);
/* Generate a DMAPI event if needed. */
- if (alloc_type != 0 && offset < ip->i_d.di_size &&
+ if (alloc_type != 0 && offset < ip->i_size &&
(attr_flags&ATTR_DMI) == 0 &&
- DM_EVENT_ENABLED(XFS_MTOVFS(mp), ip, DM_EVENT_WRITE)) {
+ DM_EVENT_ENABLED(ip, DM_EVENT_WRITE)) {
xfs_off_t end_dmi_offset;
end_dmi_offset = offset+len;
- if (end_dmi_offset > ip->i_d.di_size)
- end_dmi_offset = ip->i_d.di_size;
- error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, XFS_ITOV(ip),
- offset, end_dmi_offset - offset,
- 0, NULL);
+ if (end_dmi_offset > ip->i_size)
+ end_dmi_offset = ip->i_size;
+ error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip, offset,
+ end_dmi_offset - offset, 0, NULL);
if (error)
- return(error);
+ return error;
}
/*
error = xfs_bmapi(tp, ip, startoffset_fsb,
allocatesize_fsb, bmapi_flag,
&firstfsb, 0, imapp, &nimaps,
- &free_list);
+ &free_list, NULL);
if (error) {
goto error0;
}
/*
* Complete the transaction
*/
- error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed);
+ error = xfs_bmap_finish(&tp, &free_list, &committed);
if (error) {
goto error0;
}
- error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
if (error) {
break;
allocatesize_fsb -= allocated_fsb;
}
dmapi_enospc_check:
- if (error == ENOSPC && (attr_flags&ATTR_DMI) == 0 &&
- DM_EVENT_ENABLED(XFS_MTOVFS(mp), ip, DM_EVENT_NOSPACE)) {
-
+ if (error == ENOSPC && (attr_flags & ATTR_DMI) == 0 &&
+ DM_EVENT_ENABLED(ip, DM_EVENT_NOSPACE)) {
error = XFS_SEND_NAMESP(mp, DM_EVENT_NOSPACE,
- XFS_ITOV(ip), DM_RIGHT_NULL,
- XFS_ITOV(ip), DM_RIGHT_NULL,
+ ip, DM_RIGHT_NULL,
+ ip, DM_RIGHT_NULL,
NULL, NULL, 0, 0, 0); /* Delay flag intentionally unused */
if (error == 0)
goto retry; /* Maybe DMAPI app. has made space */
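
The dmapi_enospc_check block gives a DMAPI space manager one chance to make room: on ENOSPC it sends DM_EVENT_NOSPACE and, if that event handler succeeds, jumps back to retry the allocation. A self-contained sketch of that retry loop, with a fake allocator and space-manager hook standing in for the XFS/DMAPI calls:

#include <errno.h>
#include <stdio.h>

static int free_blocks = 0;		/* fake filesystem free space */

static int allocate_blocks(int want)
{
	if (want > free_blocks)
		return ENOSPC;
	free_blocks -= want;
	return 0;
}

/* Stand-in for sending DM_EVENT_NOSPACE: the space manager migrates
 * something out and frees a few blocks; returns 0 on success. */
static int ask_space_manager(void)
{
	static int already_asked;

	if (already_asked)
		return ENOSPC;		/* nothing more to migrate */
	already_asked = 1;
	free_blocks += 16;
	return 0;
}

static int alloc_file_space_like(int want)
{
	int error;

retry:
	error = allocate_blocks(want);
	if (error == ENOSPC && ask_space_manager() == 0)
		goto retry;		/* maybe the manager made space */
	return error;
}

int main(void)
{
	printf("allocation error = %d\n", alloc_file_space_like(8));
	return 0;
}
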
int error = 0;
bp = xfs_buf_get_noaddr(mp->m_sb.sb_blocksize,
- ip->i_d.di_flags & XFS_DIFLAG_REALTIME ?
+ XFS_IS_REALTIME_INODE(ip) ?
mp->m_rtdev_targp : mp->m_ddev_targp);
for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
offset_fsb = XFS_B_TO_FSBT(mp, offset);
nimap = 1;
- error = xfs_bmapi(NULL, ip, offset_fsb, 1, 0, NULL, 0, &imap,
- &nimap, NULL);
+ error = xfs_bmapi(NULL, ip, offset_fsb, 1, 0,
+ NULL, 0, &imap, &nimap, NULL, NULL);
if (error || nimap < 1)
break;
ASSERT(imap.br_blockcount >= 1);
XFS_BUF_READ(bp);
XFS_BUF_SET_ADDR(bp, XFS_FSB_TO_DB(ip, imap.br_startblock));
xfsbdstrat(mp, bp);
- if ((error = xfs_iowait(bp))) {
+ error = xfs_iowait(bp);
+ if (error) {
xfs_ioerror_alert("xfs_zero_remaining_bytes(read)",
mp, bp, XFS_BUF_ADDR(bp));
break;
XFS_BUF_UNREAD(bp);
XFS_BUF_WRITE(bp);
xfsbdstrat(mp, bp);
- if ((error = xfs_iowait(bp))) {
+ error = xfs_iowait(bp);
+ if (error) {
xfs_ioerror_alert("xfs_zero_remaining_bytes(write)",
mp, bp, XFS_BUF_ADDR(bp));
break;
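
xfs_zero_remaining_bytes works one block at a time: map the block, read it into a scratch buffer, clear just the byte range that falls inside it, and write it back. A stand-alone sketch of that read-modify-write zeroing over an in-memory "disk"; the block size and helpers are made up and the real code additionally maps each block through xfs_bmapi:

#include <stdio.h>
#include <string.h>

#define BLKSZ	16			/* toy block size */
static unsigned char disk[4 * BLKSZ];	/* toy backing store */

static void read_block(unsigned blk, unsigned char *buf)
{
	memcpy(buf, disk + blk * BLKSZ, BLKSZ);
}

static void write_block(unsigned blk, const unsigned char *buf)
{
	memcpy(disk + blk * BLKSZ, buf, BLKSZ);
}

/* Zero bytes [start, end] by rewriting only the affected blocks. */
static void zero_remaining_bytes(unsigned start, unsigned end)
{
	unsigned char bp[BLKSZ];
	unsigned off, lastoff = 0;

	for (off = start; off <= end; off = lastoff + 1) {
		unsigned blk = off / BLKSZ;
		unsigned first = off % BLKSZ;	/* edge offset inside block */
		unsigned last = blk * BLKSZ + BLKSZ - 1;

		if (last > end)
			last = end;
		lastoff = last;

		read_block(blk, bp);				/* read ...   */
		memset(bp + first, 0, last % BLKSZ - first + 1);/* modify ... */
		write_block(blk, bp);				/* write back */
	}
}

int main(void)
{
	memset(disk, 0xaa, sizeof(disk));
	zero_remaining_bytes(5, 40);	/* spans three blocks */

	for (unsigned i = 0; i < sizeof(disk); i++)
		printf("%c", disk[i] ? 'X' : '.');
	printf("\n");	/* 'X' (0xaa) outside [5,40], '.' (zeroed) inside */
	return 0;
}
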
xfs_off_t len,
int attr_flags)
{
- vnode_t *vp;
+ bhv_vnode_t *vp;
int committed;
int done;
xfs_off_t end_dmi_offset;
int error;
xfs_fsblock_t firstfsb;
xfs_bmap_free_t free_list;
- xfs_off_t ilen;
xfs_bmbt_irec_t imap;
xfs_off_t ioffset;
xfs_extlen_t mod=0;
xfs_mount_t *mp;
int nimap;
uint resblks;
- int rounding;
+ uint rounding;
int rt;
xfs_fileoff_t startoffset_fsb;
xfs_trans_t *tp;
vp = XFS_ITOV(ip);
mp = ip->i_mount;
- vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
+ xfs_itrace_entry(ip);
if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
return error;
error = 0;
if (len <= 0) /* if nothing being freed */
return error;
- rt = (ip->i_d.di_flags & XFS_DIFLAG_REALTIME);
+ rt = XFS_IS_REALTIME_INODE(ip);
startoffset_fsb = XFS_B_TO_FSB(mp, offset);
end_dmi_offset = offset + len;
endoffset_fsb = XFS_B_TO_FSBT(mp, end_dmi_offset);
- if (offset < ip->i_d.di_size &&
- (attr_flags & ATTR_DMI) == 0 &&
- DM_EVENT_ENABLED(XFS_MTOVFS(mp), ip, DM_EVENT_WRITE)) {
- if (end_dmi_offset > ip->i_d.di_size)
- end_dmi_offset = ip->i_d.di_size;
- error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, vp,
+ if (offset < ip->i_size && (attr_flags & ATTR_DMI) == 0 &&
+ DM_EVENT_ENABLED(ip, DM_EVENT_WRITE)) {
+ if (end_dmi_offset > ip->i_size)
+ end_dmi_offset = ip->i_size;
+ error = XFS_SEND_DATA(mp, DM_EVENT_WRITE, ip,
offset, end_dmi_offset - offset,
AT_DELAY_FLAG(attr_flags), NULL);
if (error)
- return(error);
+ return error;
}
- ASSERT(attr_flags & ATTR_NOLOCK ? attr_flags & ATTR_DMI : 1);
if (attr_flags & ATTR_NOLOCK)
need_iolock = 0;
- if (need_iolock)
+ if (need_iolock) {
xfs_ilock(ip, XFS_IOLOCK_EXCL);
+ vn_iowait(ip); /* wait for the completion of any pending DIOs */
+ }
- rounding = MAX((__uint8_t)(1 << mp->m_sb.sb_blocklog),
- (__uint8_t)NBPP);
- ilen = len + (offset & (rounding - 1));
+ rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
ioffset = offset & ~(rounding - 1);
- if (ilen & (rounding - 1))
- ilen = (ilen + rounding) & ~(rounding - 1);
if (VN_CACHED(vp) != 0) {
- xfs_inval_cached_trace(&ip->i_iocore, ioffset, -1,
- ctooff(offtoct(ioffset)), -1);
- VOP_FLUSHINVAL_PAGES(vp, ctooff(offtoct(ioffset)),
- -1, FI_REMAPF_LOCKED);
+ xfs_inval_cached_trace(ip, ioffset, -1, ioffset, -1);
+ error = xfs_flushinval_pages(ip, ioffset, -1, FI_REMAPF_LOCKED);
+ if (error)
+ goto out_unlock_iolock;
}
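
The hunk above drops the old ilen round-up entirely: only the start offset matters now, rounded down to the larger of the filesystem block size and the page size before the page cache is invalidated, and the flush runs to end of file (-1). A tiny stand-alone check of that rounding arithmetic (the sizes below are examples, not XFS defaults):

#include <stdio.h>

typedef unsigned long long u64;

/* Round offset down to the larger of block size and page size,
 * both assumed to be powers of two. */
static u64 round_down_to_boundary(u64 offset, unsigned blocklog,
				  unsigned page_size)
{
	unsigned blocksize = 1u << blocklog;
	unsigned rounding = blocksize > page_size ? blocksize : page_size;

	return offset & ~(u64)(rounding - 1);
}

int main(void)
{
	/* 4096-byte blocks (blocklog 12), 4096-byte pages */
	printf("%llu\n", round_down_to_boundary(10000, 12, 4096));  /* 8192 */
	/* 512-byte blocks (blocklog 9): the page size dominates */
	printf("%llu\n", round_down_to_boundary(10000, 9, 4096));   /* 8192 */
	/* 64k blocks (blocklog 16): the block size dominates */
	printf("%llu\n", round_down_to_boundary(100000, 16, 4096)); /* 65536 */
	return 0;
}
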
/*
* actually need to zero the extent edges. Otherwise xfs_bunmapi
* will take care of it for us.
*/
- if (rt && !XFS_SB_VERSION_HASEXTFLGBIT(&mp->m_sb)) {
+ if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
nimap = 1;
- error = xfs_bmapi(NULL, ip, startoffset_fsb, 1, 0, NULL, 0,
- &imap, &nimap, NULL);
+ error = xfs_bmapi(NULL, ip, startoffset_fsb,
+ 1, 0, NULL, 0, &imap, &nimap, NULL, NULL);
if (error)
goto out_unlock_iolock;
ASSERT(nimap == 0 || nimap == 1);
startoffset_fsb += mp->m_sb.sb_rextsize - mod;
}
nimap = 1;
- error = xfs_bmapi(NULL, ip, endoffset_fsb - 1, 1, 0, NULL, 0,
- &imap, &nimap, NULL);
+ error = xfs_bmapi(NULL, ip, endoffset_fsb - 1,
+ 1, 0, NULL, 0, &imap, &nimap, NULL, NULL);
if (error)
goto out_unlock_iolock;
ASSERT(nimap == 0 || nimap == 1);
while (!error && !done) {
/*
- * allocate and setup the transaction
+		 * allocate and set up the transaction. Allow this
+ * transaction to dip into the reserve blocks to ensure
+ * the freeing of the space succeeds at ENOSPC.
*/
tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
+ tp->t_flags |= XFS_TRANS_RESERVE;
error = xfs_trans_reserve(tp,
resblks,
XFS_WRITE_LOG_RES(mp),
XFS_BMAP_INIT(&free_list, &firstfsb);
error = xfs_bunmapi(tp, ip, startoffset_fsb,
endoffset_fsb - startoffset_fsb,
- 0, 2, &firstfsb, &free_list, &done);
+ 0, 2, &firstfsb, &free_list, NULL, &done);
if (error) {
goto error0;
}
/*
* complete the transaction
*/
- error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed);
+ error = xfs_bmap_finish(&tp, &free_list, &committed);
if (error) {
goto error0;
}
- error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES, NULL);
+ error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
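
Setting XFS_TRANS_RESERVE lets the transaction that frees space dip below the normal free-space floor, so punching a hole cannot itself fail with ENOSPC. A stand-alone sketch of the idea behind such a reserve pool (the numbers and names are invented):

#include <errno.h>
#include <stdio.h>

#define RESERVE_POOL	64	/* blocks normal allocations may not touch */

static long free_blocks = 70;	/* toy free-space counter */

static int alloc_blocks(long want, int may_use_reserve)
{
	long floor = may_use_reserve ? 0 : RESERVE_POOL;

	if (free_blocks - want < floor)
		return ENOSPC;
	free_blocks -= want;
	return 0;
}

int main(void)
{
	/* A normal allocation must leave the reserve pool intact ... */
	printf("normal alloc of 10: %d\n", alloc_blocks(10, 0));  /* ENOSPC */

	/* ... but the space-freeing transaction may dip into it, since it
	 * gives blocks back once the extents are actually released. */
	printf("reserve alloc of 10: %d\n", alloc_blocks(10, 1)); /* 0 */
	return 0;
}
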
*/
int
xfs_change_file_space(
- bhv_desc_t *bdp,
+ xfs_inode_t *ip,
int cmd,
xfs_flock64_t *bf,
xfs_off_t offset,
cred_t *credp,
int attr_flags)
{
+ xfs_mount_t *mp = ip->i_mount;
int clrprealloc;
int error;
xfs_fsize_t fsize;
- xfs_inode_t *ip;
- xfs_mount_t *mp;
int setprealloc;
xfs_off_t startoffset;
xfs_off_t llen;
xfs_trans_t *tp;
- vattr_t va;
- vnode_t *vp;
+ bhv_vattr_t va;
- vp = BHV_TO_VNODE(bdp);
- vn_trace_entry(vp, __FUNCTION__, (inst_t *)__return_address);
-
- ip = XFS_BHVTOI(bdp);
- mp = ip->i_mount;
+ xfs_itrace_entry(ip);
- /*
- * must be a regular file and have write permission
- */
- if (!VN_ISREG(vp))
+ if (!S_ISREG(ip->i_d.di_mode))
return XFS_ERROR(EINVAL);
- xfs_ilock(ip, XFS_ILOCK_SHARED);
-
- if ((error = xfs_iaccess(ip, S_IWUSR, credp))) {
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
- return error;
- }
-
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
-
switch (bf->l_whence) {
case 0: /*SEEK_SET*/
break;
bf->l_start += offset;
break;
case 2: /*SEEK_END*/
- bf->l_start += ip->i_d.di_size;
+ bf->l_start += ip->i_size;
break;
default:
return XFS_ERROR(EINVAL);
bf->l_whence = 0;
startoffset = bf->l_start;
- fsize = ip->i_d.di_size;
+ fsize = ip->i_size;
/*
* XFS_IOC_RESVSP and XFS_IOC_UNRESVSP will reserve or unreserve
va.va_mask = XFS_AT_SIZE;
va.va_size = startoffset;
- error = xfs_setattr(bdp, &va, attr_flags, credp);
+ error = xfs_setattr(ip, &va, attr_flags, credp);
if (error)
return error;
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
xfs_trans_set_sync(tp);
- error = xfs_trans_commit(tp, 0, NULL);
+ error = xfs_trans_commit(tp, 0);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return error;
}
-
-vnodeops_t xfs_vnodeops = {
- BHV_IDENTITY_INIT(VN_BHV_XFS,VNODE_POSITION_XFS),
- .vop_open = xfs_open,
- .vop_read = xfs_read,
-#ifdef HAVE_SENDFILE
- .vop_sendfile = xfs_sendfile,
-#endif
- .vop_write = xfs_write,
- .vop_ioctl = xfs_ioctl,
- .vop_getattr = xfs_getattr,
- .vop_setattr = xfs_setattr,
- .vop_access = xfs_access,
- .vop_lookup = xfs_lookup,
- .vop_create = xfs_create,
- .vop_remove = xfs_remove,
- .vop_link = xfs_link,
- .vop_rename = xfs_rename,
- .vop_mkdir = xfs_mkdir,
- .vop_rmdir = xfs_rmdir,
- .vop_readdir = xfs_readdir,
- .vop_symlink = xfs_symlink,
- .vop_readlink = xfs_readlink,
- .vop_fsync = xfs_fsync,
- .vop_inactive = xfs_inactive,
- .vop_fid2 = xfs_fid2,
- .vop_rwlock = xfs_rwlock,
- .vop_rwunlock = xfs_rwunlock,
- .vop_bmap = xfs_bmap,
- .vop_reclaim = xfs_reclaim,
- .vop_attr_get = xfs_attr_get,
- .vop_attr_set = xfs_attr_set,
- .vop_attr_remove = xfs_attr_remove,
- .vop_attr_list = xfs_attr_list,
- .vop_link_removed = (vop_link_removed_t)fs_noval,
- .vop_vnode_change = (vop_vnode_change_t)fs_noval,
- .vop_tosspages = fs_tosspages,
- .vop_flushinval_pages = fs_flushinval_pages,
- .vop_flush_pages = fs_flush_pages,
- .vop_release = xfs_release,
- .vop_iflush = xfs_inode_flush,
-};
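
With the behavior layer gone, the vnodeops_t dispatch table above disappears and callers in the Linux glue invoke these routines as plain exported functions (the file now includes xfs_vnodeops.h). The difference is simply indirect versus direct calls, roughly as sketched below with invented types:

#include <stdio.h>

struct myfs_inode { int ino; };

/* Old style: operations reached through a per-filesystem table. */
struct myfs_ops {
	int (*open)(struct myfs_inode *ip);
};

static int myfs_open(struct myfs_inode *ip)
{
	printf("open inode %d\n", ip->ino);
	return 0;
}

static const struct myfs_ops ops_table = {
	.open = myfs_open,
};

int main(void)
{
	struct myfs_inode ino = { 42 };

	ops_table.open(&ino);	/* dispatch through the table ...           */
	myfs_open(&ino);	/* ... versus calling the function directly */
	return 0;
}
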