#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
+#include "xfs_acl.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
+#include "xfs_trans_priv.h"
+#include "xfs_inode_item.h"
+#include "xfs_bmap.h"
+#include "xfs_btree_trace.h"
+#include "xfs_dir2_trace.h"
+
+
+/*
+ * Allocate and initialise an xfs_inode.
+ */
+STATIC struct xfs_inode *
+xfs_inode_alloc(
+ struct xfs_mount *mp,
+ xfs_ino_t ino)
+{
+ struct xfs_inode *ip;
+
+ /*
+	 * If this allocation didn't occur inside transactions, we could
+	 * use KM_MAYFAIL and return NULL here on ENOMEM. Set the code
+	 * up to do this anyway.
+ */
+ ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
+ if (!ip)
+ return NULL;
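+
+	/*
+	 * Set up the generic VFS part of the inode here. If this fails
+	 * the VFS has never seen the inode, so we have to return the
+	 * object to the zone ourselves.
+	 */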
+ if (inode_init_always(mp->m_super, VFS_I(ip))) {
+ kmem_zone_free(xfs_inode_zone, ip);
+ return NULL;
+ }
+
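+	/*
+	 * Objects going back into the zone must be left quiescent (see
+	 * the matching asserts in xfs_inode_free), so verify that a
+	 * recycled object really is clean before reusing it.
+	 */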
+ ASSERT(atomic_read(&ip->i_iocount) == 0);
+ ASSERT(atomic_read(&ip->i_pincount) == 0);
+ ASSERT(!spin_is_locked(&ip->i_flags_lock));
+ ASSERT(completion_done(&ip->i_flush));
+
+ /* initialise the xfs inode */
+ ip->i_ino = ino;
+ ip->i_mount = mp;
+ memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
+ ip->i_afp = NULL;
+ memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
+ ip->i_flags = 0;
+ ip->i_update_core = 0;
+ ip->i_delayed_blks = 0;
+ memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
+ ip->i_size = 0;
+ ip->i_new_size = 0;
+
+ /*
+ * Initialize inode's trace buffers.
+ */
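+	/*
+	 * KM_NOFS keeps these allocations from recursing back into the
+	 * filesystem through memory reclaim.
+	 */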
+#ifdef XFS_INODE_TRACE
+ ip->i_trace = ktrace_alloc(INODE_TRACE_SIZE, KM_NOFS);
+#endif
+#ifdef XFS_BMAP_TRACE
+ ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_NOFS);
+#endif
+#ifdef XFS_BTREE_TRACE
+ ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_NOFS);
+#endif
+#ifdef XFS_RW_TRACE
+ ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_NOFS);
+#endif
+#ifdef XFS_ILOCK_TRACE
+ ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_NOFS);
+#endif
+#ifdef XFS_DIR2_TRACE
+ ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_NOFS);
+#endif
+
+ /* prevent anyone from using this yet */
+ VFS_I(ip)->i_state = I_NEW|I_LOCK;
+
+ return ip;
+}
+
+STATIC void
+xfs_inode_free(
+ struct xfs_inode *ip)
+{
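+	/*
+	 * Only these types can carry data fork allocations that need
+	 * tearing down: inline data, an extent list or a btree root.
+	 * Other inode types keep nothing in the data fork to free.
+	 */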
+ switch (ip->i_d.di_mode & S_IFMT) {
+ case S_IFREG:
+ case S_IFDIR:
+ case S_IFLNK:
+ xfs_idestroy_fork(ip, XFS_DATA_FORK);
+ break;
+ }
+
+ if (ip->i_afp)
+ xfs_idestroy_fork(ip, XFS_ATTR_FORK);
+
+#ifdef XFS_INODE_TRACE
+ ktrace_free(ip->i_trace);
+#endif
+#ifdef XFS_BMAP_TRACE
+ ktrace_free(ip->i_xtrace);
+#endif
+#ifdef XFS_BTREE_TRACE
+ ktrace_free(ip->i_btrace);
+#endif
+#ifdef XFS_RW_TRACE
+ ktrace_free(ip->i_rwtrace);
+#endif
+#ifdef XFS_ILOCK_TRACE
+ ktrace_free(ip->i_lock_trace);
+#endif
+#ifdef XFS_DIR2_TRACE
+ ktrace_free(ip->i_dir_trace);
+#endif
+
+ if (ip->i_itemp) {
+ /*
+ * Only if we are shutting down the fs will we see an
+ * inode still in the AIL. If it is there, we should remove
+ * it to prevent a use-after-free from occurring.
+ */
+ xfs_log_item_t *lip = &ip->i_itemp->ili_item;
+ struct xfs_ail *ailp = lip->li_ailp;
+
+ ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
+ XFS_FORCED_SHUTDOWN(ip->i_mount));
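+		/*
+		 * The first flag test below runs unlocked to avoid taking
+		 * the AIL lock in the common case, then is repeated under
+		 * xa_lock. xfs_trans_ail_delete() drops xa_lock itself,
+		 * which is why only the else branch unlocks explicitly.
+		 */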
+ if (lip->li_flags & XFS_LI_IN_AIL) {
+ spin_lock(&ailp->xa_lock);
+ if (lip->li_flags & XFS_LI_IN_AIL)
+ xfs_trans_ail_delete(ailp, lip);
+ else
+ spin_unlock(&ailp->xa_lock);
+ }
+ xfs_inode_item_destroy(ip);
+ ip->i_itemp = NULL;
+ }
+
+ /* asserts to verify all state is correct here */
+ ASSERT(atomic_read(&ip->i_iocount) == 0);
+ ASSERT(atomic_read(&ip->i_pincount) == 0);
+ ASSERT(!spin_is_locked(&ip->i_flags_lock));
+ ASSERT(completion_done(&ip->i_flush));
+
+ kmem_zone_free(xfs_inode_zone, ip);
+}
/*
 * Check the validity of the inode we just found in the cache
int flags,
int lock_flags) __releases(pag->pag_ici_lock)
{
+ struct inode *inode = VFS_I(ip);
struct xfs_mount *mp = ip->i_mount;
- int error = 0;
+ int error;
+
+ spin_lock(&ip->i_flags_lock);
/*
- * If INEW is set this inode is being set up
- * If IRECLAIM is set this inode is being torn down
- * Pause and try again.
+ * If we are racing with another cache hit that is currently
+ * instantiating this inode or currently recycling it out of
+	 * reclaimable state, wait for the initialisation to complete
+ * before continuing.
+ *
+ * XXX(hch): eventually we should do something equivalent to
+ * wait_on_inode to wait for these flags to be cleared
+ * instead of polling for it.
*/
- if (xfs_iflags_test(ip, (XFS_INEW|XFS_IRECLAIM))) {
- error = EAGAIN;
+ if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
XFS_STATS_INC(xs_ig_frecycle);
+ error = EAGAIN;
goto out_error;
}
- /* If IRECLAIMABLE is set, we've torn down the vfs inode part */
- if (xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {
-
- /*
- * If lookup is racing with unlink, then we should return an
- * error immediately so we don't remove it from the reclaim
- * list and potentially leak the inode.
- */
-
- if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
- error = ENOENT;
- goto out_error;
- }
+ /*
+	 * If lookup is racing with unlink, return an error immediately.
+ */
+ if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
+ error = ENOENT;
+ goto out_error;
+ }
+ /*
+ * If IRECLAIMABLE is set, we've torn down the VFS inode already.
+	 * We need to carefully get it back into a usable state.
+ */
+ if (ip->i_flags & XFS_IRECLAIMABLE) {
xfs_itrace_exit_tag(ip, "xfs_iget.alloc");
/*
- * We need to re-initialise the VFS inode as it has been
- * 'freed' by the VFS. Do this here so we can deal with
- * errors cleanly, then tag it so it can be set up correctly
- * later.
+		 * We need to set XFS_INEW atomically with clearing the
+		 * reclaimable tag so that there is always an indicator that
+		 * the inode is still being initialised.
*/
- if (!inode_init_always(mp->m_super, VFS_I(ip))) {
- error = ENOMEM;
- goto out_error;
- }
- xfs_iflags_set(ip, XFS_INEW);
- xfs_iflags_clear(ip, XFS_IRECLAIMABLE);
-
- /* clear the radix tree reclaim flag as well. */
+ ip->i_flags |= XFS_INEW;
+ ip->i_flags &= ~XFS_IRECLAIMABLE;
__xfs_inode_clear_reclaim_tag(mp, pag, ip);
+
+ spin_unlock(&ip->i_flags_lock);
read_unlock(&pag->pag_ici_lock);
- XFS_MOUNT_ILOCK(mp);
- list_del_init(&ip->i_reclaim);
- XFS_MOUNT_IUNLOCK(mp);
- } else if (!igrab(VFS_I(ip))) {
- /* If the VFS inode is being torn down, pause and try again. */
- error = EAGAIN;
- XFS_STATS_INC(xs_ig_frecycle);
- goto out_error;
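+		/*
+		 * inode_init_always() returns a negative errno; negate it
+		 * here to match the positive error values used throughout
+		 * XFS.
+		 */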
+ error = -inode_init_always(mp->m_super, inode);
+ if (error) {
+ /*
+ * Re-initializing the inode failed, and we are in deep
+ * trouble. Try to re-add it to the reclaim list.
+ */
+ read_lock(&pag->pag_ici_lock);
+ spin_lock(&ip->i_flags_lock);
+
+ ip->i_flags &= ~XFS_INEW;
+ ip->i_flags |= XFS_IRECLAIMABLE;
+ __xfs_inode_set_reclaim_tag(pag, ip);
+ goto out_error;
+ }
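+		/*
+		 * As in xfs_inode_alloc(), mark the VFS inode locked and
+		 * new so nobody can use it before setup completes.
+		 */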
+ inode->i_state = I_LOCK|I_NEW;
} else {
- /* we've got a live one */
- read_unlock(&pag->pag_ici_lock);
- }
+ /* If the VFS inode is being torn down, pause and try again. */
+ if (!igrab(inode)) {
+ error = EAGAIN;
+ goto out_error;
+ }
- if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
- error = ENOENT;
- goto out;
+ /* We've got a live one. */
+ spin_unlock(&ip->i_flags_lock);
+ read_unlock(&pag->pag_ici_lock);
}
if (lock_flags != 0)
return 0;
out_error:
+ spin_unlock(&ip->i_flags_lock);
read_unlock(&pag->pag_ici_lock);
-out:
return error;
}
unsigned long first_index, mask;
xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino);
- /*
- * Read the disk inode attributes into a new inode structure and get
- * a new vnode for it. This should also initialize i_ino and i_mount.
- */
- error = xfs_iread(mp, tp, ino, &ip, bno,
- (flags & XFS_IGET_BULKSTAT) ? XFS_IMAP_BULKSTAT : 0);
+ ip = xfs_inode_alloc(mp, ino);
+ if (!ip)
+ return ENOMEM;
+
+ error = xfs_iread(mp, tp, ip, bno, flags);
if (error)
- return error;
+ goto out_destroy;
xfs_itrace_exit_tag(ip, "xfs_iget.alloc");
/*
* Preload the radix tree so we can insert safely under the
- * write spinlock.
+ * write spinlock. Note that we cannot sleep inside the preload
+ * region.
*/
if (radix_tree_preload(GFP_KERNEL)) {
error = EAGAIN;
goto out_destroy;
}
- if (lock_flags)
- xfs_ilock(ip, lock_flags);
+ /*
+ * Because the inode hasn't been added to the radix-tree yet it can't
+ * be found by another thread, so we can do the non-sleeping lock here.
+ */
+ if (lock_flags) {
+ if (!xfs_ilock_nowait(ip, lock_flags))
+ BUG();
+ }
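+	/*
+	 * XFS_INODE_CLUSTER_SIZE() >> sb_inodelog is the number of inodes
+	 * per cluster, so first_index below is agino rounded down to the
+	 * start of its inode cluster.
+	 */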
mask = ~(((XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog)) - 1);
first_index = agino & mask;
WARN_ON(error != -EEXIST);
XFS_STATS_INC(xs_ig_dup);
error = EAGAIN;
- goto out_unlock;
+ goto out_preload_end;
}
/* These values _must_ be set before releasing the radix tree lock! */
*ipp = ip;
return 0;
-out_unlock:
+out_preload_end:
write_unlock(&pag->pag_ici_lock);
radix_tree_preload_end();
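+	/* undo the xfs_ilock_nowait() taken before the insert attempt */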
+ if (lock_flags)
+ xfs_iunlock(ip, lock_flags);
out_destroy:
- xfs_idestroy(ip);
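+	/*
+	 * Unwind in two steps: __destroy_inode() releases the VFS state
+	 * set up by inode_init_always(), then xfs_inode_free() tears down
+	 * the XFS-specific state and returns the object to the zone.
+	 */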
+ __destroy_inode(VFS_I(ip));
+ xfs_inode_free(ip);
return error;
}
}
xfs_put_perag(mp, pag);
- xfs_iflags_set(ip, XFS_IMODIFIED);
*ipp = ip;
ASSERT(ip->i_df.if_ext_max ==
return error;
}
-
-/*
- * Look for the inode corresponding to the given ino in the hash table.
- * If it is there and its i_transp pointer matches tp, return it.
- * Otherwise, return NULL.
- */
-xfs_inode_t *
-xfs_inode_incore(xfs_mount_t *mp,
- xfs_ino_t ino,
- xfs_trans_t *tp)
-{
- xfs_inode_t *ip;
- xfs_perag_t *pag;
-
- pag = xfs_get_perag(mp, ino);
- read_lock(&pag->pag_ici_lock);
- ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ino));
- read_unlock(&pag->pag_ici_lock);
- xfs_put_perag(mp, pag);
-
- /* the returned inode must match the transaction */
- if (ip && (ip->i_transp != tp))
- return NULL;
- return ip;
-}
-
/*
* Decrement reference count of an inode structure and unlock it.
*
IRELE(ip);
}
-
/*
- * This routine embodies the part of the reclaim code that pulls
- * the inode from the inode hash table and the mount structure's
- * inode list.
- * This should only be called from xfs_reclaim().
+ * This is called to free all the memory associated with an inode.
+ * It must free the inode itself and any buffers allocated for
+ * if_extents/if_data and if_broot. It must also free the lock
+ * associated with the inode.
+ *
+ * Note: because we don't initialise everything on reallocation out
+ * of the zone, we must ensure we nullify everything correctly before
+ * freeing the structure.
*/
void
-xfs_ireclaim(xfs_inode_t *ip)
+xfs_ireclaim(
+ struct xfs_inode *ip)
{
- /*
- * Remove from old hash list and mount list.
- */
- XFS_STATS_INC(xs_ig_reclaims);
-
- xfs_iextract(ip);
-
- /*
- * Here we do a spurious inode lock in order to coordinate with inode
- * cache radix tree lookups. This is because the lookup can reference
- * the inodes in the cache without taking references. We make that OK
- * here by ensuring that we wait until the inode is unlocked after the
- * lookup before we go ahead and free it. We get both the ilock and
- * the iolock because the code may need to drop the ilock one but will
- * still hold the iolock.
- */
- xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+ struct xfs_mount *mp = ip->i_mount;
+ struct xfs_perag *pag;
- /*
- * Release dquots (and their references) if any. An inode may escape
- * xfs_inactive and get here via vn_alloc->vn_reclaim path.
- */
- XFS_QM_DQDETACH(ip->i_mount, ip);
+ XFS_STATS_INC(xs_ig_reclaims);
/*
- * Free all memory associated with the inode.
+ * Remove the inode from the per-AG radix tree. It doesn't matter
+ * if it was never added to it because radix_tree_delete can deal
+ * with that case just fine.
*/
- xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
- xfs_idestroy(ip);
-}
-
-/*
- * This routine removes an about-to-be-destroyed inode from
- * all of the lists in which it is located with the exception
- * of the behavior chain.
- */
-void
-xfs_iextract(
- xfs_inode_t *ip)
-{
- xfs_mount_t *mp = ip->i_mount;
- xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino);
-
+ pag = xfs_get_perag(mp, ip->i_ino);
write_lock(&pag->pag_ici_lock);
radix_tree_delete(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino));
write_unlock(&pag->pag_ici_lock);
xfs_put_perag(mp, pag);
- /* Deal with the deleted inodes list */
- XFS_MOUNT_ILOCK(mp);
- list_del_init(&ip->i_reclaim);
- mp->m_ireclaims++;
- XFS_MOUNT_IUNLOCK(mp);
+ /*
+ * Here we do an (almost) spurious inode lock in order to coordinate
+ * with inode cache radix tree lookups. This is because the lookup
+ * can reference the inodes in the cache without taking references.
+ *
+ * We make that OK here by ensuring that we wait until the inode is
+ * unlocked after the lookup before we go ahead and free it. We get
+	 * both the ilock and the iolock because the code may need to drop
+	 * the ilock but will still hold the iolock.
+ */
+ xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
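+	/* release any dquot references still attached to the inode */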
+ xfs_qm_dqdetach(ip);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
+
+ xfs_inode_free(ip);
}
/*
* it is in the AIL and anyone is waiting on it. Don't do
* this if the caller has asked us not to.
*/
- xfs_trans_unlocked_item(ip->i_mount,
+ xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
(xfs_log_item_t*)(ip->i_itemp));
}
xfs_ilock_trace(ip, 3, lock_flags, (inst_t *)__return_address);
}
#endif
+#ifdef XFS_INODE_TRACE
+
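+/*
+ * Record one fixed-size trace entry: event type, description, line
+ * number, current i_count and caller addresses, padded with NULLs to
+ * the 16 slots ktrace_enter() expects.
+ */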
+#define KTRACE_ENTER(ip, vk, s, line, ra) \
+ ktrace_enter((ip)->i_trace, \
+/* 0 */ (void *)(__psint_t)(vk), \
+/* 1 */ (void *)(s), \
+/* 2 */ (void *)(__psint_t) line, \
+/* 3 */ (void *)(__psint_t)atomic_read(&VFS_I(ip)->i_count), \
+/* 4 */ (void *)(ra), \
+/* 5 */ NULL, \
+/* 6 */ (void *)(__psint_t)current_cpu(), \
+/* 7 */ (void *)(__psint_t)current_pid(), \
+/* 8 */ (void *)__return_address, \
+/* 9 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL)
+
+/*
+ * Inode tracing code.
+ */
+void
+_xfs_itrace_entry(xfs_inode_t *ip, const char *func, inst_t *ra)
+{
+ KTRACE_ENTER(ip, INODE_KTRACE_ENTRY, func, 0, ra);
+}
+
+void
+_xfs_itrace_exit(xfs_inode_t *ip, const char *func, inst_t *ra)
+{
+ KTRACE_ENTER(ip, INODE_KTRACE_EXIT, func, 0, ra);
+}
+
+void
+xfs_itrace_hold(xfs_inode_t *ip, char *file, int line, inst_t *ra)
+{
+ KTRACE_ENTER(ip, INODE_KTRACE_HOLD, file, line, ra);
+}
+
+void
+_xfs_itrace_ref(xfs_inode_t *ip, char *file, int line, inst_t *ra)
+{
+ KTRACE_ENTER(ip, INODE_KTRACE_REF, file, line, ra);
+}
+
+void
+xfs_itrace_rele(xfs_inode_t *ip, char *file, int line, inst_t *ra)
+{
+ KTRACE_ENTER(ip, INODE_KTRACE_RELE, file, line, ra);
+}
+#endif /* XFS_INODE_TRACE */