xfs: remove dead XFS_LOUD_RECOVERY code
[safe/jmp/linux-2.6] / fs / xfs / quota / xfs_qm.c
index 53a00fb..38e7641 100644 (file)
 #include "xfs_bit.h"
 #include "xfs_log.h"
 #include "xfs_inum.h"
-#include "xfs_clnt.h"
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
-#include "xfs_dir.h"
 #include "xfs_dir2.h"
 #include "xfs_alloc.h"
 #include "xfs_dmapi.h"
@@ -33,7 +31,6 @@
 #include "xfs_bmap_btree.h"
 #include "xfs_alloc_btree.h"
 #include "xfs_ialloc_btree.h"
-#include "xfs_dir_sf.h"
 #include "xfs_dir2_sf.h"
 #include "xfs_attr_sf.h"
 #include "xfs_dinode.h"
 #include "xfs_error.h"
 #include "xfs_bmap.h"
 #include "xfs_rw.h"
-#include "xfs_acl.h"
-#include "xfs_cap.h"
-#include "xfs_mac.h"
 #include "xfs_attr.h"
 #include "xfs_buf_item.h"
 #include "xfs_trans_space.h"
 #include "xfs_utils.h"
 #include "xfs_qm.h"
+#include "xfs_trace.h"
 
 /*
  * The global quota manager. There is only one of these for the entire
  * quota functionality, including maintaining the freelist and hash
  * tables of dquots.
  */
-mutex_t                xfs_Gqm_lock;
+struct mutex   xfs_Gqm_lock;
 struct xfs_qm  *xfs_Gqm;
 uint           ndquot;
 
 kmem_zone_t    *qm_dqzone;
 kmem_zone_t    *qm_dqtrxzone;
-STATIC kmem_shaker_t   xfs_qm_shaker;
+
+static cred_t  xfs_zerocr;
 
 STATIC void    xfs_qm_list_init(xfs_dqlist_t *, char *, int);
 STATIC void    xfs_qm_list_destroy(xfs_dqlist_t *);
 
-STATIC void    xfs_qm_freelist_init(xfs_frlist_t *);
-STATIC void    xfs_qm_freelist_destroy(xfs_frlist_t *);
-STATIC int     xfs_qm_mplist_nowait(xfs_mount_t *);
-STATIC int     xfs_qm_dqhashlock_nowait(xfs_dquot_t *);
-
 STATIC int     xfs_qm_init_quotainos(xfs_mount_t *);
 STATIC int     xfs_qm_init_quotainfo(xfs_mount_t *);
 STATIC int     xfs_qm_shake(int, gfp_t);
 
+static struct shrinker xfs_qm_shaker = {
+       .shrink = xfs_qm_shake,
+       .seeks = DEFAULT_SEEKS,
+};
+
 #ifdef DEBUG
-extern mutex_t qcheck_lock;
+extern struct mutex    qcheck_lock;
 #endif
 
 #ifdef QUOTADEBUG
-#define XQM_LIST_PRINT(l, NXT, title) \
-{ \
-       xfs_dquot_t     *dqp; int i = 0; \
-       cmn_err(CE_DEBUG, "%s (#%d)", title, (int) (l)->qh_nelems); \
-       for (dqp = (l)->qh_next; dqp != NULL; dqp = dqp->NXT) { \
-               cmn_err(CE_DEBUG, "   %d.  \"%d (%s)\"   " \
-                                 "bcnt = %d, icnt = %d, refs = %d", \
-                       ++i, (int) be32_to_cpu(dqp->q_core.d_id), \
-                       DQFLAGTO_TYPESTR(dqp),       \
-                       (int) be64_to_cpu(dqp->q_core.d_bcount), \
-                       (int) be64_to_cpu(dqp->q_core.d_icount), \
-                       (int) dqp->q_nrefs);  } \
+static void
+xfs_qm_dquot_list_print(
+       struct xfs_mount *mp)
+{
+       xfs_dquot_t     *dqp;
+       int             i = 0;
+
+       list_for_each_entry(dqp, &mp->m_quotainfo->qi_dqlist, q_mplist) {
+               cmn_err(CE_DEBUG, "   %d. \"%d (%s)\"   "
+                                 "bcnt = %lld, icnt = %lld, refs = %d",
+                       i++, be32_to_cpu(dqp->q_core.d_id),
+                       DQFLAGTO_TYPESTR(dqp),
+                       (long long)be64_to_cpu(dqp->q_core.d_bcount),
+                       (long long)be64_to_cpu(dqp->q_core.d_icount),
+                       dqp->q_nrefs);
+       }
 }
 #else
-#define XQM_LIST_PRINT(l, NXT, title) do { } while (0)
+static void xfs_qm_dquot_list_print(struct xfs_mount *mp) { }
 #endif
 
 /*
@@ -111,17 +111,23 @@ xfs_Gqm_init(void)
 {
        xfs_dqhash_t    *udqhash, *gdqhash;
        xfs_qm_t        *xqm;
-       uint            i, hsize, flags = KM_SLEEP | KM_MAYFAIL;
+       size_t          hsize;
+       uint            i;
 
        /*
         * Initialize the dquot hash tables.
         */
-       hsize = XFS_QM_HASHSIZE_HIGH;
-       while (!(udqhash = kmem_zalloc(hsize * sizeof(xfs_dqhash_t), flags))) {
-               if ((hsize >>= 1) <= XFS_QM_HASHSIZE_LOW)
-                       flags = KM_SLEEP;
-       }
-       gdqhash = kmem_zalloc(hsize * sizeof(xfs_dqhash_t), KM_SLEEP);
+       udqhash = kmem_zalloc_greedy(&hsize,
+                                    XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t),
+                                    XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t));
+       if (!udqhash)
+               goto out;
+
+       gdqhash = kmem_zalloc_large(hsize);
+       if (!gdqhash)
+               goto out_free_udqhash;
+
+       hsize /= sizeof(xfs_dqhash_t);
        ndquot = hsize << 8;
 
        xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP);
@@ -139,7 +145,9 @@ xfs_Gqm_init(void)
        /*
         * Freelist of all dquots of all file systems
         */
-       xfs_qm_freelist_init(&(xqm->qm_dqfreelist));
+       INIT_LIST_HEAD(&xqm->qm_dqfrlist);
+       xqm->qm_dqfrlist_cnt = 0;
+       mutex_init(&xqm->qm_dqfrlist_lock);
 
        /*
         * dquot zone. we register our own low-memory callback.
@@ -151,7 +159,7 @@ xfs_Gqm_init(void)
        } else
                xqm->qm_dqzone = qm_dqzone;
 
-       xfs_qm_shaker = kmem_shake_register(xfs_qm_shake);
+       register_shrinker(&xfs_qm_shaker);
 
        /*
         * The t_dqinfo portion of transactions.
@@ -170,6 +178,11 @@ xfs_Gqm_init(void)
        mutex_init(&qcheck_lock);
 #endif
        return xqm;
+
+ out_free_udqhash:
+       kmem_free_large(udqhash);
+ out:
+       return NULL;
 }
 
 /*
@@ -179,26 +192,41 @@ STATIC void
 xfs_qm_destroy(
        struct xfs_qm   *xqm)
 {
+       struct xfs_dquot *dqp, *n;
        int             hsize, i;
 
        ASSERT(xqm != NULL);
        ASSERT(xqm->qm_nrefs == 0);
-       kmem_shake_deregister(xfs_qm_shaker);
+       unregister_shrinker(&xfs_qm_shaker);
        hsize = xqm->qm_dqhashmask + 1;
        for (i = 0; i < hsize; i++) {
                xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
                xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i]));
        }
-       kmem_free(xqm->qm_usr_dqhtable, hsize * sizeof(xfs_dqhash_t));
-       kmem_free(xqm->qm_grp_dqhtable, hsize * sizeof(xfs_dqhash_t));
+       kmem_free_large(xqm->qm_usr_dqhtable);
+       kmem_free_large(xqm->qm_grp_dqhtable);
        xqm->qm_usr_dqhtable = NULL;
        xqm->qm_grp_dqhtable = NULL;
        xqm->qm_dqhashmask = 0;
-       xfs_qm_freelist_destroy(&(xqm->qm_dqfreelist));
+
+       /* frlist cleanup */
+       mutex_lock(&xqm->qm_dqfrlist_lock);
+       list_for_each_entry_safe(dqp, n, &xqm->qm_dqfrlist, q_freelist) {
+               xfs_dqlock(dqp);
+#ifdef QUOTADEBUG
+               cmn_err(CE_DEBUG, "FREELIST destroy 0x%p", dqp);
+#endif
+               list_del_init(&dqp->q_freelist);
+               xfs_Gqm->qm_dqfrlist_cnt--;
+               xfs_dqunlock(dqp);
+               xfs_qm_dqdestroy(dqp);
+       }
+       mutex_unlock(&xqm->qm_dqfrlist_lock);
+       mutex_destroy(&xqm->qm_dqfrlist_lock);
 #ifdef DEBUG
        mutex_destroy(&qcheck_lock);
 #endif
-       kmem_free(xqm, sizeof(xfs_qm_t));
+       kmem_free(xqm);
 }
 
 /*
@@ -217,17 +245,21 @@ xfs_qm_hold_quotafs_ref(
         * the structure could disappear between the entry to this routine and
         * a HOLD operation if not locked.
         */
-       XFS_QM_LOCK(xfs_Gqm);
+       mutex_lock(&xfs_Gqm_lock);
 
-       if (xfs_Gqm == NULL)
+       if (!xfs_Gqm) {
                xfs_Gqm = xfs_Gqm_init();
+               if (!xfs_Gqm) {
+                       mutex_unlock(&xfs_Gqm_lock);
+                       return ENOMEM;
+               }
+       }
+
        /*
         * We can keep a list of all filesystems with quotas mounted for
         * debugging and statistical purposes, but ...
         * Just take a reference and get out.
         */
-       XFS_QM_HOLD(xfs_Gqm);
-       XFS_QM_UNLOCK(xfs_Gqm);
+       xfs_Gqm->qm_nrefs++;
+       mutex_unlock(&xfs_Gqm_lock);
 
        return 0;
 }
@@ -242,7 +274,7 @@ STATIC void
 xfs_qm_rele_quotafs_ref(
        struct xfs_mount *mp)
 {
-       xfs_dquot_t     *dqp, *nextdqp;
+       xfs_dquot_t     *dqp, *n;
 
        ASSERT(xfs_Gqm);
        ASSERT(xfs_Gqm->qm_nrefs > 0);
@@ -250,88 +282,48 @@ xfs_qm_rele_quotafs_ref(
        /*
         * Go thru the freelist and destroy all inactive dquots.
         */
-       xfs_qm_freelist_lock(xfs_Gqm);
+       mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
 
-       for (dqp = xfs_Gqm->qm_dqfreelist.qh_next;
-            dqp != (xfs_dquot_t *)&(xfs_Gqm->qm_dqfreelist); ) {
+       list_for_each_entry_safe(dqp, n, &xfs_Gqm->qm_dqfrlist, q_freelist) {
                xfs_dqlock(dqp);
-               nextdqp = dqp->dq_flnext;
                if (dqp->dq_flags & XFS_DQ_INACTIVE) {
                        ASSERT(dqp->q_mount == NULL);
                        ASSERT(! XFS_DQ_IS_DIRTY(dqp));
-                       ASSERT(dqp->HL_PREVP == NULL);
-                       ASSERT(dqp->MPL_PREVP == NULL);
-                       XQM_FREELIST_REMOVE(dqp);
+                       ASSERT(list_empty(&dqp->q_hashlist));
+                       ASSERT(list_empty(&dqp->q_mplist));
+                       list_del_init(&dqp->q_freelist);
+                       xfs_Gqm->qm_dqfrlist_cnt--;
                        xfs_dqunlock(dqp);
                        xfs_qm_dqdestroy(dqp);
                } else {
                        xfs_dqunlock(dqp);
                }
-               dqp = nextdqp;
        }
-       xfs_qm_freelist_unlock(xfs_Gqm);
+       mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
 
        /*
         * Destroy the entire XQM. If somebody mounts with quotaon, this'll
         * be restarted.
         */
-       XFS_QM_LOCK(xfs_Gqm);
-       XFS_QM_RELE(xfs_Gqm);
-       if (xfs_Gqm->qm_nrefs == 0) {
+       mutex_lock(&xfs_Gqm_lock);
+       if (--xfs_Gqm->qm_nrefs == 0) {
                xfs_qm_destroy(xfs_Gqm);
                xfs_Gqm = NULL;
        }
-       XFS_QM_UNLOCK(xfs_Gqm);
-}
-
-/*
- * This is called at mount time from xfs_mountfs to initialize the quotainfo
- * structure and start the global quotamanager (xfs_Gqm) if it hasn't done
- * so already. Note that the superblock has not been read in yet.
- */
-void
-xfs_qm_mount_quotainit(
-       xfs_mount_t     *mp,
-       uint            flags)
-{
-       /*
-        * User, projects or group quotas has to be on.
-        */
-       ASSERT(flags & (XFSMNT_UQUOTA | XFSMNT_PQUOTA | XFSMNT_GQUOTA));
-
-       /*
-        * Initialize the flags in the mount structure. From this point
-        * onwards we look at m_qflags to figure out if quotas's ON/OFF, etc.
-        * Note that we enforce nothing if accounting is off.
-        * ie.  XFSMNT_*QUOTA must be ON for XFSMNT_*QUOTAENF.
-        * It isn't necessary to take the quotaoff lock to do this; this is
-        * called from mount.
-        */
-       if (flags & XFSMNT_UQUOTA) {
-               mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
-               if (flags & XFSMNT_UQUOTAENF)
-                       mp->m_qflags |= XFS_UQUOTA_ENFD;
-       }
-       if (flags & XFSMNT_GQUOTA) {
-               mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
-               if (flags & XFSMNT_GQUOTAENF)
-                       mp->m_qflags |= XFS_OQUOTA_ENFD;
-       } else if (flags & XFSMNT_PQUOTA) {
-               mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
-               if (flags & XFSMNT_PQUOTAENF)
-                       mp->m_qflags |= XFS_OQUOTA_ENFD;
-       }
+       mutex_unlock(&xfs_Gqm_lock);
 }
 
 /*
  * Just destroy the quotainfo structure.
  */
 void
-xfs_qm_unmount_quotadestroy(
-       xfs_mount_t     *mp)
+xfs_qm_unmount(
+       struct xfs_mount        *mp)
 {
-       if (mp->m_quotainfo)
+       if (mp->m_quotainfo) {
+               xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
                xfs_qm_destroy_quotainfo(mp);
+       }
 }
 
 
@@ -340,17 +332,17 @@ xfs_qm_unmount_quotadestroy(
  * necessary data structures like quotainfo.  This is also responsible for
  * running a quotacheck as necessary.  We are guaranteed that the superblock
  * is consistently read in at this point.
+ *
+ * If we fail here, the mount will continue with quota turned off. We don't
+ * need to indicate success or failure at all.
  */
-int
+void
 xfs_qm_mount_quotas(
-       xfs_mount_t     *mp,
-       int             mfsi_flags)
+       xfs_mount_t     *mp)
 {
-       unsigned long   s;
        int             error = 0;
        uint            sbf;
 
-
        /*
         * If quotas on realtime volumes is not supported, we disable
         * quotas immediately.
@@ -369,7 +361,8 @@ xfs_qm_mount_quotas(
         * Allocate the quotainfo structure inside the mount struct, and
         * create quotainode(s), and change/rev superblock if necessary.
         */
-       if ((error = xfs_qm_init_quotainfo(mp))) {
+       error = xfs_qm_init_quotainfo(mp);
+       if (error) {
                /*
                 * We must turn off quotas.
                 */
@@ -380,25 +373,32 @@ xfs_qm_mount_quotas(
        /*
         * If any of the quotas are not consistent, do a quotacheck.
         */
-       if (XFS_QM_NEED_QUOTACHECK(mp) &&
-               !(mfsi_flags & XFS_MFSI_NO_QUOTACHECK)) {
-               if ((error = xfs_qm_quotacheck(mp))) {
-                       /* Quotacheck has failed and quotas have
-                        * been disabled.
-                        */
-                       return XFS_ERROR(error);
+       if (XFS_QM_NEED_QUOTACHECK(mp)) {
+               error = xfs_qm_quotacheck(mp);
+               if (error) {
+                       /* Quotacheck failed and disabled quotas. */
+                       return;
                }
        }
+       /*
+        * If one type of quotas is off, then it will lose its
+        * quotachecked status, since we won't be doing accounting for
+        * that type anymore.
+        */
+       if (!XFS_IS_UQUOTA_ON(mp))
+               mp->m_qflags &= ~XFS_UQUOTA_CHKD;
+       if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp)))
+               mp->m_qflags &= ~XFS_OQUOTA_CHKD;
 
  write_changes:
        /*
-        * We actually don't have to acquire the SB_LOCK at all.
+        * We actually don't have to acquire the m_sb_lock at all.
         * This can only be called from mount, and that's single threaded. XXX
         */
-       s = XFS_SB_LOCK(mp);
+       spin_lock(&mp->m_sb_lock);
        sbf = mp->m_sb.sb_qflags;
        mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
-       XFS_SB_UNLOCK(mp, s);
+       spin_unlock(&mp->m_sb_lock);
 
        if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
                if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
@@ -417,20 +417,22 @@ xfs_qm_mount_quotas(
        if (error) {
                xfs_fs_cmn_err(CE_WARN, mp,
                        "Failed to initialize disk quotas.");
+               return;
        }
-       return XFS_ERROR(error);
+
+#ifdef QUOTADEBUG
+       if (XFS_IS_QUOTA_ON(mp))
+               xfs_qm_internalqcheck(mp);
+#endif
 }
 
 /*
  * Called from the vfsops layer.
  */
-int
+void
 xfs_qm_unmount_quotas(
        xfs_mount_t     *mp)
 {
-       xfs_inode_t     *uqp, *gqp;
-       int             error = 0;
-
        /*
         * Release the dquots that root inode, et al might be holding,
         * before we flush quotas and blow away the quotainfo structure.
@@ -443,43 +445,18 @@ xfs_qm_unmount_quotas(
                xfs_qm_dqdetach(mp->m_rsumip);
 
        /*
-        * Flush out the quota inodes.
+        * Release the quota inodes.
         */
-       uqp = gqp = NULL;
        if (mp->m_quotainfo) {
-               if ((uqp = mp->m_quotainfo->qi_uquotaip) != NULL) {
-                       xfs_ilock(uqp, XFS_ILOCK_EXCL);
-                       xfs_iflock(uqp);
-                       error = xfs_iflush(uqp, XFS_IFLUSH_SYNC);
-                       xfs_iunlock(uqp, XFS_ILOCK_EXCL);
-                       if (unlikely(error == EFSCORRUPTED)) {
-                               XFS_ERROR_REPORT("xfs_qm_unmount_quotas(1)",
-                                                XFS_ERRLEVEL_LOW, mp);
-                               goto out;
-                       }
+               if (mp->m_quotainfo->qi_uquotaip) {
+                       IRELE(mp->m_quotainfo->qi_uquotaip);
+                       mp->m_quotainfo->qi_uquotaip = NULL;
                }
-               if ((gqp = mp->m_quotainfo->qi_gquotaip) != NULL) {
-                       xfs_ilock(gqp, XFS_ILOCK_EXCL);
-                       xfs_iflock(gqp);
-                       error = xfs_iflush(gqp, XFS_IFLUSH_SYNC);
-                       xfs_iunlock(gqp, XFS_ILOCK_EXCL);
-                       if (unlikely(error == EFSCORRUPTED)) {
-                               XFS_ERROR_REPORT("xfs_qm_unmount_quotas(2)",
-                                                XFS_ERRLEVEL_LOW, mp);
-                               goto out;
-                       }
+               if (mp->m_quotainfo->qi_gquotaip) {
+                       IRELE(mp->m_quotainfo->qi_gquotaip);
+                       mp->m_quotainfo->qi_gquotaip = NULL;
                }
        }
-       if (uqp) {
-                XFS_PURGE_INODE(uqp);
-                mp->m_quotainfo->qi_uquotaip = NULL;
-       }
-       if (gqp) {
-               XFS_PURGE_INODE(gqp);
-               mp->m_quotainfo->qi_gquotaip = NULL;
-       }
-out:
-       return XFS_ERROR(error);
 }
 
 /*
@@ -488,29 +465,30 @@ out:
  */
 STATIC int
 xfs_qm_dqflush_all(
-       xfs_mount_t     *mp,
-       int             flags)
+       struct xfs_mount        *mp,
+       int                     sync_mode)
 {
-       int             recl;
-       xfs_dquot_t     *dqp;
-       int             niters;
-       int             error;
+       struct xfs_quotainfo    *q = mp->m_quotainfo;
+       int                     recl;
+       struct xfs_dquot        *dqp;
+       int                     niters;
+       int                     error;
 
-       if (mp->m_quotainfo == NULL)
+       if (!q)
                return 0;
        niters = 0;
 again:
-       xfs_qm_mplist_lock(mp);
-       FOREACH_DQUOT_IN_MP(dqp, mp) {
+       mutex_lock(&q->qi_dqlist_lock);
+       list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
                xfs_dqlock(dqp);
                if (! XFS_DQ_IS_DIRTY(dqp)) {
                        xfs_dqunlock(dqp);
                        continue;
                }
-               xfs_dqtrace_entry(dqp, "FLUSHALL: DQDIRTY");
+
                /* XXX a sentinel would be better */
-               recl = XFS_QI_MPLRECLAIMS(mp);
-               if (! xfs_qm_dqflock_nowait(dqp)) {
+               recl = q->qi_dqreclaims;
+               if (!xfs_dqflock_nowait(dqp)) {
                        /*
                         * If we can't grab the flush lock then check
                         * to see if the dquot has been flushed delayed
@@ -524,21 +502,21 @@ again:
                 * Let go of the mplist lock. We don't want to hold it
                 * across a disk write.
                 */
-               xfs_qm_mplist_unlock(mp);
-               error = xfs_qm_dqflush(dqp, flags);
+               mutex_unlock(&q->qi_dqlist_lock);
+               error = xfs_qm_dqflush(dqp, sync_mode);
                xfs_dqunlock(dqp);
                if (error)
                        return error;
 
-               xfs_qm_mplist_lock(mp);
-               if (recl != XFS_QI_MPLRECLAIMS(mp)) {
-                       xfs_qm_mplist_unlock(mp);
+               mutex_lock(&q->qi_dqlist_lock);
+               if (recl != q->qi_dqreclaims) {
+                       mutex_unlock(&q->qi_dqlist_lock);
                        /* XXX restart limit */
                        goto again;
                }
        }
 
-       xfs_qm_mplist_unlock(mp);
+       mutex_unlock(&q->qi_dqlist_lock);
        /* return ! busy */
        return 0;
 }
@@ -548,15 +526,15 @@ again:
  */
 STATIC void
 xfs_qm_detach_gdquots(
-       xfs_mount_t     *mp)
+       struct xfs_mount        *mp)
 {
-       xfs_dquot_t     *dqp, *gdqp;
-       int             nrecl;
+       struct xfs_quotainfo    *q = mp->m_quotainfo;
+       struct xfs_dquot        *dqp, *gdqp;
+       int                     nrecl;
 
  again:
-       ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
-       dqp = XFS_QI_MPLNEXT(mp);
-       while (dqp) {
+       ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
+       list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
                xfs_dqlock(dqp);
                if ((gdqp = dqp->q_gdquot)) {
                        xfs_dqlock(gdqp);
@@ -569,15 +547,14 @@ xfs_qm_detach_gdquots(
                         * Can't hold the mplist lock across a dqput.
                         * XXXmust convert to marker based iterations here.
                         */
-                       nrecl = XFS_QI_MPLRECLAIMS(mp);
-                       xfs_qm_mplist_unlock(mp);
+                       nrecl = q->qi_dqreclaims;
+                       mutex_unlock(&q->qi_dqlist_lock);
                        xfs_qm_dqput(gdqp);
 
-                       xfs_qm_mplist_lock(mp);
-                       if (nrecl != XFS_QI_MPLRECLAIMS(mp))
+                       mutex_lock(&q->qi_dqlist_lock);
+                       if (nrecl != q->qi_dqreclaims)
                                goto again;
                }
-               dqp = dqp->MPL_NEXT;
        }
 }
 
@@ -589,23 +566,23 @@ xfs_qm_detach_gdquots(
  */
 STATIC int
 xfs_qm_dqpurge_int(
-       xfs_mount_t     *mp,
-       uint            flags) /* QUOTAOFF/UMOUNTING/UQUOTA/PQUOTA/GQUOTA */
+       struct xfs_mount        *mp,
+       uint                    flags)
 {
-       xfs_dquot_t     *dqp;
-       uint            dqtype;
-       int             nrecl;
-       xfs_dquot_t     *nextdqp;
-       int             nmisses;
+       struct xfs_quotainfo    *q = mp->m_quotainfo;
+       struct xfs_dquot        *dqp, *n;
+       uint                    dqtype;
+       int                     nrecl;
+       int                     nmisses;
 
-       if (mp->m_quotainfo == NULL)
+       if (!q)
                return 0;
 
        dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0;
        dqtype |= (flags & XFS_QMOPT_PQUOTA) ? XFS_DQ_PROJ : 0;
        dqtype |= (flags & XFS_QMOPT_GQUOTA) ? XFS_DQ_GROUP : 0;
 
-       xfs_qm_mplist_lock(mp);
+       mutex_lock(&q->qi_dqlist_lock);
 
        /*
         * In the first pass through all incore dquots of this filesystem,
@@ -617,28 +594,25 @@ xfs_qm_dqpurge_int(
 
       again:
        nmisses = 0;
-       ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
+       ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
        /*
         * Try to get rid of all of the unwanted dquots. The idea is to
         * get them off mplist and hashlist, but leave them on freelist.
         */
-       dqp = XFS_QI_MPLNEXT(mp);
-       while (dqp) {
+       list_for_each_entry_safe(dqp, n, &q->qi_dqlist, q_mplist) {
                /*
                 * It's OK to look at the type without taking dqlock here.
                 * We're holding the mplist lock here, and that's needed for
                 * a dqreclaim.
                 */
-               if ((dqp->dq_flags & dqtype) == 0) {
-                       dqp = dqp->MPL_NEXT;
+               if ((dqp->dq_flags & dqtype) == 0)
                        continue;
-               }
 
-               if (! xfs_qm_dqhashlock_nowait(dqp)) {
-                       nrecl = XFS_QI_MPLRECLAIMS(mp);
-                       xfs_qm_mplist_unlock(mp);
-                       XFS_DQ_HASH_LOCK(dqp->q_hash);
-                       xfs_qm_mplist_lock(mp);
+               if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
+                       nrecl = q->qi_dqreclaims;
+                       mutex_unlock(&q->qi_dqlist_lock);
+                       mutex_lock(&dqp->q_hash->qh_lock);
+                       mutex_lock(&q->qi_dqlist_lock);
 
                        /*
                         * XXXTheoretically, we can get into a very long
@@ -646,8 +620,8 @@ xfs_qm_dqpurge_int(
                         * No one can be adding dquots to the mplist at
                         * this point, but somebody might be taking things off.
                         */
-                       if (nrecl != XFS_QI_MPLRECLAIMS(mp)) {
-                               XFS_DQ_HASH_UNLOCK(dqp->q_hash);
+                       if (nrecl != q->qi_dqreclaims) {
+                               mutex_unlock(&dqp->q_hash->qh_lock);
                                goto again;
                        }
                }
@@ -656,11 +630,9 @@ xfs_qm_dqpurge_int(
                 * Take the dquot off the mplist and hashlist. It may remain on
                 * freelist in INACTIVE state.
                 */
-               nextdqp = dqp->MPL_NEXT;
-               nmisses += xfs_qm_dqpurge(dqp, flags);
-               dqp = nextdqp;
+               nmisses += xfs_qm_dqpurge(dqp);
        }
-       xfs_qm_mplist_unlock(mp);
+       mutex_unlock(&q->qi_dqlist_lock);
        return nmisses;
 }
 
@@ -689,25 +661,24 @@ xfs_qm_dqattach_one(
        xfs_dqid_t      id,
        uint            type,
        uint            doalloc,
-       uint            dolock,
        xfs_dquot_t     *udqhint, /* hint */
        xfs_dquot_t     **IO_idqpp)
 {
        xfs_dquot_t     *dqp;
        int             error;
 
-       ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        error = 0;
+
        /*
         * See if we already have it in the inode itself. IO_idqpp is
         * &i_udquot or &i_gdquot. This made the code look weird, but
         * made the logic a lot simpler.
         */
-       if ((dqp = *IO_idqpp)) {
-               if (dolock)
-                       xfs_dqlock(dqp);
-               xfs_dqtrace_entry(dqp, "DQATTACH: found in ip");
-               goto done;
+       dqp = *IO_idqpp;
+       if (dqp) {
+               trace_xfs_dqattach_found(dqp);
+               return 0;
        }
 
        /*
@@ -716,38 +687,38 @@ xfs_qm_dqattach_one(
         * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
         * the user dquot.
         */
-       ASSERT(!udqhint || type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
-       if (udqhint && !dolock)
+       if (udqhint) {
+               ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
                xfs_dqlock(udqhint);
 
-       /*
-        * No need to take dqlock to look at the id.
-        * The ID can't change until it gets reclaimed, and it won't
-        * be reclaimed as long as we have a ref from inode and we hold
-        * the ilock.
-        */
-       if (udqhint &&
-           (dqp = udqhint->q_gdquot) &&
-           (be32_to_cpu(dqp->q_core.d_id) == id)) {
-               ASSERT(XFS_DQ_IS_LOCKED(udqhint));
-               xfs_dqlock(dqp);
-               XFS_DQHOLD(dqp);
-               ASSERT(*IO_idqpp == NULL);
-               *IO_idqpp = dqp;
-               if (!dolock) {
+               /*
+                * No need to take dqlock to look at the id.
+                *
+                * The ID can't change until it gets reclaimed, and it won't
+                * be reclaimed as long as we have a ref from inode and we
+                * hold the ilock.
+                */
+               dqp = udqhint->q_gdquot;
+               if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
+                       xfs_dqlock(dqp);
+                       XFS_DQHOLD(dqp);
+                       ASSERT(*IO_idqpp == NULL);
+                       *IO_idqpp = dqp;
+
                        xfs_dqunlock(dqp);
                        xfs_dqunlock(udqhint);
+                       return 0;
                }
-               goto done;
-       }
-       /*
-        * We can't hold a dquot lock when we call the dqget code.
-        * We'll deadlock in no time, because of (not conforming to)
-        * lock ordering - the inodelock comes before any dquot lock,
-        * and we may drop and reacquire the ilock in xfs_qm_dqget().
-        */
-       if (udqhint)
+
+               /*
+                * We can't hold a dquot lock when we call the dqget code.
+                * We'll deadlock in no time, because of (not conforming to)
+                * lock ordering - the inodelock comes before any dquot lock,
+                * and we may drop and reacquire the ilock in xfs_qm_dqget().
+                */
                xfs_dqunlock(udqhint);
+       }
+
        /*
         * Find the dquot from somewhere. This bumps the
         * reference count of dquot and returns it locked.
@@ -755,48 +726,19 @@ xfs_qm_dqattach_one(
         * disk and we didn't ask it to allocate;
         * ESRCH if quotas got turned off suddenly.
         */
-       if ((error = xfs_qm_dqget(ip->i_mount, ip, id, type,
-                                doalloc|XFS_QMOPT_DOWARN, &dqp))) {
-               if (udqhint && dolock)
-                       xfs_dqlock(udqhint);
-               goto done;
-       }
+       error = xfs_qm_dqget(ip->i_mount, ip, id, type, XFS_QMOPT_DOWARN, &dqp);
+       if (error)
+               return error;
+
+       trace_xfs_dqattach_get(dqp);
 
-       xfs_dqtrace_entry(dqp, "DQATTACH: found by dqget");
        /*
         * dqget may have dropped and re-acquired the ilock, but it guarantees
         * that the dquot returned is the one that should go in the inode.
         */
        *IO_idqpp = dqp;
-       ASSERT(dqp);
-       ASSERT(XFS_DQ_IS_LOCKED(dqp));
-       if (! dolock) {
-               xfs_dqunlock(dqp);
-               goto done;
-       }
-       if (! udqhint)
-               goto done;
-
-       ASSERT(udqhint);
-       ASSERT(dolock);
-       ASSERT(XFS_DQ_IS_LOCKED(dqp));
-       if (! xfs_qm_dqlock_nowait(udqhint)) {
-               xfs_dqunlock(dqp);
-               xfs_dqlock(udqhint);
-               xfs_dqlock(dqp);
-       }
-      done:
-#ifdef QUOTADEBUG
-       if (udqhint) {
-               if (dolock)
-                       ASSERT(XFS_DQ_IS_LOCKED(udqhint));
-       }
-       if (! error) {
-               if (dolock)
-                       ASSERT(XFS_DQ_IS_LOCKED(dqp));
-       }
-#endif
-       return error;
+       xfs_dqunlock(dqp);
+       return 0;
 }
 
 
@@ -804,31 +746,22 @@ xfs_qm_dqattach_one(
  * Given a udquot and gdquot, attach a ptr to the group dquot in the
  * udquot as a hint for future lookups. The idea sounds simple, but the
  * execution isn't, because the udquot might have a group dquot attached
- * already and getting rid of that gets us into lock ordering contraints.
+ * already and getting rid of that gets us into lock ordering constraints.
  * The process is complicated more by the fact that the dquots may or may not
  * be locked on entry.
  */
 STATIC void
 xfs_qm_dqattach_grouphint(
        xfs_dquot_t     *udq,
-       xfs_dquot_t     *gdq,
-       uint            locked)
+       xfs_dquot_t     *gdq)
 {
        xfs_dquot_t     *tmp;
 
-#ifdef QUOTADEBUG
-       if (locked) {
-               ASSERT(XFS_DQ_IS_LOCKED(udq));
-               ASSERT(XFS_DQ_IS_LOCKED(gdq));
-       }
-#endif
-       if (! locked)
-               xfs_dqlock(udq);
+       xfs_dqlock(udq);
 
        if ((tmp = udq->q_gdquot)) {
                if (tmp == gdq) {
-                       if (! locked)
-                               xfs_dqunlock(udq);
+                       xfs_dqunlock(udq);
                        return;
                }
 
@@ -838,8 +771,6 @@ xfs_qm_dqattach_grouphint(
                 * because the freelist lock comes before dqlocks.
                 */
                xfs_dqunlock(udq);
-               if (locked)
-                       xfs_dqunlock(gdq);
                /*
                 * we took a hard reference once upon a time in dqget,
                 * so give it back when the udquot no longer points at it
@@ -852,9 +783,7 @@ xfs_qm_dqattach_grouphint(
 
        } else {
                ASSERT(XFS_DQ_IS_LOCKED(udq));
-               if (! locked) {
-                       xfs_dqlock(gdq);
-               }
+               xfs_dqlock(gdq);
        }
 
        ASSERT(XFS_DQ_IS_LOCKED(udq));
@@ -867,10 +796,9 @@ xfs_qm_dqattach_grouphint(
                XFS_DQHOLD(gdq);
                udq->q_gdquot = gdq;
        }
-       if (! locked) {
-               xfs_dqunlock(gdq);
-               xfs_dqunlock(udq);
-       }
+
+       xfs_dqunlock(gdq);
+       xfs_dqunlock(udq);
 }
 
 
@@ -878,14 +806,11 @@ xfs_qm_dqattach_grouphint(
  * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
  * into account.
  * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
- * If XFS_QMOPT_DQLOCK, the dquot(s) will be returned locked. This option pretty
- * much made this code a complete mess, but it has been pretty useful.
- * If XFS_QMOPT_ILOCKED, then inode sent is already locked EXCL.
  * Inode may get unlocked and relocked in here, and the caller must deal with
  * the consequences.
  */
 int
-xfs_qm_dqattach(
+xfs_qm_dqattach_locked(
        xfs_inode_t     *ip,
        uint            flags)
 {
@@ -893,37 +818,32 @@ xfs_qm_dqattach(
        uint            nquotas = 0;
        int             error = 0;
 
-       if ((! XFS_IS_QUOTA_ON(mp)) ||
-           (! XFS_NOT_DQATTACHED(mp, ip)) ||
-           (ip->i_ino == mp->m_sb.sb_uquotino) ||
-           (ip->i_ino == mp->m_sb.sb_gquotino))
+       if (!XFS_IS_QUOTA_RUNNING(mp) ||
+           !XFS_IS_QUOTA_ON(mp) ||
+           !XFS_NOT_DQATTACHED(mp, ip) ||
+           ip->i_ino == mp->m_sb.sb_uquotino ||
+           ip->i_ino == mp->m_sb.sb_gquotino)
                return 0;
 
-       ASSERT((flags & XFS_QMOPT_ILOCKED) == 0 ||
-              XFS_ISLOCKED_INODE_EXCL(ip));
-
-       if (! (flags & XFS_QMOPT_ILOCKED))
-               xfs_ilock(ip, XFS_ILOCK_EXCL);
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
        if (XFS_IS_UQUOTA_ON(mp)) {
                error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
                                                flags & XFS_QMOPT_DQALLOC,
-                                               flags & XFS_QMOPT_DQLOCK,
                                                NULL, &ip->i_udquot);
                if (error)
                        goto done;
                nquotas++;
        }
-       ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        if (XFS_IS_OQUOTA_ON(mp)) {
                error = XFS_IS_GQUOTA_ON(mp) ?
                        xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
                                                flags & XFS_QMOPT_DQALLOC,
-                                               flags & XFS_QMOPT_DQLOCK,
                                                ip->i_udquot, &ip->i_gdquot) :
                        xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQ_PROJ,
                                                flags & XFS_QMOPT_DQALLOC,
-                                               flags & XFS_QMOPT_DQLOCK,
                                                ip->i_udquot, &ip->i_gdquot);
                /*
                 * Don't worry about the udquot that we may have
@@ -939,7 +859,7 @@ xfs_qm_dqattach(
         * This WON'T, in general, result in a thrash.
         */
        if (nquotas == 2) {
-               ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+               ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
                ASSERT(ip->i_udquot);
                ASSERT(ip->i_gdquot);
 
@@ -954,36 +874,33 @@ xfs_qm_dqattach(
                /*
                 * Attach i_gdquot to the gdquot hint inside the i_udquot.
                 */
-               xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot,
-                                        flags & XFS_QMOPT_DQLOCK);
+               xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot);
        }
 
-      done:
-
+ done:
 #ifdef QUOTADEBUG
        if (! error) {
-               if (ip->i_udquot) {
-                       if (flags & XFS_QMOPT_DQLOCK)
-                               ASSERT(XFS_DQ_IS_LOCKED(ip->i_udquot));
-               }
-               if (ip->i_gdquot) {
-                       if (flags & XFS_QMOPT_DQLOCK)
-                               ASSERT(XFS_DQ_IS_LOCKED(ip->i_gdquot));
-               }
                if (XFS_IS_UQUOTA_ON(mp))
                        ASSERT(ip->i_udquot);
                if (XFS_IS_OQUOTA_ON(mp))
                        ASSERT(ip->i_gdquot);
        }
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 #endif
+       return error;
+}
 
-       if (! (flags & XFS_QMOPT_ILOCKED))
-               xfs_iunlock(ip, XFS_ILOCK_EXCL);
+int
+xfs_qm_dqattach(
+       struct xfs_inode        *ip,
+       uint                    flags)
+{
+       int                     error;
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       error = xfs_qm_dqattach_locked(ip, flags);
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
 
-#ifdef QUOTADEBUG
-       else
-               ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
-#endif
        return error;
 }
 
@@ -999,76 +916,58 @@ xfs_qm_dqdetach(
        if (!(ip->i_udquot || ip->i_gdquot))
                return;
 
+       trace_xfs_dquot_dqdetach(ip);
+
        ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
        ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
        if (ip->i_udquot) {
-               xfs_dqtrace_entry_ino(ip->i_udquot, "DQDETTACH", ip);
                xfs_qm_dqrele(ip->i_udquot);
                ip->i_udquot = NULL;
        }
        if (ip->i_gdquot) {
-               xfs_dqtrace_entry_ino(ip->i_gdquot, "DQDETTACH", ip);
                xfs_qm_dqrele(ip->i_gdquot);
                ip->i_gdquot = NULL;
        }
 }
 
-/*
- * This is called by VFS_SYNC and flags arg determines the caller,
- * and its motives, as done in xfs_sync.
- *
- * vfs_sync: SYNC_FSDATA|SYNC_ATTR|SYNC_BDFLUSH 0x31
- * syscall sync: SYNC_FSDATA|SYNC_ATTR|SYNC_DELWRI 0x25
- * umountroot : SYNC_WAIT | SYNC_CLOSE | SYNC_ATTR | SYNC_FSDATA
- */
-
 int
 xfs_qm_sync(
-       xfs_mount_t     *mp,
-       short           flags)
+       struct xfs_mount        *mp,
+       int                     flags)
 {
-       int             recl, restarts;
-       xfs_dquot_t     *dqp;
-       uint            flush_flags;
-       boolean_t       nowait;
-       int             error;
+       struct xfs_quotainfo    *q = mp->m_quotainfo;
+       int                     recl, restarts;
+       struct xfs_dquot        *dqp;
+       int                     error;
+
+       if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
+               return 0;
 
        restarts = 0;
-       /*
-        * We won't block unless we are asked to.
-        */
-       nowait = (boolean_t)(flags & SYNC_BDFLUSH || (flags & SYNC_WAIT) == 0);
 
   again:
-       xfs_qm_mplist_lock(mp);
+       mutex_lock(&q->qi_dqlist_lock);
        /*
         * dqpurge_all() also takes the mplist lock and iterate thru all dquots
         * in quotaoff. However, if the QUOTA_ACTIVE bits are not cleared
         * when we have the mplist lock, we know that dquots will be consistent
         * as long as we have it locked.
         */
-       if (! XFS_IS_QUOTA_ON(mp)) {
-               xfs_qm_mplist_unlock(mp);
+       if (!XFS_IS_QUOTA_ON(mp)) {
+               mutex_unlock(&q->qi_dqlist_lock);
                return 0;
        }
-       FOREACH_DQUOT_IN_MP(dqp, mp) {
+       ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
+       list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
                /*
                 * If this is vfs_sync calling, then skip the dquots that
                 * don't 'seem' to be dirty. ie. don't acquire dqlock.
                 * This is very similar to what xfs_sync does with inodes.
                 */
-               if (flags & SYNC_BDFLUSH) {
-                       if (! XFS_DQ_IS_DIRTY(dqp))
+               if (flags & SYNC_TRYLOCK) {
+                       if (!XFS_DQ_IS_DIRTY(dqp))
                                continue;
-               }
-
-               if (nowait) {
-                       /*
-                        * Try to acquire the dquot lock. We are NOT out of
-                        * lock order, but we just don't want to wait for this
-                        * lock, unless somebody wanted us to.
-                        */
-                       if (! xfs_qm_dqlock_nowait(dqp))
+                       if (!xfs_qm_dqlock_nowait(dqp))
                                continue;
                } else {
                        xfs_dqlock(dqp);
@@ -1083,18 +982,18 @@ xfs_qm_sync(
                }
 
                /* XXX a sentinel would be better */
-               recl = XFS_QI_MPLRECLAIMS(mp);
-               if (! xfs_qm_dqflock_nowait(dqp)) {
-                       if (nowait) {
+               recl = q->qi_dqreclaims;
+               if (!xfs_dqflock_nowait(dqp)) {
+                       if (flags & SYNC_TRYLOCK) {
                                xfs_dqunlock(dqp);
                                continue;
                        }
                        /*
                         * If we can't grab the flush lock then if the caller
-                        * really wanted us to give this our best shot,
+                        * really wanted us to give this our best shot,
                         * see if we can give a push to the buffer before we wait
                         * on the flush lock. At this point, we know that
-                        * eventhough the dquot is being flushed,
+                        * even though the dquot is being flushed,
                         * it has (new) dirty data.
                         */
                        xfs_qm_dqflock_pushbuf_wait(dqp);
@@ -1103,30 +1002,35 @@ xfs_qm_sync(
                 * Let go of the mplist lock. We don't want to hold it
                 * across a disk write
                 */
-               flush_flags = (nowait) ? XFS_QMOPT_DELWRI : XFS_QMOPT_SYNC;
-               xfs_qm_mplist_unlock(mp);
-               xfs_dqtrace_entry(dqp, "XQM_SYNC: DQFLUSH");
-               error = xfs_qm_dqflush(dqp, flush_flags);
+               mutex_unlock(&q->qi_dqlist_lock);
+               error = xfs_qm_dqflush(dqp, flags);
                xfs_dqunlock(dqp);
                if (error && XFS_FORCED_SHUTDOWN(mp))
                        return 0;       /* Need to prevent umount failure */
                else if (error)
                        return error;
 
-               xfs_qm_mplist_lock(mp);
-               if (recl != XFS_QI_MPLRECLAIMS(mp)) {
+               mutex_lock(&q->qi_dqlist_lock);
+               if (recl != q->qi_dqreclaims) {
                        if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS)
                                break;
 
-                       xfs_qm_mplist_unlock(mp);
+                       mutex_unlock(&q->qi_dqlist_lock);
                        goto again;
                }
        }
 
-       xfs_qm_mplist_unlock(mp);
+       mutex_unlock(&q->qi_dqlist_lock);
        return 0;
 }
 
+/*
+ * The hash chains and the mplist use the same xfs_dqhash structure as
+ * their list head, but we can take the mplist qh_lock and one of the
+ * hash qh_locks at the same time without any problem as they aren't
+ * related.
+ */
+static struct lock_class_key xfs_quota_mplist_class;
 
 /*
  * This initializes all the quota information that's kept in the
@@ -1156,13 +1060,15 @@ xfs_qm_init_quotainfo(
         * and change the superblock accordingly.
         */
        if ((error = xfs_qm_init_quotainos(mp))) {
-               kmem_free(qinf, sizeof(xfs_quotainfo_t));
+               kmem_free(qinf);
                mp->m_quotainfo = NULL;
                return error;
        }
 
-       spinlock_init(&qinf->qi_pinlock, "xfs_qinf_pin");
-       xfs_qm_list_init(&qinf->qi_dqlist, "mpdqlist", 0);
+       INIT_LIST_HEAD(&qinf->qi_dqlist);
+       mutex_init(&qinf->qi_dqlist_lock);
+       lockdep_set_class(&qinf->qi_dqlist_lock, &xfs_quota_mplist_class);
+
        qinf->qi_dqreclaims = 0;
 
        /* mutex used to serialize quotaoffs */
@@ -1258,19 +1164,19 @@ xfs_qm_destroy_quotainfo(
         */
        xfs_qm_rele_quotafs_ref(mp);
 
-       spinlock_destroy(&qi->qi_pinlock);
-       xfs_qm_list_destroy(&qi->qi_dqlist);
+       ASSERT(list_empty(&qi->qi_dqlist));
+       mutex_destroy(&qi->qi_dqlist_lock);
 
        if (qi->qi_uquotaip) {
-               XFS_PURGE_INODE(qi->qi_uquotaip);
+               IRELE(qi->qi_uquotaip);
                qi->qi_uquotaip = NULL; /* paranoia */
        }
        if (qi->qi_gquotaip) {
-               XFS_PURGE_INODE(qi->qi_gquotaip);
+               IRELE(qi->qi_gquotaip);
                qi->qi_gquotaip = NULL;
        }
        mutex_destroy(&qi->qi_quotaofflock);
-       kmem_free(qi, sizeof(xfs_quotainfo_t));
+       kmem_free(qi);
        mp->m_quotainfo = NULL;
 }
 
@@ -1286,7 +1192,7 @@ xfs_qm_list_init(
        int             n)
 {
        mutex_init(&list->qh_lock);
-       list->qh_next = NULL;
+       INIT_LIST_HEAD(&list->qh_list);
        list->qh_version = 0;
        list->qh_nelems = 0;
 }
@@ -1314,7 +1220,7 @@ xfs_qm_dqget_noattach(
        xfs_mount_t     *mp;
        xfs_dquot_t     *udqp, *gdqp;
 
-       ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        mp = ip->i_mount;
        udqp = NULL;
        gdqp = NULL;
@@ -1392,9 +1298,6 @@ xfs_qm_qino_alloc(
 {
        xfs_trans_t     *tp;
        int             error;
-       unsigned long   s;
-       cred_t          zerocr;
-       xfs_inode_t     zeroino;
        int             committed;
 
        tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
@@ -1406,11 +1309,9 @@ xfs_qm_qino_alloc(
                xfs_trans_cancel(tp, 0);
                return error;
        }
-       memset(&zerocr, 0, sizeof(zerocr));
-       memset(&zeroino, 0, sizeof(zeroino));
 
-       if ((error = xfs_dir_ialloc(&tp, &zeroino, S_IFREG, 1, 0,
-                                  &zerocr, 0, 1, ip, &committed))) {
+       if ((error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0,
+                                  &xfs_zerocr, 0, 1, ip, &committed))) {
                xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
                                 XFS_TRANS_ABORT);
                return error;
@@ -1420,46 +1321,37 @@ xfs_qm_qino_alloc(
         * Keep an extra reference to this quota inode. This inode is
         * locked exclusively and joined to the transaction already.
         */
-       ASSERT(XFS_ISLOCKED_INODE_EXCL(*ip));
-       VN_HOLD(XFS_ITOV((*ip)));
+       ASSERT(xfs_isilocked(*ip, XFS_ILOCK_EXCL));
+       IHOLD(*ip);
 
        /*
         * Make the changes in the superblock, and log those too.
         * sbfields arg may contain fields other than *QUOTINO;
         * VERSIONNUM for example.
         */
-       s = XFS_SB_LOCK(mp);
+       spin_lock(&mp->m_sb_lock);
        if (flags & XFS_QMOPT_SBVERSION) {
-#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
-               unsigned oldv = mp->m_sb.sb_versionnum;
-#endif
-               ASSERT(!XFS_SB_VERSION_HASQUOTA(&mp->m_sb));
+               ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
                ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
                                   XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
                       (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
                        XFS_SB_GQUOTINO | XFS_SB_QFLAGS));
 
-               XFS_SB_VERSION_ADDQUOTA(&mp->m_sb);
+               xfs_sb_version_addquota(&mp->m_sb);
                mp->m_sb.sb_uquotino = NULLFSINO;
                mp->m_sb.sb_gquotino = NULLFSINO;
 
                /* qflags will get updated _after_ quotacheck */
                mp->m_sb.sb_qflags = 0;
-#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
-               cmn_err(CE_NOTE,
-                       "Old superblock version %x, converting to %x.",
-                       oldv, mp->m_sb.sb_versionnum);
-#endif
        }
        if (flags & XFS_QMOPT_UQUOTA)
                mp->m_sb.sb_uquotino = (*ip)->i_ino;
        else
                mp->m_sb.sb_gquotino = (*ip)->i_ino;
-       XFS_SB_UNLOCK(mp, s);
+       spin_unlock(&mp->m_sb_lock);
        xfs_mod_sb(tp, sbfields);
 
-       if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES,
-                                    NULL))) {
+       if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
                xfs_fs_cmn_err(CE_ALERT, mp, "XFS qino_alloc failed!");
                return error;
        }
@@ -1467,7 +1359,7 @@ xfs_qm_qino_alloc(
 }
 
 
-STATIC int
+STATIC void
 xfs_qm_reset_dqcounts(
        xfs_mount_t     *mp,
        xfs_buf_t       *bp,
@@ -1477,7 +1369,8 @@ xfs_qm_reset_dqcounts(
        xfs_disk_dquot_t        *ddq;
        int                     j;
 
-       xfs_buftrace("RESET DQUOTS", bp);
+       trace_xfs_reset_dqcounts(bp, _RET_IP_);
+
        /*
         * Reset all counters and timers. They'll be
         * started afresh by xfs_qm_quotacheck.
@@ -1485,14 +1378,14 @@ xfs_qm_reset_dqcounts(
 #ifdef DEBUG
        j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
        do_div(j, sizeof(xfs_dqblk_t));
-       ASSERT(XFS_QM_DQPERBLK(mp) == j);
+       ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
 #endif
        ddq = (xfs_disk_dquot_t *)XFS_BUF_PTR(bp);
-       for (j = 0; j < XFS_QM_DQPERBLK(mp); j++) {
+       for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
                /*
                 * Do a sanity check, and if needed, repair the dqblk. Don't
                 * output any warnings because it's perfectly possible to
-                * find unitialized dquot blks. See comment in xfs_qm_dqcheck.
+                * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
                 */
                (void) xfs_qm_dqcheck(ddq, id+j, type, XFS_QMOPT_DQREPAIR,
                                      "xfs_quotacheck");
@@ -1507,8 +1400,6 @@ xfs_qm_reset_dqcounts(
                ddq->d_rtbwarns = 0;
                ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1);
        }
-
-       return 0;
 }
 
 STATIC int
@@ -1545,17 +1436,17 @@ xfs_qm_dqiter_bufs(
        while (blkcnt--) {
                error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
                              XFS_FSB_TO_DADDR(mp, bno),
-                             (int)XFS_QI_DQCHUNKLEN(mp), 0, &bp);
+                             mp->m_quotainfo->qi_dqchunklen, 0, &bp);
                if (error)
                        break;
 
-               (void) xfs_qm_reset_dqcounts(mp, bp, firstid, type);
+               xfs_qm_reset_dqcounts(mp, bp, firstid, type);
                xfs_bdwrite(mp, bp);
                /*
                 * goto the next block.
                 */
                bno++;
-               firstid += XFS_QM_DQPERBLK(mp);
+               firstid += mp->m_quotainfo->qi_dqperchunk;
        }
        return error;
 }
@@ -1581,7 +1472,7 @@ xfs_qm_dqiterate(
 
        error = 0;
        /*
-        * This looks racey, but we can't keep an inode lock across a
+        * This looks racy, but we can't keep an inode lock across a
         * trans_reserve. But, this gets called during quotacheck, and that
         * happens only at mount time which is single threaded.
         */
@@ -1604,7 +1495,7 @@ xfs_qm_dqiterate(
                                  maxlblkcnt - lblkno,
                                  XFS_BMAPI_METADATA,
                                  NULL,
-                                 0, map, &nmaps, NULL);
+                                 0, map, &nmaps, NULL, NULL);
                xfs_iunlock(qip, XFS_ILOCK_SHARED);
                if (error)
                        break;
@@ -1621,7 +1512,7 @@ xfs_qm_dqiterate(
                                continue;
 
                        firstid = (xfs_dqid_t) map[i].br_startoff *
-                               XFS_QM_DQPERBLK(mp);
+                               mp->m_quotainfo->qi_dqperchunk;
                        /*
                         * Do a read-ahead on the next extent.
                         */
@@ -1632,7 +1523,7 @@ xfs_qm_dqiterate(
                                while (rablkcnt--) {
                                        xfs_baread(mp->m_ddev_targp,
                                               XFS_FSB_TO_DADDR(mp, rablkno),
-                                              (int)XFS_QI_DQCHUNKLEN(mp));
+                                              mp->m_quotainfo->qi_dqchunklen);
                                        rablkno++;
                                }
                        }
@@ -1653,7 +1544,7 @@ xfs_qm_dqiterate(
                        break;
        } while (nmaps > 0);
 
-       kmem_free(map, XFS_DQITER_MAP_SIZE * sizeof(*map));
+       kmem_free(map);
 
        return error;
 }
@@ -1672,26 +1563,30 @@ xfs_qm_quotacheck_dqadjust(
        xfs_qcnt_t              rtblks)
 {
        ASSERT(XFS_DQ_IS_LOCKED(dqp));
-       xfs_dqtrace_entry(dqp, "QCHECK DQADJUST");
+
+       trace_xfs_dqadjust(dqp);
+
        /*
         * Adjust the inode count and the block count to reflect this inode's
         * resource usage.
         */
-       be64_add(&dqp->q_core.d_icount, 1);
+       be64_add_cpu(&dqp->q_core.d_icount, 1);
        dqp->q_res_icount++;
        if (nblks) {
-               be64_add(&dqp->q_core.d_bcount, nblks);
+               be64_add_cpu(&dqp->q_core.d_bcount, nblks);
                dqp->q_res_bcount += nblks;
        }
        if (rtblks) {
-               be64_add(&dqp->q_core.d_rtbcount, rtblks);
+               be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
                dqp->q_res_rtbcount += rtblks;
        }
 
        /*
         * Set default limits, adjust timers (since we changed usages)
+        *
+        * There are no timers for the default values set in the root dquot.
         */
-       if (! XFS_IS_SUSER_DQUOT(dqp)) {
+       if (dqp->q_core.d_id) {
                xfs_qm_adjust_dqlimits(dqp->q_mount, &dqp->q_core);
                xfs_qm_adjust_dqtimers(dqp->q_mount, &dqp->q_core);
        }
@@ -1705,10 +1600,9 @@ xfs_qm_get_rtblks(
        xfs_qcnt_t      *O_rtblks)
 {
        xfs_filblks_t   rtblks;                 /* total rt blks */
+       xfs_extnum_t    idx;                    /* extent record index */
        xfs_ifork_t     *ifp;                   /* inode fork pointer */
        xfs_extnum_t    nextents;               /* number of extent entries */
-       xfs_bmbt_rec_t  *base;                  /* base of extent array */
-       xfs_bmbt_rec_t  *ep;                    /* pointer to an extent entry */
        int             error;
 
        ASSERT(XFS_IS_REALTIME_INODE(ip));
@@ -1718,10 +1612,9 @@ xfs_qm_get_rtblks(
                        return error;
        }
        rtblks = 0;
-       nextents = ifp->if_bytes / sizeof(xfs_bmbt_rec_t);
-       base = &ifp->if_u1.if_extents[0];
-       for (ep = base; ep < &base[nextents]; ep++)
-               rtblks += xfs_bmbt_get_blockcount(ep);
+       nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+       for (idx = 0; idx < nextents; idx++)
+               rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
        *O_rtblks = (xfs_qcnt_t)rtblks;
        return 0;
 }
@@ -1770,12 +1663,6 @@ xfs_qm_dqusage_adjust(
                return error;
        }
 
-       if (ip->i_d.di_mode == 0) {
-               xfs_iput_new(ip, XFS_ILOCK_EXCL);
-               *res = BULKSTAT_RV_NOTHING;
-               return XFS_ERROR(ENOENT);
-       }
-
        /*
         * Obtain the locked dquots. In case of an error (eg. allocation
         * fails for ENOSPC), we return the negative of the error number
@@ -1824,7 +1711,7 @@ xfs_qm_dqusage_adjust(
         * we have to start from the beginning anyway.
         * Once we're done, we'll log all the dquot bufs.
         *
-        * The *QUOTA_ON checks below may look pretty racey, but quotachecks
+        * The *QUOTA_ON checks below may look pretty racy, but quotachecks
         * and quotaoffs don't race. (Quotachecks happen at mount time only).
         */
        if (XFS_IS_UQUOTA_ON(mp)) {
@@ -1841,7 +1728,7 @@ xfs_qm_dqusage_adjust(
         * Now release the inode. This will send it to 'inactive', and
         * possibly even free blocks.
         */
-       VN_RELE(XFS_ITOV(ip));
+       IRELE(ip);
 
        /*
         * Goto next inode.
@@ -1869,14 +1756,14 @@ xfs_qm_quotacheck(
        lastino = 0;
        flags = 0;
 
-       ASSERT(XFS_QI_UQIP(mp) || XFS_QI_GQIP(mp));
+       ASSERT(mp->m_quotainfo->qi_uquotaip || mp->m_quotainfo->qi_gquotaip);
        ASSERT(XFS_IS_QUOTA_RUNNING(mp));
 
        /*
         * There should be no cached dquots. The (simplistic) quotacheck
         * algorithm doesn't like that.
         */
-       ASSERT(XFS_QI_MPLNDQUOTS(mp) == 0);
+       ASSERT(list_empty(&mp->m_quotainfo->qi_dqlist));
 
        cmn_err(CE_NOTE, "XFS quotacheck %s: Please wait.", mp->m_fsname);
 
@@ -1885,15 +1772,19 @@ xfs_qm_quotacheck(
         * their counters to zero. We need a clean slate.
         * We don't log our changes till later.
         */
-       if ((uip = XFS_QI_UQIP(mp))) {
-               if ((error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA)))
+       uip = mp->m_quotainfo->qi_uquotaip;
+       if (uip) {
+               error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA);
+               if (error)
                        goto error_return;
                flags |= XFS_UQUOTA_CHKD;
        }
 
-       if ((gip = XFS_QI_GQIP(mp))) {
-               if ((error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
-                                       XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA)))
+       gip = mp->m_quotainfo->qi_gquotaip;
+       if (gip) {
+               error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
+                                       XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
+               if (error)
                        goto error_return;
                flags |= XFS_OQUOTA_CHKD;
        }
@@ -1905,14 +1796,20 @@ xfs_qm_quotacheck(
                 */
                if ((error = xfs_bulkstat(mp, &lastino, &count,
                                     xfs_qm_dqusage_adjust, NULL,
-                                    structsz, NULL,
-                                    BULKSTAT_FG_IGET|BULKSTAT_FG_VFSLOCKED,
-                                    &done)))
+                                    structsz, NULL, BULKSTAT_FG_IGET, &done)))
                        break;
 
        } while (! done);
 
        /*
+        * We've made all the changes that we need to make incore.
+        * Flush them down to disk buffers if everything was updated
+        * successfully.
+        */
+       if (!error)
+               error = xfs_qm_dqflush_all(mp, 0);
+
+       /*
         * We can get this error if we couldn't do a dquot allocation inside
         * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
         * dirty dquots that might be cached, we just want to get rid of them
@@ -1920,14 +1817,9 @@ xfs_qm_quotacheck(
         * at this point (because we intentionally didn't in dqget_noattach).
         */
        if (error) {
-               xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_QUOTAOFF);
+               xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
                goto error_return;
        }
-       /*
-        * We've made all the changes that we need to make incore.
-        * Now flush_them down to disk buffers.
-        */
-       xfs_qm_dqflush_all(mp, XFS_QMOPT_DELWRI);
 
        /*
         * We didn't log anything, because if we crashed, we'll have to
@@ -1946,7 +1838,7 @@ xfs_qm_quotacheck(
        mp->m_qflags &= ~(XFS_OQUOTA_CHKD | XFS_UQUOTA_CHKD);
        mp->m_qflags |= flags;
 
-       XQM_LIST_PRINT(&(XFS_QI_MPL_LIST(mp)), MPL_NEXT, "++++ Mp list +++");
+       xfs_qm_dquot_list_print(mp);
 
  error_return:
        if (error) {
@@ -1959,7 +1851,10 @@ xfs_qm_quotacheck(
                ASSERT(mp->m_quotainfo != NULL);
                ASSERT(xfs_Gqm != NULL);
                xfs_qm_destroy_quotainfo(mp);
-               (void)xfs_mount_reset_sbqflags(mp);
+               if (xfs_mount_reset_sbqflags(mp)) {
+                       cmn_err(CE_WARN, "XFS quotacheck %s: "
+                               "Failed to reset quota flags.", mp->m_fsname);
+               }
        } else {
                cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname);
        }
@@ -1987,7 +1882,7 @@ xfs_qm_init_quotainos(
        /*
         * Get the uquota and gquota inodes
         */
-       if (XFS_SB_VERSION_HASQUOTA(&mp->m_sb)) {
+       if (xfs_sb_version_hasquota(&mp->m_sb)) {
                if (XFS_IS_UQUOTA_ON(mp) &&
                    mp->m_sb.sb_uquotino != NULLFSINO) {
                        ASSERT(mp->m_sb.sb_uquotino > 0);
@@ -2001,7 +1896,7 @@ xfs_qm_init_quotainos(
                        if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
                                             0, 0, &gip, 0))) {
                                if (uip)
-                                       VN_RELE(XFS_ITOV(uip));
+                                       IRELE(uip);
                                return XFS_ERROR(error);
                        }
                }
@@ -2032,65 +1927,59 @@ xfs_qm_init_quotainos(
                                          sbflags | XFS_SB_GQUOTINO, flags);
                if (error) {
                        if (uip)
-                               VN_RELE(XFS_ITOV(uip));
+                               IRELE(uip);
 
                        return XFS_ERROR(error);
                }
        }
 
-       XFS_QI_UQIP(mp) = uip;
-       XFS_QI_GQIP(mp) = gip;
+       mp->m_quotainfo->qi_uquotaip = uip;
+       mp->m_quotainfo->qi_gquotaip = gip;
 
        return 0;
 }
 
 
+
 /*
- * Traverse the freelist of dquots and attempt to reclaim a maximum of
- * 'howmany' dquots. This operation races with dqlookup(), and attempts to
- * favor the lookup function ...
- * XXXsup merge this with qm_reclaim_one().
+ * Just pop the least recently used dquot off the freelist and
+ * recycle it. The returned dquot is locked.
  */
-STATIC int
-xfs_qm_shake_freelist(
-       int howmany)
+STATIC xfs_dquot_t *
+xfs_qm_dqreclaim_one(void)
 {
-       int             nreclaimed;
-       xfs_dqhash_t    *hash;
-       xfs_dquot_t     *dqp, *nextdqp;
+       xfs_dquot_t     *dqpout;
+       xfs_dquot_t     *dqp;
        int             restarts;
-       int             nflushes;
-
-       if (howmany <= 0)
-               return 0;
 
-       nreclaimed = 0;
        restarts = 0;
-       nflushes = 0;
+       dqpout = NULL;
 
-#ifdef QUOTADEBUG
-       cmn_err(CE_DEBUG, "Shake free 0x%x", howmany);
-#endif
-       /* lock order is : hashchainlock, freelistlock, mplistlock */
- tryagain:
-       xfs_qm_freelist_lock(xfs_Gqm);
+       /* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
+startagain:
+       mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
 
-       for (dqp = xfs_Gqm->qm_dqfreelist.qh_next;
-            ((dqp != (xfs_dquot_t *) &xfs_Gqm->qm_dqfreelist) &&
-             nreclaimed < howmany); ) {
+       list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) {
+               struct xfs_mount *mp = dqp->q_mount;
                xfs_dqlock(dqp);
 
                /*
                 * We are racing with dqlookup here. Naturally we don't
-                * want to reclaim a dquot that lookup wants.
+                * want to reclaim a dquot that lookup wants. We release the
+                * freelist lock and start over, so that lookup will grab
+                * both the dquot and the freelistlock.
                 */
                if (dqp->dq_flags & XFS_DQ_WANT) {
+                       ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
+
+                       trace_xfs_dqreclaim_want(dqp);
+
                        xfs_dqunlock(dqp);
-                       xfs_qm_freelist_unlock(xfs_Gqm);
+                       mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
                        if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
-                               return nreclaimed;
+                               return NULL;
                        XQM_STATS_INC(xqmstats.xs_qm_dqwants);
-                       goto tryagain;
+                       goto startagain;
                }
 
                /*
@@ -2099,23 +1988,27 @@ xfs_qm_shake_freelist(
                 * life easier.
                 */
                if (dqp->dq_flags & XFS_DQ_INACTIVE) {
-                       ASSERT(dqp->q_mount == NULL);
+                       ASSERT(mp == NULL);
                        ASSERT(! XFS_DQ_IS_DIRTY(dqp));
-                       ASSERT(dqp->HL_PREVP == NULL);
-                       ASSERT(dqp->MPL_PREVP == NULL);
+                       ASSERT(list_empty(&dqp->q_hashlist));
+                       ASSERT(list_empty(&dqp->q_mplist));
+                       list_del_init(&dqp->q_freelist);
+                       xfs_Gqm->qm_dqfrlist_cnt--;
+                       xfs_dqunlock(dqp);
+                       dqpout = dqp;
                        XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
-                       nextdqp = dqp->dq_flnext;
-                       goto off_freelist;
+                       break;
                }
 
-               ASSERT(dqp->MPL_PREVP);
+               ASSERT(dqp->q_hash);
+               ASSERT(!list_empty(&dqp->q_mplist));
+
                /*
                 * Try to grab the flush lock. If this dquot is in the process of
                 * getting flushed to disk, we don't want to reclaim it.
                 */
-               if (! xfs_qm_dqflock_nowait(dqp)) {
+               if (!xfs_dqflock_nowait(dqp)) {
                        xfs_dqunlock(dqp);
-                       dqp = dqp->dq_flnext;
                        continue;
                }
 
@@ -2126,16 +2019,23 @@ xfs_qm_shake_freelist(
                 * dirty dquots.
                 */
                if (XFS_DQ_IS_DIRTY(dqp)) {
-                       xfs_dqtrace_entry(dqp, "DQSHAKE: DQDIRTY");
+                       int     error;
+
+                       trace_xfs_dqreclaim_dirty(dqp);
+
                        /*
                         * We flush it delayed write, so don't bother
-                        * releasing the mplock.
+                        * releasing the freelist lock.
                         */
-                       (void) xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
+                       error = xfs_qm_dqflush(dqp, 0);
+                       if (error) {
+                               xfs_fs_cmn_err(CE_WARN, mp,
+                       "xfs_qm_dqreclaim: dquot %p flush failed", dqp);
+                       }
                        xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
-                       dqp = dqp->dq_flnext;
                        continue;
                }
+
                /*
                 * We're trying to get the hashlock out of order. This races
                 * with dqlookup; so, we giveup and goto the next dquot if
@@ -2143,55 +2043,75 @@ xfs_qm_shake_freelist(
                 * a dqlookup process that holds the hashlock that is
                 * waiting for the freelist lock.
                 */
-               if (! xfs_qm_dqhashlock_nowait(dqp)) {
-                       xfs_dqfunlock(dqp);
-                       xfs_dqunlock(dqp);
-                       dqp = dqp->dq_flnext;
-                       continue;
+               if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
+                       restarts++;
+                       goto dqfunlock;
                }
+
                /*
                 * This races with dquot allocation code as well as dqflush_all
                 * and reclaim code. So, if we failed to grab the mplist lock,
                 * giveup everything and start over.
                 */
-               hash = dqp->q_hash;
-               ASSERT(hash);
-               if (! xfs_qm_mplist_nowait(dqp->q_mount)) {
-                       /* XXX put a sentinel so that we can come back here */
+               if (!mutex_trylock(&mp->m_quotainfo->qi_dqlist_lock)) {
+                       restarts++;
+                       mutex_unlock(&dqp->q_hash->qh_lock);
                        xfs_dqfunlock(dqp);
                        xfs_dqunlock(dqp);
-                       XFS_DQ_HASH_UNLOCK(hash);
-                       xfs_qm_freelist_unlock(xfs_Gqm);
-                       if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
-                               return nreclaimed;
-                       goto tryagain;
+                       mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
+                       if (restarts++ >= XFS_QM_RECLAIM_MAX_RESTARTS)
+                               return NULL;
+                       goto startagain;
                }
-               xfs_dqtrace_entry(dqp, "DQSHAKE: UNLINKING");
-#ifdef QUOTADEBUG
-               cmn_err(CE_DEBUG, "Shake 0x%p, ID 0x%x\n",
-                       dqp, be32_to_cpu(dqp->q_core.d_id));
-#endif
+
                ASSERT(dqp->q_nrefs == 0);
-               nextdqp = dqp->dq_flnext;
-               XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
-               XQM_HASHLIST_REMOVE(hash, dqp);
+               list_del_init(&dqp->q_mplist);
+               mp->m_quotainfo->qi_dquots--;
+               mp->m_quotainfo->qi_dqreclaims++;
+               list_del_init(&dqp->q_hashlist);
+               dqp->q_hash->qh_version++;
+               list_del_init(&dqp->q_freelist);
+               xfs_Gqm->qm_dqfrlist_cnt--;
+               dqpout = dqp;
+               mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
+               mutex_unlock(&dqp->q_hash->qh_lock);
+dqfunlock:
                xfs_dqfunlock(dqp);
-               xfs_qm_mplist_unlock(dqp->q_mount);
-               XFS_DQ_HASH_UNLOCK(hash);
-
- off_freelist:
-               XQM_FREELIST_REMOVE(dqp);
                xfs_dqunlock(dqp);
-               nreclaimed++;
-               XQM_STATS_INC(xqmstats.xs_qm_dqshake_reclaims);
+               if (dqpout)
+                       break;
+               if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
+                       return NULL;
+       }
+       mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
+       return dqpout;
+}
+
+/*
+ * Traverse the freelist of dquots and attempt to reclaim a maximum of
+ * 'howmany' dquots. This operation races with dqlookup(), and attempts to
+ * favor the lookup function ...
+ */
+STATIC int
+xfs_qm_shake_freelist(
+       int     howmany)
+{
+       int             nreclaimed = 0;
+       xfs_dquot_t     *dqp;
+
+       if (howmany <= 0)
+               return 0;
+
+       while (nreclaimed < howmany) {
+               dqp = xfs_qm_dqreclaim_one();
+               if (!dqp)
+                       return nreclaimed;
                xfs_qm_dqdestroy(dqp);
-               dqp = nextdqp;
+               nreclaimed++;
        }
-       xfs_qm_freelist_unlock(xfs_Gqm);
        return nreclaimed;
 }
 
-
 /*
  * The kmem_shake interface is invoked when memory is running low.
  */
@@ -2206,7 +2126,7 @@ xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask)
        if (!xfs_Gqm)
                return 0;
 
-       nfree = xfs_Gqm->qm_dqfreelist.qh_nelems; /* free dquots */
+       nfree = xfs_Gqm->qm_dqfrlist_cnt; /* free dquots */
        /* incore dquots in all f/s's */
        ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree;
 
@@ -2222,121 +2142,6 @@ xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask)
 }
 
 
-/*
- * Just pop the least recently used dquot off the freelist and
- * recycle it. The returned dquot is locked.
- */
-STATIC xfs_dquot_t *
-xfs_qm_dqreclaim_one(void)
-{
-       xfs_dquot_t     *dqpout;
-       xfs_dquot_t     *dqp;
-       int             restarts;
-       int             nflushes;
-
-       restarts = 0;
-       dqpout = NULL;
-       nflushes = 0;
-
-       /* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
- startagain:
-       xfs_qm_freelist_lock(xfs_Gqm);
-
-       FOREACH_DQUOT_IN_FREELIST(dqp, &(xfs_Gqm->qm_dqfreelist)) {
-               xfs_dqlock(dqp);
-
-               /*
-                * We are racing with dqlookup here. Naturally we don't
-                * want to reclaim a dquot that lookup wants. We release the
-                * freelist lock and start over, so that lookup will grab
-                * both the dquot and the freelistlock.
-                */
-               if (dqp->dq_flags & XFS_DQ_WANT) {
-                       ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
-                       xfs_dqtrace_entry(dqp, "DQRECLAIM: DQWANT");
-                       xfs_dqunlock(dqp);
-                       xfs_qm_freelist_unlock(xfs_Gqm);
-                       if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
-                               return NULL;
-                       XQM_STATS_INC(xqmstats.xs_qm_dqwants);
-                       goto startagain;
-               }
-
-               /*
-                * If the dquot is inactive, we are assured that it is
-                * not on the mplist or the hashlist, and that makes our
-                * life easier.
-                */
-               if (dqp->dq_flags & XFS_DQ_INACTIVE) {
-                       ASSERT(dqp->q_mount == NULL);
-                       ASSERT(! XFS_DQ_IS_DIRTY(dqp));
-                       ASSERT(dqp->HL_PREVP == NULL);
-                       ASSERT(dqp->MPL_PREVP == NULL);
-                       XQM_FREELIST_REMOVE(dqp);
-                       xfs_dqunlock(dqp);
-                       dqpout = dqp;
-                       XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
-                       break;
-               }
-
-               ASSERT(dqp->q_hash);
-               ASSERT(dqp->MPL_PREVP);
-
-               /*
-                * Try to grab the flush lock. If this dquot is in the process of
-                * getting flushed to disk, we don't want to reclaim it.
-                */
-               if (! xfs_qm_dqflock_nowait(dqp)) {
-                       xfs_dqunlock(dqp);
-                       continue;
-               }
-
-               /*
-                * We have the flush lock so we know that this is not in the
-                * process of being flushed. So, if this is dirty, flush it
-                * DELWRI so that we don't get a freelist infested with
-                * dirty dquots.
-                */
-               if (XFS_DQ_IS_DIRTY(dqp)) {
-                       xfs_dqtrace_entry(dqp, "DQRECLAIM: DQDIRTY");
-                       /*
-                        * We flush it delayed write, so don't bother
-                        * releasing the freelist lock.
-                        */
-                       (void) xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
-                       xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
-                       continue;
-               }
-
-               if (! xfs_qm_mplist_nowait(dqp->q_mount)) {
-                       xfs_dqfunlock(dqp);
-                       xfs_dqunlock(dqp);
-                       continue;
-               }
-
-               if (! xfs_qm_dqhashlock_nowait(dqp))
-                       goto mplistunlock;
-
-               ASSERT(dqp->q_nrefs == 0);
-               xfs_dqtrace_entry(dqp, "DQRECLAIM: UNLINKING");
-               XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
-               XQM_HASHLIST_REMOVE(dqp->q_hash, dqp);
-               XQM_FREELIST_REMOVE(dqp);
-               dqpout = dqp;
-               XFS_DQ_HASH_UNLOCK(dqp->q_hash);
- mplistunlock:
-               xfs_qm_mplist_unlock(dqp->q_mount);
-               xfs_dqfunlock(dqp);
-               xfs_dqunlock(dqp);
-               if (dqpout)
-                       break;
-       }
-
-       xfs_qm_freelist_unlock(xfs_Gqm);
-       return dqpout;
-}
-
-
 /*------------------------------------------------------------------*/
 
 /*
@@ -2411,9 +2216,9 @@ xfs_qm_write_sb_changes(
        }
 
        xfs_mod_sb(tp, flags);
-       (void) xfs_trans_commit(tp, 0, NULL);
+       error = xfs_trans_commit(tp, 0);
 
-       return 0;
+       return error;
 }
 
 
@@ -2432,27 +2237,26 @@ xfs_qm_write_sb_changes(
  */
 int
 xfs_qm_vop_dqalloc(
-       xfs_mount_t     *mp,
-       xfs_inode_t     *ip,
-       uid_t           uid,
-       gid_t           gid,
-       prid_t          prid,
-       uint            flags,
-       xfs_dquot_t     **O_udqpp,
-       xfs_dquot_t     **O_gdqpp)
+       struct xfs_inode        *ip,
+       uid_t                   uid,
+       gid_t                   gid,
+       prid_t                  prid,
+       uint                    flags,
+       struct xfs_dquot        **O_udqpp,
+       struct xfs_dquot        **O_gdqpp)
 {
-       int             error;
-       xfs_dquot_t     *uq, *gq;
-       uint            lockflags;
+       struct xfs_mount        *mp = ip->i_mount;
+       struct xfs_dquot        *uq, *gq;
+       int                     error;
+       uint                    lockflags;
 
-       if (!XFS_IS_QUOTA_ON(mp))
+       if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return 0;
 
        lockflags = XFS_ILOCK_EXCL;
        xfs_ilock(ip, lockflags);
 
-       if ((flags & XFS_QMOPT_INHERIT) &&
-           XFS_INHERIT_GID(ip, XFS_MTOVFS(mp)))
+       if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
                gid = ip->i_d.di_gid;
 
        /*
@@ -2460,8 +2264,8 @@ xfs_qm_vop_dqalloc(
         * if necessary. The dquot(s) will not be locked.
         */
        if (XFS_NOT_DQATTACHED(mp, ip)) {
-               if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_DQALLOC |
-                                           XFS_QMOPT_ILOCKED))) {
+               error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
+               if (error) {
                        xfs_iunlock(ip, lockflags);
                        return error;
                }
@@ -2554,7 +2358,7 @@ xfs_qm_vop_dqalloc(
                }
        }
        if (uq)
-               xfs_dqtrace_entry_ino(uq, "DQALLOC", ip);
+               trace_xfs_dquot_dqalloc(ip);
 
        xfs_iunlock(ip, lockflags);
        if (O_udqpp)
@@ -2583,7 +2387,8 @@ xfs_qm_vop_chown(
        uint            bfield = XFS_IS_REALTIME_INODE(ip) ?
                                 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
 
-       ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
 
        /* old dquot */
@@ -2622,13 +2427,13 @@ xfs_qm_vop_chown_reserve(
        xfs_dquot_t     *gdqp,
        uint            flags)
 {
-       int             error;
-       xfs_mount_t     *mp;
-       uint            delblks, blkflags;
+       xfs_mount_t     *mp = ip->i_mount;
+       uint            delblks, blkflags, prjflags = 0;
        xfs_dquot_t     *unresudq, *unresgdq, *delblksudq, *delblksgdq;
+       int             error;
 
-       ASSERT(XFS_ISLOCKED_INODE(ip));
-       mp = ip->i_mount;
+
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
        ASSERT(XFS_IS_QUOTA_RUNNING(mp));
 
        delblks = ip->i_delayed_blks;
@@ -2650,10 +2455,13 @@ xfs_qm_vop_chown_reserve(
                }
        }
        if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) {
-               if ((XFS_IS_GQUOTA_ON(ip->i_mount) &&
-                    ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) ||
-                   (XFS_IS_PQUOTA_ON(ip->i_mount) &&
-                    ip->i_d.di_projid != be32_to_cpu(gdqp->q_core.d_id))) {
+               if (XFS_IS_PQUOTA_ON(ip->i_mount) &&
+                    ip->i_d.di_projid != be32_to_cpu(gdqp->q_core.d_id))
+                       prjflags = XFS_QMOPT_ENOSPC;
+
+               if (prjflags ||
+                   (XFS_IS_GQUOTA_ON(ip->i_mount) &&
+                    ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id))) {
                        delblksgdq = gdqp;
                        if (delblks) {
                                ASSERT(ip->i_gdquot);
@@ -2664,7 +2472,7 @@ xfs_qm_vop_chown_reserve(
 
        if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
                                delblksudq, delblksgdq, ip->i_d.di_nblocks, 1,
-                               flags | blkflags)))
+                               flags | blkflags | prjflags)))
                return (error);
 
        /*
@@ -2681,7 +2489,7 @@ xfs_qm_vop_chown_reserve(
                ASSERT(unresudq || unresgdq);
                if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
                                delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0,
-                               flags | blkflags)))
+                               flags | blkflags | prjflags)))
                        return (error);
                xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
                                unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0,
@@ -2693,28 +2501,23 @@ xfs_qm_vop_chown_reserve(
 
 int
 xfs_qm_vop_rename_dqattach(
-       xfs_inode_t     **i_tab)
+       struct xfs_inode        **i_tab)
 {
-       xfs_inode_t     *ip;
-       int             i;
-       int             error;
-
-       ip = i_tab[0];
+       struct xfs_mount        *mp = i_tab[0]->i_mount;
+       int                     i;
 
-       if (! XFS_IS_QUOTA_ON(ip->i_mount))
+       if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return 0;
 
-       if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) {
-               error = xfs_qm_dqattach(ip, 0);
-               if (error)
-                       return error;
-       }
-       for (i = 1; (i < 4 && i_tab[i]); i++) {
+       for (i = 0; (i < 4 && i_tab[i]); i++) {
+               struct xfs_inode        *ip = i_tab[i];
+               int                     error;
+
                /*
                 * Watch out for duplicate entries in the table.
                 */
-               if ((ip = i_tab[i]) != i_tab[i-1]) {
-                       if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) {
+               if (i == 0 || ip != i_tab[i-1]) {
+                       if (XFS_NOT_DQATTACHED(mp, ip)) {
                                error = xfs_qm_dqattach(ip, 0);
                                if (error)
                                        return error;
@@ -2725,17 +2528,19 @@ xfs_qm_vop_rename_dqattach(
 }
 
 void
-xfs_qm_vop_dqattach_and_dqmod_newinode(
-       xfs_trans_t     *tp,
-       xfs_inode_t     *ip,
-       xfs_dquot_t     *udqp,
-       xfs_dquot_t     *gdqp)
+xfs_qm_vop_create_dqattach(
+       struct xfs_trans        *tp,
+       struct xfs_inode        *ip,
+       struct xfs_dquot        *udqp,
+       struct xfs_dquot        *gdqp)
 {
-       if (!XFS_IS_QUOTA_ON(tp->t_mountp))
+       struct xfs_mount        *mp = tp->t_mountp;
+
+       if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return;
 
-       ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
-       ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+       ASSERT(XFS_IS_QUOTA_RUNNING(mp));
 
        if (udqp) {
                xfs_dqlock(udqp);
@@ -2743,7 +2548,7 @@ xfs_qm_vop_dqattach_and_dqmod_newinode(
                xfs_dqunlock(udqp);
                ASSERT(ip->i_udquot == NULL);
                ip->i_udquot = udqp;
-               ASSERT(XFS_IS_UQUOTA_ON(tp->t_mountp));
+               ASSERT(XFS_IS_UQUOTA_ON(mp));
                ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
                xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
        }
@@ -2753,107 +2558,11 @@ xfs_qm_vop_dqattach_and_dqmod_newinode(
                xfs_dqunlock(gdqp);
                ASSERT(ip->i_gdquot == NULL);
                ip->i_gdquot = gdqp;
-               ASSERT(XFS_IS_OQUOTA_ON(tp->t_mountp));
-               ASSERT((XFS_IS_GQUOTA_ON(tp->t_mountp) ?
+               ASSERT(XFS_IS_OQUOTA_ON(mp));
+               ASSERT((XFS_IS_GQUOTA_ON(mp) ?
                        ip->i_d.di_gid : ip->i_d.di_projid) ==
                                be32_to_cpu(gdqp->q_core.d_id));
                xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
        }
 }
 
-/* ------------- list stuff -----------------*/
-STATIC void
-xfs_qm_freelist_init(xfs_frlist_t *ql)
-{
-       ql->qh_next = ql->qh_prev = (xfs_dquot_t *) ql;
-       mutex_init(&ql->qh_lock);
-       ql->qh_version = 0;
-       ql->qh_nelems = 0;
-}
-
-STATIC void
-xfs_qm_freelist_destroy(xfs_frlist_t *ql)
-{
-       xfs_dquot_t     *dqp, *nextdqp;
-
-       mutex_lock(&ql->qh_lock);
-       for (dqp = ql->qh_next;
-            dqp != (xfs_dquot_t *)ql; ) {
-               xfs_dqlock(dqp);
-               nextdqp = dqp->dq_flnext;
-#ifdef QUOTADEBUG
-               cmn_err(CE_DEBUG, "FREELIST destroy 0x%p", dqp);
-#endif
-               XQM_FREELIST_REMOVE(dqp);
-               xfs_dqunlock(dqp);
-               xfs_qm_dqdestroy(dqp);
-               dqp = nextdqp;
-       }
-       /*
-        * Don't bother about unlocking.
-        */
-       mutex_destroy(&ql->qh_lock);
-
-       ASSERT(ql->qh_nelems == 0);
-}
-
-STATIC void
-xfs_qm_freelist_insert(xfs_frlist_t *ql, xfs_dquot_t *dq)
-{
-       dq->dq_flnext = ql->qh_next;
-       dq->dq_flprev = (xfs_dquot_t *)ql;
-       ql->qh_next = dq;
-       dq->dq_flnext->dq_flprev = dq;
-       xfs_Gqm->qm_dqfreelist.qh_nelems++;
-       xfs_Gqm->qm_dqfreelist.qh_version++;
-}
-
-void
-xfs_qm_freelist_unlink(xfs_dquot_t *dq)
-{
-       xfs_dquot_t *next = dq->dq_flnext;
-       xfs_dquot_t *prev = dq->dq_flprev;
-
-       next->dq_flprev = prev;
-       prev->dq_flnext = next;
-       dq->dq_flnext = dq->dq_flprev = dq;
-       xfs_Gqm->qm_dqfreelist.qh_nelems--;
-       xfs_Gqm->qm_dqfreelist.qh_version++;
-}
-
-void
-xfs_qm_freelist_append(xfs_frlist_t *ql, xfs_dquot_t *dq)
-{
-       xfs_qm_freelist_insert((xfs_frlist_t *)ql->qh_prev, dq);
-}
-
-STATIC int
-xfs_qm_dqhashlock_nowait(
-       xfs_dquot_t *dqp)
-{
-       int locked;
-
-       locked = mutex_trylock(&((dqp)->q_hash->qh_lock));
-       return locked;
-}
-
-int
-xfs_qm_freelist_lock_nowait(
-       xfs_qm_t *xqm)
-{
-       int locked;
-
-       locked = mutex_trylock(&(xqm->qm_dqfreelist.qh_lock));
-       return locked;
-}
-
-STATIC int
-xfs_qm_mplist_nowait(
-       xfs_mount_t     *mp)
-{
-       int locked;
-
-       ASSERT(mp->m_quotainfo);
-       locked = mutex_trylock(&(XFS_QI_MPLLOCK(mp)));
-       return locked;
-}