xfs: remove dead XFS_LOUD_RECOVERY code
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index adbc7bb..38e7641 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -20,7 +20,6 @@
 #include "xfs_bit.h"
 #include "xfs_log.h"
 #include "xfs_inum.h"
-#include "xfs_clnt.h"
 #include "xfs_trans.h"
 #include "xfs_sb.h"
 #include "xfs_ag.h"
 #include "xfs_error.h"
 #include "xfs_bmap.h"
 #include "xfs_rw.h"
-#include "xfs_acl.h"
 #include "xfs_attr.h"
 #include "xfs_buf_item.h"
 #include "xfs_trans_space.h"
 #include "xfs_utils.h"
 #include "xfs_qm.h"
+#include "xfs_trace.h"
 
 /*
  * The global quota manager. There is only one of these for the entire
@@ -56,7 +55,7 @@
  * quota functionality, including maintaining the freelist and hash
  * tables of dquots.
  */
-mutex_t                xfs_Gqm_lock;
+struct mutex   xfs_Gqm_lock;
 struct xfs_qm  *xfs_Gqm;
 uint           ndquot;
 
@@ -68,11 +67,6 @@ static cred_t        xfs_zerocr;
 STATIC void    xfs_qm_list_init(xfs_dqlist_t *, char *, int);
 STATIC void    xfs_qm_list_destroy(xfs_dqlist_t *);
 
-STATIC void    xfs_qm_freelist_init(xfs_frlist_t *);
-STATIC void    xfs_qm_freelist_destroy(xfs_frlist_t *);
-STATIC int     xfs_qm_mplist_nowait(xfs_mount_t *);
-STATIC int     xfs_qm_dqhashlock_nowait(xfs_dquot_t *);
-
 STATIC int     xfs_qm_init_quotainos(xfs_mount_t *);
 STATIC int     xfs_qm_init_quotainfo(xfs_mount_t *);
 STATIC int     xfs_qm_shake(int, gfp_t);
@@ -83,25 +77,29 @@ static struct shrinker xfs_qm_shaker = {
 };
 
 #ifdef DEBUG
-extern mutex_t qcheck_lock;
+extern struct mutex    qcheck_lock;
 #endif
 
 #ifdef QUOTADEBUG
-#define XQM_LIST_PRINT(l, NXT, title) \
-{ \
-       xfs_dquot_t     *dqp; int i = 0; \
-       cmn_err(CE_DEBUG, "%s (#%d)", title, (int) (l)->qh_nelems); \
-       for (dqp = (l)->qh_next; dqp != NULL; dqp = dqp->NXT) { \
-               cmn_err(CE_DEBUG, "   %d.  \"%d (%s)\"   " \
-                                 "bcnt = %d, icnt = %d, refs = %d", \
-                       ++i, (int) be32_to_cpu(dqp->q_core.d_id), \
-                       DQFLAGTO_TYPESTR(dqp),       \
-                       (int) be64_to_cpu(dqp->q_core.d_bcount), \
-                       (int) be64_to_cpu(dqp->q_core.d_icount), \
-                       (int) dqp->q_nrefs);  } \
+static void
+xfs_qm_dquot_list_print(
+       struct xfs_mount *mp)
+{
+       xfs_dquot_t     *dqp;
+       int             i = 0;
+
+       list_for_each_entry(dqp, &mp->m_quotainfo->qi_dqlist, q_mplist) {
+               cmn_err(CE_DEBUG, "   %d. \"%d (%s)\"   "
+                                 "bcnt = %lld, icnt = %lld, refs = %d",
+                       i++, be32_to_cpu(dqp->q_core.d_id),
+                       DQFLAGTO_TYPESTR(dqp),
+                       (long long)be64_to_cpu(dqp->q_core.d_bcount),
+                       (long long)be64_to_cpu(dqp->q_core.d_icount),
+                       dqp->q_nrefs);
+       }
 }
 #else
-#define XQM_LIST_PRINT(l, NXT, title) do { } while (0)
+static void xfs_qm_dquot_list_print(struct xfs_mount *mp) { }
 #endif
 
 /*
@@ -121,9 +119,14 @@ xfs_Gqm_init(void)
         */
        udqhash = kmem_zalloc_greedy(&hsize,
                                     XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t),
-                                    XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t),
-                                    KM_SLEEP | KM_MAYFAIL | KM_LARGE);
-       gdqhash = kmem_zalloc(hsize, KM_SLEEP | KM_LARGE);
+                                    XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t));
+       if (!udqhash)
+               goto out;
+
+       gdqhash = kmem_zalloc_large(hsize);
+       if (!gdqhash)
+               goto out_free_udqhash;
+
        hsize /= sizeof(xfs_dqhash_t);
        ndquot = hsize << 8;
 
@@ -142,7 +145,9 @@ xfs_Gqm_init(void)
        /*
         * Freelist of all dquots of all file systems
         */
-       xfs_qm_freelist_init(&(xqm->qm_dqfreelist));
+       INIT_LIST_HEAD(&xqm->qm_dqfrlist);
+       xqm->qm_dqfrlist_cnt = 0;
+       mutex_init(&xqm->qm_dqfrlist_lock);
 
        /*
         * dquot zone. we register our own low-memory callback.
@@ -173,6 +178,11 @@ xfs_Gqm_init(void)
        mutex_init(&qcheck_lock);
 #endif
        return xqm;
+
+ out_free_udqhash:
+       kmem_free_large(udqhash);
+ out:
+       return NULL;
 }
 
 /*
@@ -182,6 +192,7 @@ STATIC void
 xfs_qm_destroy(
        struct xfs_qm   *xqm)
 {
+       struct xfs_dquot *dqp, *n;
        int             hsize, i;
 
        ASSERT(xqm != NULL);
@@ -192,16 +203,30 @@ xfs_qm_destroy(
                xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
                xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i]));
        }
-       kmem_free(xqm->qm_usr_dqhtable, hsize * sizeof(xfs_dqhash_t));
-       kmem_free(xqm->qm_grp_dqhtable, hsize * sizeof(xfs_dqhash_t));
+       kmem_free_large(xqm->qm_usr_dqhtable);
+       kmem_free_large(xqm->qm_grp_dqhtable);
        xqm->qm_usr_dqhtable = NULL;
        xqm->qm_grp_dqhtable = NULL;
        xqm->qm_dqhashmask = 0;
-       xfs_qm_freelist_destroy(&(xqm->qm_dqfreelist));
+
+       /* frlist cleanup */
+       mutex_lock(&xqm->qm_dqfrlist_lock);
+       list_for_each_entry_safe(dqp, n, &xqm->qm_dqfrlist, q_freelist) {
+               xfs_dqlock(dqp);
+#ifdef QUOTADEBUG
+               cmn_err(CE_DEBUG, "FREELIST destroy 0x%p", dqp);
+#endif
+               list_del_init(&dqp->q_freelist);
+               xfs_Gqm->qm_dqfrlist_cnt--;
+               xfs_dqunlock(dqp);
+               xfs_qm_dqdestroy(dqp);
+       }
+       mutex_unlock(&xqm->qm_dqfrlist_lock);
+       mutex_destroy(&xqm->qm_dqfrlist_lock);
 #ifdef DEBUG
        mutex_destroy(&qcheck_lock);
 #endif
-       kmem_free(xqm, sizeof(xfs_qm_t));
+       kmem_free(xqm);
 }
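
The freelist teardown added above relies on the _safe list iterator, which caches the next entry so the current one can be unlinked and freed mid-walk. A self-contained sketch of the idiom, with hypothetical demo_* names:

#include <linux/list.h>
#include <linux/slab.h>

/* Hypothetical type, for illustration only. */
struct demo_item {
	struct list_head	list;
};

/*
 * Unlink and free every entry. The _safe iterator keeps a pointer to the
 * next entry, so deleting the current one does not break the walk.
 */
static void demo_drain(struct list_head *head)
{
	struct demo_item	*item, *n;

	list_for_each_entry_safe(item, n, head, list) {
		list_del_init(&item->list);
		kfree(item);
	}
}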
 
 /*
@@ -220,17 +245,21 @@ xfs_qm_hold_quotafs_ref(
         * the structure could disappear between the entry to this routine and
         * a HOLD operation if not locked.
         */
-       XFS_QM_LOCK(xfs_Gqm);
+       mutex_lock(&xfs_Gqm_lock);
 
-       if (xfs_Gqm == NULL)
+       if (!xfs_Gqm) {
                xfs_Gqm = xfs_Gqm_init();
+               if (!xfs_Gqm) {
+                       mutex_unlock(&xfs_Gqm_lock);
+                       return ENOMEM;
+               }
+       }
+
        /*
         * We can keep a list of all filesystems with quotas mounted for
         * debugging and statistical purposes, but ...
         * Just take a reference and get out.
         */
-       XFS_QM_HOLD(xfs_Gqm);
-       XFS_QM_UNLOCK(xfs_Gqm);
+       xfs_Gqm->qm_nrefs++;
+       mutex_unlock(&xfs_Gqm_lock);
 
        return 0;
 }
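
The old XFS_QM_LOCK/HOLD/UNLOCK macros become a plain mutex guarding lazy creation of the global manager plus a reference count. A hedged sketch of that pattern (hypothetical demo_* names; error convention simplified to a negative errno):

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>

/* Hypothetical singleton manager, for illustration only. */
struct demo_mgr {
	int	refs;
};

static DEFINE_MUTEX(demo_lock);
static struct demo_mgr *demo_mgr;

static int demo_hold_ref(void)
{
	mutex_lock(&demo_lock);
	if (!demo_mgr) {
		demo_mgr = kzalloc(sizeof(*demo_mgr), GFP_KERNEL);
		if (!demo_mgr) {
			/* never return with the lock still held */
			mutex_unlock(&demo_lock);
			return -ENOMEM;
		}
	}
	demo_mgr->refs++;
	mutex_unlock(&demo_lock);
	return 0;
}
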
@@ -245,7 +274,7 @@ STATIC void
 xfs_qm_rele_quotafs_ref(
        struct xfs_mount *mp)
 {
-       xfs_dquot_t     *dqp, *nextdqp;
+       xfs_dquot_t     *dqp, *n;
 
        ASSERT(xfs_Gqm);
        ASSERT(xfs_Gqm->qm_nrefs > 0);
@@ -253,49 +282,48 @@ xfs_qm_rele_quotafs_ref(
        /*
         * Go thru the freelist and destroy all inactive dquots.
         */
-       xfs_qm_freelist_lock(xfs_Gqm);
+       mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
 
-       for (dqp = xfs_Gqm->qm_dqfreelist.qh_next;
-            dqp != (xfs_dquot_t *)&(xfs_Gqm->qm_dqfreelist); ) {
+       list_for_each_entry_safe(dqp, n, &xfs_Gqm->qm_dqfrlist, q_freelist) {
                xfs_dqlock(dqp);
-               nextdqp = dqp->dq_flnext;
                if (dqp->dq_flags & XFS_DQ_INACTIVE) {
                        ASSERT(dqp->q_mount == NULL);
                        ASSERT(! XFS_DQ_IS_DIRTY(dqp));
-                       ASSERT(dqp->HL_PREVP == NULL);
-                       ASSERT(dqp->MPL_PREVP == NULL);
-                       XQM_FREELIST_REMOVE(dqp);
+                       ASSERT(list_empty(&dqp->q_hashlist));
+                       ASSERT(list_empty(&dqp->q_mplist));
+                       list_del_init(&dqp->q_freelist);
+                       xfs_Gqm->qm_dqfrlist_cnt--;
                        xfs_dqunlock(dqp);
                        xfs_qm_dqdestroy(dqp);
                } else {
                        xfs_dqunlock(dqp);
                }
-               dqp = nextdqp;
        }
-       xfs_qm_freelist_unlock(xfs_Gqm);
+       mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
 
        /*
         * Destroy the entire XQM. If somebody mounts with quotaon, this'll
         * be restarted.
         */
-       XFS_QM_LOCK(xfs_Gqm);
-       XFS_QM_RELE(xfs_Gqm);
-       if (xfs_Gqm->qm_nrefs == 0) {
+       mutex_lock(&xfs_Gqm_lock);
+       if (--xfs_Gqm->qm_nrefs == 0) {
                xfs_qm_destroy(xfs_Gqm);
                xfs_Gqm = NULL;
        }
-       XFS_QM_UNLOCK(xfs_Gqm);
+       mutex_unlock(&xfs_Gqm_lock);
 }
 
 /*
  * Just destroy the quotainfo structure.
  */
 void
-xfs_qm_unmount_quotadestroy(
-       xfs_mount_t     *mp)
+xfs_qm_unmount(
+       struct xfs_mount        *mp)
 {
-       if (mp->m_quotainfo)
+       if (mp->m_quotainfo) {
+               xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
                xfs_qm_destroy_quotainfo(mp);
+       }
 }
 
 
@@ -304,16 +332,17 @@ xfs_qm_unmount_quotadestroy(
  * necessary data structures like quotainfo.  This is also responsible for
  * running a quotacheck as necessary.  We are guaranteed that the superblock
  * is consistently read in at this point.
+ *
+ * If we fail here, the mount will continue with quota turned off. We don't
+ * need to indicate success or failure at all.
  */
-int
+void
 xfs_qm_mount_quotas(
-       xfs_mount_t     *mp,
-       int             mfsi_flags)
+       xfs_mount_t     *mp)
 {
        int             error = 0;
        uint            sbf;
 
-
        /*
         * If quotas on realtime volumes is not supported, we disable
         * quotas immediately.
@@ -332,7 +361,8 @@ xfs_qm_mount_quotas(
         * Allocate the quotainfo structure inside the mount struct, and
         * create quotainode(s), and change/rev superblock if necessary.
         */
-       if ((error = xfs_qm_init_quotainfo(mp))) {
+       error = xfs_qm_init_quotainfo(mp);
+       if (error) {
                /*
                 * We must turn off quotas.
                 */
@@ -343,13 +373,11 @@ xfs_qm_mount_quotas(
        /*
         * If any of the quotas are not consistent, do a quotacheck.
         */
-       if (XFS_QM_NEED_QUOTACHECK(mp) &&
-               !(mfsi_flags & XFS_MFSI_NO_QUOTACHECK)) {
-               if ((error = xfs_qm_quotacheck(mp))) {
-                       /* Quotacheck has failed and quotas have
-                        * been disabled.
-                        */
-                       return XFS_ERROR(error);
+       if (XFS_QM_NEED_QUOTACHECK(mp)) {
+               error = xfs_qm_quotacheck(mp);
+               if (error) {
+                       /* Quotacheck failed and disabled quotas. */
+                       return;
                }
        }
        /* 
@@ -357,12 +385,10 @@ xfs_qm_mount_quotas(
         * quotachecked status, since we won't be doing accounting for
         * that type anymore.
         */
-       if (!XFS_IS_UQUOTA_ON(mp)) {
+       if (!XFS_IS_UQUOTA_ON(mp))
                mp->m_qflags &= ~XFS_UQUOTA_CHKD;
-       }
-       if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp))) {
+       if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp)))
                mp->m_qflags &= ~XFS_OQUOTA_CHKD;
-       }
 
  write_changes:
        /*
@@ -391,20 +417,22 @@ xfs_qm_mount_quotas(
        if (error) {
                xfs_fs_cmn_err(CE_WARN, mp,
                        "Failed to initialize disk quotas.");
+               return;
        }
-       return XFS_ERROR(error);
+
+#ifdef QUOTADEBUG
+       if (XFS_IS_QUOTA_ON(mp))
+               xfs_qm_internalqcheck(mp);
+#endif
 }
 
 /*
  * Called from the vfsops layer.
  */
-int
+void
 xfs_qm_unmount_quotas(
        xfs_mount_t     *mp)
 {
-       xfs_inode_t     *uqp, *gqp;
-       int             error = 0;
-
        /*
         * Release the dquots that root inode, et al might be holding,
         * before we flush quotas and blow away the quotainfo structure.
@@ -417,43 +445,18 @@ xfs_qm_unmount_quotas(
                xfs_qm_dqdetach(mp->m_rsumip);
 
        /*
-        * Flush out the quota inodes.
+        * Release the quota inodes.
         */
-       uqp = gqp = NULL;
        if (mp->m_quotainfo) {
-               if ((uqp = mp->m_quotainfo->qi_uquotaip) != NULL) {
-                       xfs_ilock(uqp, XFS_ILOCK_EXCL);
-                       xfs_iflock(uqp);
-                       error = xfs_iflush(uqp, XFS_IFLUSH_SYNC);
-                       xfs_iunlock(uqp, XFS_ILOCK_EXCL);
-                       if (unlikely(error == EFSCORRUPTED)) {
-                               XFS_ERROR_REPORT("xfs_qm_unmount_quotas(1)",
-                                                XFS_ERRLEVEL_LOW, mp);
-                               goto out;
-                       }
+               if (mp->m_quotainfo->qi_uquotaip) {
+                       IRELE(mp->m_quotainfo->qi_uquotaip);
+                       mp->m_quotainfo->qi_uquotaip = NULL;
                }
-               if ((gqp = mp->m_quotainfo->qi_gquotaip) != NULL) {
-                       xfs_ilock(gqp, XFS_ILOCK_EXCL);
-                       xfs_iflock(gqp);
-                       error = xfs_iflush(gqp, XFS_IFLUSH_SYNC);
-                       xfs_iunlock(gqp, XFS_ILOCK_EXCL);
-                       if (unlikely(error == EFSCORRUPTED)) {
-                               XFS_ERROR_REPORT("xfs_qm_unmount_quotas(2)",
-                                                XFS_ERRLEVEL_LOW, mp);
-                               goto out;
-                       }
+               if (mp->m_quotainfo->qi_gquotaip) {
+                       IRELE(mp->m_quotainfo->qi_gquotaip);
+                       mp->m_quotainfo->qi_gquotaip = NULL;
                }
        }
-       if (uqp) {
-                XFS_PURGE_INODE(uqp);
-                mp->m_quotainfo->qi_uquotaip = NULL;
-       }
-       if (gqp) {
-               XFS_PURGE_INODE(gqp);
-               mp->m_quotainfo->qi_gquotaip = NULL;
-       }
-out:
-       return XFS_ERROR(error);
 }
 
 /*
@@ -462,29 +465,30 @@ out:
  */
 STATIC int
 xfs_qm_dqflush_all(
-       xfs_mount_t     *mp,
-       int             flags)
+       struct xfs_mount        *mp,
+       int                     sync_mode)
 {
-       int             recl;
-       xfs_dquot_t     *dqp;
-       int             niters;
-       int             error;
+       struct xfs_quotainfo    *q = mp->m_quotainfo;
+       int                     recl;
+       struct xfs_dquot        *dqp;
+       int                     niters;
+       int                     error;
 
-       if (mp->m_quotainfo == NULL)
+       if (!q)
                return 0;
        niters = 0;
 again:
-       xfs_qm_mplist_lock(mp);
-       FOREACH_DQUOT_IN_MP(dqp, mp) {
+       mutex_lock(&q->qi_dqlist_lock);
+       list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
                xfs_dqlock(dqp);
                if (! XFS_DQ_IS_DIRTY(dqp)) {
                        xfs_dqunlock(dqp);
                        continue;
                }
-               xfs_dqtrace_entry(dqp, "FLUSHALL: DQDIRTY");
+
                /* XXX a sentinel would be better */
-               recl = XFS_QI_MPLRECLAIMS(mp);
-               if (! xfs_qm_dqflock_nowait(dqp)) {
+               recl = q->qi_dqreclaims;
+               if (!xfs_dqflock_nowait(dqp)) {
                        /*
                         * If we can't grab the flush lock then check
                         * to see if the dquot has been flushed delayed
@@ -498,21 +502,21 @@ again:
                 * Let go of the mplist lock. We don't want to hold it
                 * across a disk write.
                 */
-               xfs_qm_mplist_unlock(mp);
-               error = xfs_qm_dqflush(dqp, flags);
+               mutex_unlock(&q->qi_dqlist_lock);
+               error = xfs_qm_dqflush(dqp, sync_mode);
                xfs_dqunlock(dqp);
                if (error)
                        return error;
 
-               xfs_qm_mplist_lock(mp);
-               if (recl != XFS_QI_MPLRECLAIMS(mp)) {
-                       xfs_qm_mplist_unlock(mp);
+               mutex_lock(&q->qi_dqlist_lock);
+               if (recl != q->qi_dqreclaims) {
+                       mutex_unlock(&q->qi_dqlist_lock);
                        /* XXX restart limit */
                        goto again;
                }
        }
 
-       xfs_qm_mplist_unlock(mp);
+       mutex_unlock(&q->qi_dqlist_lock);
        /* return ! busy */
        return 0;
 }
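
xfs_qm_dqflush_all() above never holds the mplist mutex across a disk write: it snapshots qi_dqreclaims, drops the lock around the flush, and restarts the walk if the counter moved. A rough sketch of that drop-and-revalidate loop, with hypothetical demo_* types:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

/* Hypothetical types, for illustration only. */
struct demo_entry {
	struct list_head	list;
	bool			dirty;
};

struct demo_list {
	struct mutex		lock;
	struct list_head	head;
	int			reclaims;	/* bumped whenever entries are removed */
};

static void demo_flush_one(struct demo_entry *e)
{
	e->dirty = false;			/* stand-in for a blocking disk flush */
}

static void demo_flush_all(struct demo_list *q)
{
	struct demo_entry	*e;
	int			recl;

again:
	mutex_lock(&q->lock);
	list_for_each_entry(e, &q->head, list) {
		if (!e->dirty)
			continue;
		recl = q->reclaims;		/* snapshot before dropping the lock */
		mutex_unlock(&q->lock);		/* don't hold the list lock across I/O */
		demo_flush_one(e);
		mutex_lock(&q->lock);
		if (recl != q->reclaims) {
			/* entries were removed; the iterator may be stale */
			mutex_unlock(&q->lock);
			goto again;
		}
	}
	mutex_unlock(&q->lock);
}
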
@@ -522,15 +526,15 @@ again:
  */
 STATIC void
 xfs_qm_detach_gdquots(
-       xfs_mount_t     *mp)
+       struct xfs_mount        *mp)
 {
-       xfs_dquot_t     *dqp, *gdqp;
-       int             nrecl;
+       struct xfs_quotainfo    *q = mp->m_quotainfo;
+       struct xfs_dquot        *dqp, *gdqp;
+       int                     nrecl;
 
  again:
-       ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
-       dqp = XFS_QI_MPLNEXT(mp);
-       while (dqp) {
+       ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
+       list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
                xfs_dqlock(dqp);
                if ((gdqp = dqp->q_gdquot)) {
                        xfs_dqlock(gdqp);
@@ -543,15 +547,14 @@ xfs_qm_detach_gdquots(
                         * Can't hold the mplist lock across a dqput.
                         * XXXmust convert to marker based iterations here.
                         */
-                       nrecl = XFS_QI_MPLRECLAIMS(mp);
-                       xfs_qm_mplist_unlock(mp);
+                       nrecl = q->qi_dqreclaims;
+                       mutex_unlock(&q->qi_dqlist_lock);
                        xfs_qm_dqput(gdqp);
 
-                       xfs_qm_mplist_lock(mp);
-                       if (nrecl != XFS_QI_MPLRECLAIMS(mp))
+                       mutex_lock(&q->qi_dqlist_lock);
+                       if (nrecl != q->qi_dqreclaims)
                                goto again;
                }
-               dqp = dqp->MPL_NEXT;
        }
 }
 
@@ -563,23 +566,23 @@ xfs_qm_detach_gdquots(
  */
 STATIC int
 xfs_qm_dqpurge_int(
-       xfs_mount_t     *mp,
-       uint            flags) /* QUOTAOFF/UMOUNTING/UQUOTA/PQUOTA/GQUOTA */
+       struct xfs_mount        *mp,
+       uint                    flags)
 {
-       xfs_dquot_t     *dqp;
-       uint            dqtype;
-       int             nrecl;
-       xfs_dquot_t     *nextdqp;
-       int             nmisses;
+       struct xfs_quotainfo    *q = mp->m_quotainfo;
+       struct xfs_dquot        *dqp, *n;
+       uint                    dqtype;
+       int                     nrecl;
+       int                     nmisses;
 
-       if (mp->m_quotainfo == NULL)
+       if (!q)
                return 0;
 
        dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0;
        dqtype |= (flags & XFS_QMOPT_PQUOTA) ? XFS_DQ_PROJ : 0;
        dqtype |= (flags & XFS_QMOPT_GQUOTA) ? XFS_DQ_GROUP : 0;
 
-       xfs_qm_mplist_lock(mp);
+       mutex_lock(&q->qi_dqlist_lock);
 
        /*
         * In the first pass through all incore dquots of this filesystem,
@@ -591,28 +594,25 @@ xfs_qm_dqpurge_int(
 
       again:
        nmisses = 0;
-       ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
+       ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
        /*
         * Try to get rid of all of the unwanted dquots. The idea is to
         * get them off mplist and hashlist, but leave them on freelist.
         */
-       dqp = XFS_QI_MPLNEXT(mp);
-       while (dqp) {
+       list_for_each_entry_safe(dqp, n, &q->qi_dqlist, q_mplist) {
                /*
                 * It's OK to look at the type without taking dqlock here.
                 * We're holding the mplist lock here, and that's needed for
                 * a dqreclaim.
                 */
-               if ((dqp->dq_flags & dqtype) == 0) {
-                       dqp = dqp->MPL_NEXT;
+               if ((dqp->dq_flags & dqtype) == 0)
                        continue;
-               }
 
-               if (! xfs_qm_dqhashlock_nowait(dqp)) {
-                       nrecl = XFS_QI_MPLRECLAIMS(mp);
-                       xfs_qm_mplist_unlock(mp);
-                       XFS_DQ_HASH_LOCK(dqp->q_hash);
-                       xfs_qm_mplist_lock(mp);
+               if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
+                       nrecl = q->qi_dqreclaims;
+                       mutex_unlock(&q->qi_dqlist_lock);
+                       mutex_lock(&dqp->q_hash->qh_lock);
+                       mutex_lock(&q->qi_dqlist_lock);
 
                        /*
                         * XXXTheoretically, we can get into a very long
@@ -620,8 +620,8 @@ xfs_qm_dqpurge_int(
                         * No one can be adding dquots to the mplist at
                         * this point, but somebody might be taking things off.
                         */
-                       if (nrecl != XFS_QI_MPLRECLAIMS(mp)) {
-                               XFS_DQ_HASH_UNLOCK(dqp->q_hash);
+                       if (nrecl != q->qi_dqreclaims) {
+                               mutex_unlock(&dqp->q_hash->qh_lock);
                                goto again;
                        }
                }
@@ -630,11 +630,9 @@ xfs_qm_dqpurge_int(
                 * Take the dquot off the mplist and hashlist. It may remain on
                 * freelist in INACTIVE state.
                 */
-               nextdqp = dqp->MPL_NEXT;
-               nmisses += xfs_qm_dqpurge(dqp, flags);
-               dqp = nextdqp;
+               nmisses += xfs_qm_dqpurge(dqp);
        }
-       xfs_qm_mplist_unlock(mp);
+       mutex_unlock(&q->qi_dqlist_lock);
        return nmisses;
 }
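
The purge loop above takes the hash lock out of the documented order by try-locking it and, on failure, dropping the mplist lock and reacquiring both in order (then revalidating via qi_dqreclaims). The core of that trick, sketched with hypothetical names:

#include <linux/mutex.h>

/*
 * We already hold @inner, but the documented order is outer before inner.
 * Try-lock the outer lock; if that fails, back off and retake both in the
 * correct order. The caller must revalidate anything it learned under
 * @inner before the locks were dropped. Hypothetical helper, not from
 * this patch.
 */
static void demo_lock_outer(struct mutex *outer, struct mutex *inner)
{
	if (mutex_trylock(outer))
		return;

	mutex_unlock(inner);
	mutex_lock(outer);
	mutex_lock(inner);
}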
 
@@ -663,25 +661,24 @@ xfs_qm_dqattach_one(
        xfs_dqid_t      id,
        uint            type,
        uint            doalloc,
-       uint            dolock,
        xfs_dquot_t     *udqhint, /* hint */
        xfs_dquot_t     **IO_idqpp)
 {
        xfs_dquot_t     *dqp;
        int             error;
 
-       ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        error = 0;
+
        /*
         * See if we already have it in the inode itself. IO_idqpp is
         * &i_udquot or &i_gdquot. This made the code look weird, but
         * made the logic a lot simpler.
         */
-       if ((dqp = *IO_idqpp)) {
-               if (dolock)
-                       xfs_dqlock(dqp);
-               xfs_dqtrace_entry(dqp, "DQATTACH: found in ip");
-               goto done;
+       dqp = *IO_idqpp;
+       if (dqp) {
+               trace_xfs_dqattach_found(dqp);
+               return 0;
        }
 
        /*
@@ -690,38 +687,38 @@ xfs_qm_dqattach_one(
         * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
         * the user dquot.
         */
-       ASSERT(!udqhint || type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
-       if (udqhint && !dolock)
+       if (udqhint) {
+               ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
                xfs_dqlock(udqhint);
 
-       /*
-        * No need to take dqlock to look at the id.
-        * The ID can't change until it gets reclaimed, and it won't
-        * be reclaimed as long as we have a ref from inode and we hold
-        * the ilock.
-        */
-       if (udqhint &&
-           (dqp = udqhint->q_gdquot) &&
-           (be32_to_cpu(dqp->q_core.d_id) == id)) {
-               ASSERT(XFS_DQ_IS_LOCKED(udqhint));
-               xfs_dqlock(dqp);
-               XFS_DQHOLD(dqp);
-               ASSERT(*IO_idqpp == NULL);
-               *IO_idqpp = dqp;
-               if (!dolock) {
+               /*
+                * No need to take dqlock to look at the id.
+                *
+                * The ID can't change until it gets reclaimed, and it won't
+                * be reclaimed as long as we have a ref from inode and we
+                * hold the ilock.
+                */
+               dqp = udqhint->q_gdquot;
+               if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
+                       xfs_dqlock(dqp);
+                       XFS_DQHOLD(dqp);
+                       ASSERT(*IO_idqpp == NULL);
+                       *IO_idqpp = dqp;
+
                        xfs_dqunlock(dqp);
                        xfs_dqunlock(udqhint);
+                       return 0;
                }
-               goto done;
-       }
-       /*
-        * We can't hold a dquot lock when we call the dqget code.
-        * We'll deadlock in no time, because of (not conforming to)
-        * lock ordering - the inodelock comes before any dquot lock,
-        * and we may drop and reacquire the ilock in xfs_qm_dqget().
-        */
-       if (udqhint)
+
+               /*
+                * We can't hold a dquot lock when we call the dqget code.
+                * We'll deadlock in no time, because of (not conforming to)
+                * lock ordering - the inodelock comes before any dquot lock,
+                * and we may drop and reacquire the ilock in xfs_qm_dqget().
+                */
                xfs_dqunlock(udqhint);
+       }
+
        /*
         * Find the dquot from somewhere. This bumps the
         * reference count of dquot and returns it locked.
@@ -729,48 +726,19 @@ xfs_qm_dqattach_one(
         * disk and we didn't ask it to allocate;
         * ESRCH if quotas got turned off suddenly.
         */
-       if ((error = xfs_qm_dqget(ip->i_mount, ip, id, type,
-                                doalloc|XFS_QMOPT_DOWARN, &dqp))) {
-               if (udqhint && dolock)
-                       xfs_dqlock(udqhint);
-               goto done;
-       }
+       error = xfs_qm_dqget(ip->i_mount, ip, id, type,
+                            doalloc | XFS_QMOPT_DOWARN, &dqp);
+       if (error)
+               return error;
+
+       trace_xfs_dqattach_get(dqp);
 
-       xfs_dqtrace_entry(dqp, "DQATTACH: found by dqget");
        /*
         * dqget may have dropped and re-acquired the ilock, but it guarantees
         * that the dquot returned is the one that should go in the inode.
         */
        *IO_idqpp = dqp;
-       ASSERT(dqp);
-       ASSERT(XFS_DQ_IS_LOCKED(dqp));
-       if (! dolock) {
-               xfs_dqunlock(dqp);
-               goto done;
-       }
-       if (! udqhint)
-               goto done;
-
-       ASSERT(udqhint);
-       ASSERT(dolock);
-       ASSERT(XFS_DQ_IS_LOCKED(dqp));
-       if (! xfs_qm_dqlock_nowait(udqhint)) {
-               xfs_dqunlock(dqp);
-               xfs_dqlock(udqhint);
-               xfs_dqlock(dqp);
-       }
-      done:
-#ifdef QUOTADEBUG
-       if (udqhint) {
-               if (dolock)
-                       ASSERT(XFS_DQ_IS_LOCKED(udqhint));
-       }
-       if (! error) {
-               if (dolock)
-                       ASSERT(XFS_DQ_IS_LOCKED(dqp));
-       }
-#endif
-       return error;
+       xfs_dqunlock(dqp);
+       return 0;
 }
 
 
@@ -785,24 +753,15 @@ xfs_qm_dqattach_one(
 STATIC void
 xfs_qm_dqattach_grouphint(
        xfs_dquot_t     *udq,
-       xfs_dquot_t     *gdq,
-       uint            locked)
+       xfs_dquot_t     *gdq)
 {
        xfs_dquot_t     *tmp;
 
-#ifdef QUOTADEBUG
-       if (locked) {
-               ASSERT(XFS_DQ_IS_LOCKED(udq));
-               ASSERT(XFS_DQ_IS_LOCKED(gdq));
-       }
-#endif
-       if (! locked)
-               xfs_dqlock(udq);
+       xfs_dqlock(udq);
 
        if ((tmp = udq->q_gdquot)) {
                if (tmp == gdq) {
-                       if (! locked)
-                               xfs_dqunlock(udq);
+                       xfs_dqunlock(udq);
                        return;
                }
 
@@ -812,8 +771,6 @@ xfs_qm_dqattach_grouphint(
                 * because the freelist lock comes before dqlocks.
                 */
                xfs_dqunlock(udq);
-               if (locked)
-                       xfs_dqunlock(gdq);
                /*
                 * we took a hard reference once upon a time in dqget,
                 * so give it back when the udquot no longer points at it
@@ -826,9 +783,7 @@ xfs_qm_dqattach_grouphint(
 
        } else {
                ASSERT(XFS_DQ_IS_LOCKED(udq));
-               if (! locked) {
-                       xfs_dqlock(gdq);
-               }
+               xfs_dqlock(gdq);
        }
 
        ASSERT(XFS_DQ_IS_LOCKED(udq));
@@ -841,10 +796,9 @@ xfs_qm_dqattach_grouphint(
                XFS_DQHOLD(gdq);
                udq->q_gdquot = gdq;
        }
-       if (! locked) {
-               xfs_dqunlock(gdq);
-               xfs_dqunlock(udq);
-       }
+
+       xfs_dqunlock(gdq);
+       xfs_dqunlock(udq);
 }
 
 
@@ -852,14 +806,11 @@ xfs_qm_dqattach_grouphint(
  * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
  * into account.
  * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
- * If XFS_QMOPT_DQLOCK, the dquot(s) will be returned locked. This option pretty
- * much made this code a complete mess, but it has been pretty useful.
- * If XFS_QMOPT_ILOCKED, then inode sent is already locked EXCL.
  * Inode may get unlocked and relocked in here, and the caller must deal with
  * the consequences.
  */
 int
-xfs_qm_dqattach(
+xfs_qm_dqattach_locked(
        xfs_inode_t     *ip,
        uint            flags)
 {
@@ -867,37 +818,32 @@ xfs_qm_dqattach(
        uint            nquotas = 0;
        int             error = 0;
 
-       if ((! XFS_IS_QUOTA_ON(mp)) ||
-           (! XFS_NOT_DQATTACHED(mp, ip)) ||
-           (ip->i_ino == mp->m_sb.sb_uquotino) ||
-           (ip->i_ino == mp->m_sb.sb_gquotino))
+       if (!XFS_IS_QUOTA_RUNNING(mp) ||
+           !XFS_IS_QUOTA_ON(mp) ||
+           !XFS_NOT_DQATTACHED(mp, ip) ||
+           ip->i_ino == mp->m_sb.sb_uquotino ||
+           ip->i_ino == mp->m_sb.sb_gquotino)
                return 0;
 
-       ASSERT((flags & XFS_QMOPT_ILOCKED) == 0 ||
-              XFS_ISLOCKED_INODE_EXCL(ip));
-
-       if (! (flags & XFS_QMOPT_ILOCKED))
-               xfs_ilock(ip, XFS_ILOCK_EXCL);
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 
        if (XFS_IS_UQUOTA_ON(mp)) {
                error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
                                                flags & XFS_QMOPT_DQALLOC,
-                                               flags & XFS_QMOPT_DQLOCK,
                                                NULL, &ip->i_udquot);
                if (error)
                        goto done;
                nquotas++;
        }
-       ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        if (XFS_IS_OQUOTA_ON(mp)) {
                error = XFS_IS_GQUOTA_ON(mp) ?
                        xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
                                                flags & XFS_QMOPT_DQALLOC,
-                                               flags & XFS_QMOPT_DQLOCK,
                                                ip->i_udquot, &ip->i_gdquot) :
                        xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQ_PROJ,
                                                flags & XFS_QMOPT_DQALLOC,
-                                               flags & XFS_QMOPT_DQLOCK,
                                                ip->i_udquot, &ip->i_gdquot);
                /*
                 * Don't worry about the udquot that we may have
@@ -913,7 +859,7 @@ xfs_qm_dqattach(
         * This WON'T, in general, result in a thrash.
         */
        if (nquotas == 2) {
-               ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+               ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
                ASSERT(ip->i_udquot);
                ASSERT(ip->i_gdquot);
 
@@ -928,36 +874,33 @@ xfs_qm_dqattach(
                /*
                 * Attach i_gdquot to the gdquot hint inside the i_udquot.
                 */
-               xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot,
-                                        flags & XFS_QMOPT_DQLOCK);
+               xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot);
        }
 
-      done:
-
+ done:
 #ifdef QUOTADEBUG
        if (! error) {
-               if (ip->i_udquot) {
-                       if (flags & XFS_QMOPT_DQLOCK)
-                               ASSERT(XFS_DQ_IS_LOCKED(ip->i_udquot));
-               }
-               if (ip->i_gdquot) {
-                       if (flags & XFS_QMOPT_DQLOCK)
-                               ASSERT(XFS_DQ_IS_LOCKED(ip->i_gdquot));
-               }
                if (XFS_IS_UQUOTA_ON(mp))
                        ASSERT(ip->i_udquot);
                if (XFS_IS_OQUOTA_ON(mp))
                        ASSERT(ip->i_gdquot);
        }
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 #endif
+       return error;
+}
 
-       if (! (flags & XFS_QMOPT_ILOCKED))
-               xfs_iunlock(ip, XFS_ILOCK_EXCL);
+int
+xfs_qm_dqattach(
+       struct xfs_inode        *ip,
+       uint                    flags)
+{
+       int                     error;
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       error = xfs_qm_dqattach_locked(ip, flags);
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
 
-#ifdef QUOTADEBUG
-       else
-               ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
-#endif
        return error;
 }
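
The attach path is now split into xfs_qm_dqattach_locked() plus a thin wrapper that takes and drops the inode lock, replacing the old XFS_QMOPT_ILOCKED flag. A minimal sketch of that locked-variant-plus-wrapper convention, with hypothetical demo_* names:

#include <linux/mutex.h>

/* Hypothetical type, for illustration only. */
struct demo_inode {
	struct mutex	lock;
	int		attached;
};

/* Callers that already hold ip->lock use the _locked variant directly. */
static int demo_attach_locked(struct demo_inode *ip)
{
	ip->attached = 1;
	return 0;
}

/* Thin wrapper for callers that do not hold the lock yet. */
static int demo_attach(struct demo_inode *ip)
{
	int	error;

	mutex_lock(&ip->lock);
	error = demo_attach_locked(ip);
	mutex_unlock(&ip->lock);
	return error;
}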
 
@@ -973,79 +916,58 @@ xfs_qm_dqdetach(
        if (!(ip->i_udquot || ip->i_gdquot))
                return;
 
+       trace_xfs_dquot_dqdetach(ip);
+
        ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
        ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
        if (ip->i_udquot) {
-               xfs_dqtrace_entry_ino(ip->i_udquot, "DQDETTACH", ip);
                xfs_qm_dqrele(ip->i_udquot);
                ip->i_udquot = NULL;
        }
        if (ip->i_gdquot) {
-               xfs_dqtrace_entry_ino(ip->i_gdquot, "DQDETTACH", ip);
                xfs_qm_dqrele(ip->i_gdquot);
                ip->i_gdquot = NULL;
        }
 }
 
-/*
- * This is called by VFS_SYNC and flags arg determines the caller,
- * and its motives, as done in xfs_sync.
- *
- * vfs_sync: SYNC_FSDATA|SYNC_ATTR|SYNC_BDFLUSH 0x31
- * syscall sync: SYNC_FSDATA|SYNC_ATTR|SYNC_DELWRI 0x25
- * umountroot : SYNC_WAIT | SYNC_CLOSE | SYNC_ATTR | SYNC_FSDATA
- */
-
 int
 xfs_qm_sync(
-       xfs_mount_t     *mp,
-       int             flags)
+       struct xfs_mount        *mp,
+       int                     flags)
 {
-       int             recl, restarts;
-       xfs_dquot_t     *dqp;
-       uint            flush_flags;
-       boolean_t       nowait;
-       int             error;
+       struct xfs_quotainfo    *q = mp->m_quotainfo;
+       int                     recl, restarts;
+       struct xfs_dquot        *dqp;
+       int                     error;
 
-       if (! XFS_IS_QUOTA_ON(mp))
+       if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return 0;
 
        restarts = 0;
-       /*
-        * We won't block unless we are asked to.
-        */
-       nowait = (boolean_t)(flags & SYNC_BDFLUSH || (flags & SYNC_WAIT) == 0);
 
   again:
-       xfs_qm_mplist_lock(mp);
+       mutex_lock(&q->qi_dqlist_lock);
        /*
         * dqpurge_all() also takes the mplist lock and iterate thru all dquots
         * in quotaoff. However, if the QUOTA_ACTIVE bits are not cleared
         * when we have the mplist lock, we know that dquots will be consistent
         * as long as we have it locked.
         */
-       if (! XFS_IS_QUOTA_ON(mp)) {
-               xfs_qm_mplist_unlock(mp);
+       if (!XFS_IS_QUOTA_ON(mp)) {
+               mutex_unlock(&q->qi_dqlist_lock);
                return 0;
        }
-       FOREACH_DQUOT_IN_MP(dqp, mp) {
+       ASSERT(mutex_is_locked(&q->qi_dqlist_lock));
+       list_for_each_entry(dqp, &q->qi_dqlist, q_mplist) {
                /*
                 * If this is vfs_sync calling, then skip the dquots that
                 * don't 'seem' to be dirty. ie. don't acquire dqlock.
                 * This is very similar to what xfs_sync does with inodes.
                 */
-               if (flags & SYNC_BDFLUSH) {
-                       if (! XFS_DQ_IS_DIRTY(dqp))
+               if (flags & SYNC_TRYLOCK) {
+                       if (!XFS_DQ_IS_DIRTY(dqp))
                                continue;
-               }
-
-               if (nowait) {
-                       /*
-                        * Try to acquire the dquot lock. We are NOT out of
-                        * lock order, but we just don't want to wait for this
-                        * lock, unless somebody wanted us to.
-                        */
-                       if (! xfs_qm_dqlock_nowait(dqp))
+                       if (!xfs_qm_dqlock_nowait(dqp))
                                continue;
                } else {
                        xfs_dqlock(dqp);
@@ -1060,9 +982,9 @@ xfs_qm_sync(
                }
 
                /* XXX a sentinel would be better */
-               recl = XFS_QI_MPLRECLAIMS(mp);
-               if (! xfs_qm_dqflock_nowait(dqp)) {
-                       if (nowait) {
+               recl = q->qi_dqreclaims;
+               if (!xfs_dqflock_nowait(dqp)) {
+                       if (flags & SYNC_TRYLOCK) {
                                xfs_dqunlock(dqp);
                                continue;
                        }
@@ -1080,30 +1002,35 @@ xfs_qm_sync(
                 * Let go of the mplist lock. We don't want to hold it
                 * across a disk write
                 */
-               flush_flags = (nowait) ? XFS_QMOPT_DELWRI : XFS_QMOPT_SYNC;
-               xfs_qm_mplist_unlock(mp);
-               xfs_dqtrace_entry(dqp, "XQM_SYNC: DQFLUSH");
-               error = xfs_qm_dqflush(dqp, flush_flags);
+               mutex_unlock(&q->qi_dqlist_lock);
+               error = xfs_qm_dqflush(dqp, flags);
                xfs_dqunlock(dqp);
                if (error && XFS_FORCED_SHUTDOWN(mp))
                        return 0;       /* Need to prevent umount failure */
                else if (error)
                        return error;
 
-               xfs_qm_mplist_lock(mp);
-               if (recl != XFS_QI_MPLRECLAIMS(mp)) {
+               mutex_lock(&q->qi_dqlist_lock);
+               if (recl != q->qi_dqreclaims) {
                        if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS)
                                break;
 
-                       xfs_qm_mplist_unlock(mp);
+                       mutex_unlock(&q->qi_dqlist_lock);
                        goto again;
                }
        }
 
-       xfs_qm_mplist_unlock(mp);
+       mutex_unlock(&q->qi_dqlist_lock);
        return 0;
 }
 
+/*
+ * The hash chains and the mplist use the same xfs_dqhash structure as
+ * their list head, but we can take the mplist qh_lock and one of the
+ * hash qh_locks at the same time without any problem as they aren't
+ * related.
+ */
+static struct lock_class_key xfs_quota_mplist_class;
 
 /*
  * This initializes all the quota information that's kept in the
@@ -1133,13 +1060,15 @@ xfs_qm_init_quotainfo(
         * and change the superblock accordingly.
         */
        if ((error = xfs_qm_init_quotainos(mp))) {
-               kmem_free(qinf, sizeof(xfs_quotainfo_t));
+               kmem_free(qinf);
                mp->m_quotainfo = NULL;
                return error;
        }
 
-       spin_lock_init(&qinf->qi_pinlock);
-       xfs_qm_list_init(&qinf->qi_dqlist, "mpdqlist", 0);
+       INIT_LIST_HEAD(&qinf->qi_dqlist);
+       mutex_init(&qinf->qi_dqlist_lock);
+       lockdep_set_class(&qinf->qi_dqlist_lock, &xfs_quota_mplist_class);
+
        qinf->qi_dqreclaims = 0;
 
        /* mutex used to serialize quotaoffs */
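
The per-mount list lock initialised above is given a dedicated lockdep class (see the xfs_quota_mplist_class comment earlier in this diff). The general mechanism of assigning a lock instance its own class looks roughly like this, with hypothetical names:

#include <linux/lockdep.h>
#include <linux/mutex.h>

/* One class shared by every lock initialised through this helper. */
static struct lock_class_key demo_list_lock_class;

static void demo_init_list_lock(struct mutex *m)
{
	mutex_init(m);
	lockdep_set_class(m, &demo_list_lock_class);
}
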
@@ -1235,19 +1164,19 @@ xfs_qm_destroy_quotainfo(
         */
        xfs_qm_rele_quotafs_ref(mp);
 
-       spinlock_destroy(&qi->qi_pinlock);
-       xfs_qm_list_destroy(&qi->qi_dqlist);
+       ASSERT(list_empty(&qi->qi_dqlist));
+       mutex_destroy(&qi->qi_dqlist_lock);
 
        if (qi->qi_uquotaip) {
-               XFS_PURGE_INODE(qi->qi_uquotaip);
+               IRELE(qi->qi_uquotaip);
                qi->qi_uquotaip = NULL; /* paranoia */
        }
        if (qi->qi_gquotaip) {
-               XFS_PURGE_INODE(qi->qi_gquotaip);
+               IRELE(qi->qi_gquotaip);
                qi->qi_gquotaip = NULL;
        }
        mutex_destroy(&qi->qi_quotaofflock);
-       kmem_free(qi, sizeof(xfs_quotainfo_t));
+       kmem_free(qi);
        mp->m_quotainfo = NULL;
 }
 
@@ -1263,7 +1192,7 @@ xfs_qm_list_init(
        int             n)
 {
        mutex_init(&list->qh_lock);
-       list->qh_next = NULL;
+       INIT_LIST_HEAD(&list->qh_list);
        list->qh_version = 0;
        list->qh_nelems = 0;
 }
@@ -1291,7 +1220,7 @@ xfs_qm_dqget_noattach(
        xfs_mount_t     *mp;
        xfs_dquot_t     *udqp, *gdqp;
 
-       ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        mp = ip->i_mount;
        udqp = NULL;
        gdqp = NULL;
@@ -1392,8 +1321,8 @@ xfs_qm_qino_alloc(
         * Keep an extra reference to this quota inode. This inode is
         * locked exclusively and joined to the transaction already.
         */
-       ASSERT(XFS_ISLOCKED_INODE_EXCL(*ip));
-       VN_HOLD(XFS_ITOV((*ip)));
+       ASSERT(xfs_isilocked(*ip, XFS_ILOCK_EXCL));
+       IHOLD(*ip);
 
        /*
         * Make the changes in the superblock, and log those too.
@@ -1402,9 +1331,6 @@ xfs_qm_qino_alloc(
         */
        spin_lock(&mp->m_sb_lock);
        if (flags & XFS_QMOPT_SBVERSION) {
-#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
-               unsigned oldv = mp->m_sb.sb_versionnum;
-#endif
                ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
                ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
                                   XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
@@ -1417,11 +1343,6 @@ xfs_qm_qino_alloc(
 
                /* qflags will get updated _after_ quotacheck */
                mp->m_sb.sb_qflags = 0;
-#if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
-               cmn_err(CE_NOTE,
-                       "Old superblock version %x, converting to %x.",
-                       oldv, mp->m_sb.sb_versionnum);
-#endif
        }
        if (flags & XFS_QMOPT_UQUOTA)
                mp->m_sb.sb_uquotino = (*ip)->i_ino;
@@ -1438,7 +1359,7 @@ xfs_qm_qino_alloc(
 }
 
 
-STATIC int
+STATIC void
 xfs_qm_reset_dqcounts(
        xfs_mount_t     *mp,
        xfs_buf_t       *bp,
@@ -1448,7 +1369,8 @@ xfs_qm_reset_dqcounts(
        xfs_disk_dquot_t        *ddq;
        int                     j;
 
-       xfs_buftrace("RESET DQUOTS", bp);
+       trace_xfs_reset_dqcounts(bp, _RET_IP_);
+
        /*
         * Reset all counters and timers. They'll be
         * started afresh by xfs_qm_quotacheck.
@@ -1456,10 +1378,10 @@ xfs_qm_reset_dqcounts(
 #ifdef DEBUG
        j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
        do_div(j, sizeof(xfs_dqblk_t));
-       ASSERT(XFS_QM_DQPERBLK(mp) == j);
+       ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
 #endif
        ddq = (xfs_disk_dquot_t *)XFS_BUF_PTR(bp);
-       for (j = 0; j < XFS_QM_DQPERBLK(mp); j++) {
+       for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
                /*
                 * Do a sanity check, and if needed, repair the dqblk. Don't
                 * output any warnings because it's perfectly possible to
@@ -1478,8 +1400,6 @@ xfs_qm_reset_dqcounts(
                ddq->d_rtbwarns = 0;
                ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1);
        }
-
-       return 0;
 }
 
 STATIC int
@@ -1516,17 +1436,17 @@ xfs_qm_dqiter_bufs(
        while (blkcnt--) {
                error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
                              XFS_FSB_TO_DADDR(mp, bno),
-                             (int)XFS_QI_DQCHUNKLEN(mp), 0, &bp);
+                             mp->m_quotainfo->qi_dqchunklen, 0, &bp);
                if (error)
                        break;
 
-               (void) xfs_qm_reset_dqcounts(mp, bp, firstid, type);
+               xfs_qm_reset_dqcounts(mp, bp, firstid, type);
                xfs_bdwrite(mp, bp);
                /*
                 * goto the next block.
                 */
                bno++;
-               firstid += XFS_QM_DQPERBLK(mp);
+               firstid += mp->m_quotainfo->qi_dqperchunk;
        }
        return error;
 }
@@ -1592,7 +1512,7 @@ xfs_qm_dqiterate(
                                continue;
 
                        firstid = (xfs_dqid_t) map[i].br_startoff *
-                               XFS_QM_DQPERBLK(mp);
+                               mp->m_quotainfo->qi_dqperchunk;
                        /*
                         * Do a read-ahead on the next extent.
                         */
@@ -1603,7 +1523,7 @@ xfs_qm_dqiterate(
                                while (rablkcnt--) {
                                        xfs_baread(mp->m_ddev_targp,
                                               XFS_FSB_TO_DADDR(mp, rablkno),
-                                              (int)XFS_QI_DQCHUNKLEN(mp));
+                                              mp->m_quotainfo->qi_dqchunklen);
                                        rablkno++;
                                }
                        }
@@ -1624,7 +1544,7 @@ xfs_qm_dqiterate(
                        break;
        } while (nmaps > 0);
 
-       kmem_free(map, XFS_DQITER_MAP_SIZE * sizeof(*map));
+       kmem_free(map);
 
        return error;
 }
@@ -1643,7 +1563,9 @@ xfs_qm_quotacheck_dqadjust(
        xfs_qcnt_t              rtblks)
 {
        ASSERT(XFS_DQ_IS_LOCKED(dqp));
-       xfs_dqtrace_entry(dqp, "QCHECK DQADJUST");
+
+       trace_xfs_dqadjust(dqp);
+
        /*
         * Adjust the inode count and the block count to reflect this inode's
         * resource usage.
@@ -1661,8 +1583,10 @@ xfs_qm_quotacheck_dqadjust(
 
        /*
         * Set default limits, adjust timers (since we changed usages)
+        *
+        * There are no timers for the default values set in the root dquot.
         */
-       if (! XFS_IS_SUSER_DQUOT(dqp)) {
+       if (dqp->q_core.d_id) {
                xfs_qm_adjust_dqlimits(dqp->q_mount, &dqp->q_core);
                xfs_qm_adjust_dqtimers(dqp->q_mount, &dqp->q_core);
        }
@@ -1739,12 +1663,6 @@ xfs_qm_dqusage_adjust(
                return error;
        }
 
-       if (ip->i_d.di_mode == 0) {
-               xfs_iput_new(ip, XFS_ILOCK_EXCL);
-               *res = BULKSTAT_RV_NOTHING;
-               return XFS_ERROR(ENOENT);
-       }
-
        /*
         * Obtain the locked dquots. In case of an error (eg. allocation
         * fails for ENOSPC), we return the negative of the error number
@@ -1838,14 +1756,14 @@ xfs_qm_quotacheck(
        lastino = 0;
        flags = 0;
 
-       ASSERT(XFS_QI_UQIP(mp) || XFS_QI_GQIP(mp));
+       ASSERT(mp->m_quotainfo->qi_uquotaip || mp->m_quotainfo->qi_gquotaip);
        ASSERT(XFS_IS_QUOTA_RUNNING(mp));
 
        /*
         * There should be no cached dquots. The (simplistic) quotacheck
         * algorithm doesn't like that.
         */
-       ASSERT(XFS_QI_MPLNDQUOTS(mp) == 0);
+       ASSERT(list_empty(&mp->m_quotainfo->qi_dqlist));
 
        cmn_err(CE_NOTE, "XFS quotacheck %s: Please wait.", mp->m_fsname);
 
@@ -1854,15 +1772,19 @@ xfs_qm_quotacheck(
         * their counters to zero. We need a clean slate.
         * We don't log our changes till later.
         */
-       if ((uip = XFS_QI_UQIP(mp))) {
-               if ((error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA)))
+       uip = mp->m_quotainfo->qi_uquotaip;
+       if (uip) {
+               error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA);
+               if (error)
                        goto error_return;
                flags |= XFS_UQUOTA_CHKD;
        }
 
-       if ((gip = XFS_QI_GQIP(mp))) {
-               if ((error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
-                                       XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA)))
+       gip = mp->m_quotainfo->qi_gquotaip;
+       if (gip) {
+               error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
+                                       XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
+               if (error)
                        goto error_return;
                flags |= XFS_OQUOTA_CHKD;
        }
@@ -1880,6 +1802,14 @@ xfs_qm_quotacheck(
        } while (! done);
 
        /*
+        * We've made all the changes that we need to make incore.
+        * Flush them down to disk buffers if everything was updated
+        * successfully.
+        */
+       if (!error)
+               error = xfs_qm_dqflush_all(mp, 0);
+
+       /*
         * We can get this error if we couldn't do a dquot allocation inside
         * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
         * dirty dquots that might be cached, we just want to get rid of them
@@ -1887,14 +1817,9 @@ xfs_qm_quotacheck(
         * at this point (because we intentionally didn't in dqget_noattach).
         */
        if (error) {
-               xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_QUOTAOFF);
+               xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
                goto error_return;
        }
-       /*
-        * We've made all the changes that we need to make incore.
-        * Now flush_them down to disk buffers.
-        */
-       xfs_qm_dqflush_all(mp, XFS_QMOPT_DELWRI);
 
        /*
         * We didn't log anything, because if we crashed, we'll have to
@@ -1913,7 +1838,7 @@ xfs_qm_quotacheck(
        mp->m_qflags &= ~(XFS_OQUOTA_CHKD | XFS_UQUOTA_CHKD);
        mp->m_qflags |= flags;
 
-       XQM_LIST_PRINT(&(XFS_QI_MPL_LIST(mp)), MPL_NEXT, "++++ Mp list +++");
+       xfs_qm_dquot_list_print(mp);
 
  error_return:
        if (error) {
@@ -1926,7 +1851,10 @@ xfs_qm_quotacheck(
                ASSERT(mp->m_quotainfo != NULL);
                ASSERT(xfs_Gqm != NULL);
                xfs_qm_destroy_quotainfo(mp);
-               (void)xfs_mount_reset_sbqflags(mp);
+               if (xfs_mount_reset_sbqflags(mp)) {
+                       cmn_err(CE_WARN, "XFS quotacheck %s: "
+                               "Failed to reset quota flags.", mp->m_fsname);
+               }
        } else {
                cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname);
        }
@@ -2005,59 +1933,53 @@ xfs_qm_init_quotainos(
                }
        }
 
-       XFS_QI_UQIP(mp) = uip;
-       XFS_QI_GQIP(mp) = gip;
+       mp->m_quotainfo->qi_uquotaip = uip;
+       mp->m_quotainfo->qi_gquotaip = gip;
 
        return 0;
 }
 
 
+
 /*
- * Traverse the freelist of dquots and attempt to reclaim a maximum of
- * 'howmany' dquots. This operation races with dqlookup(), and attempts to
- * favor the lookup function ...
- * XXXsup merge this with qm_reclaim_one().
+ * Just pop the least recently used dquot off the freelist and
+ * recycle it. The returned dquot is locked.
  */
-STATIC int
-xfs_qm_shake_freelist(
-       int howmany)
+STATIC xfs_dquot_t *
+xfs_qm_dqreclaim_one(void)
 {
-       int             nreclaimed;
-       xfs_dqhash_t    *hash;
-       xfs_dquot_t     *dqp, *nextdqp;
+       xfs_dquot_t     *dqpout;
+       xfs_dquot_t     *dqp;
        int             restarts;
-       int             nflushes;
 
-       if (howmany <= 0)
-               return 0;
-
-       nreclaimed = 0;
        restarts = 0;
-       nflushes = 0;
+       dqpout = NULL;
 
-#ifdef QUOTADEBUG
-       cmn_err(CE_DEBUG, "Shake free 0x%x", howmany);
-#endif
-       /* lock order is : hashchainlock, freelistlock, mplistlock */
- tryagain:
-       xfs_qm_freelist_lock(xfs_Gqm);
+       /* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
+startagain:
+       mutex_lock(&xfs_Gqm->qm_dqfrlist_lock);
 
-       for (dqp = xfs_Gqm->qm_dqfreelist.qh_next;
-            ((dqp != (xfs_dquot_t *) &xfs_Gqm->qm_dqfreelist) &&
-             nreclaimed < howmany); ) {
+       list_for_each_entry(dqp, &xfs_Gqm->qm_dqfrlist, q_freelist) {
+               struct xfs_mount *mp = dqp->q_mount;
                xfs_dqlock(dqp);
 
                /*
                 * We are racing with dqlookup here. Naturally we don't
-                * want to reclaim a dquot that lookup wants.
+                * want to reclaim a dquot that lookup wants. We release the
+                * freelist lock and start over, so that lookup will grab
+                * both the dquot and the freelistlock.
                 */
                if (dqp->dq_flags & XFS_DQ_WANT) {
+                       ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
+
+                       trace_xfs_dqreclaim_want(dqp);
+
                        xfs_dqunlock(dqp);
-                       xfs_qm_freelist_unlock(xfs_Gqm);
+                       mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
                        if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
-                               return nreclaimed;
+                               return NULL;
                        XQM_STATS_INC(xqmstats.xs_qm_dqwants);
-                       goto tryagain;
+                       goto startagain;
                }
 
                /*
@@ -2066,23 +1988,27 @@ xfs_qm_shake_freelist(
                 * life easier.
                 */
                if (dqp->dq_flags & XFS_DQ_INACTIVE) {
-                       ASSERT(dqp->q_mount == NULL);
+                       ASSERT(mp == NULL);
                        ASSERT(! XFS_DQ_IS_DIRTY(dqp));
-                       ASSERT(dqp->HL_PREVP == NULL);
-                       ASSERT(dqp->MPL_PREVP == NULL);
+                       ASSERT(list_empty(&dqp->q_hashlist));
+                       ASSERT(list_empty(&dqp->q_mplist));
+                       list_del_init(&dqp->q_freelist);
+                       xfs_Gqm->qm_dqfrlist_cnt--;
+                       xfs_dqunlock(dqp);
+                       dqpout = dqp;
                        XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
-                       nextdqp = dqp->dq_flnext;
-                       goto off_freelist;
+                       break;
                }
 
-               ASSERT(dqp->MPL_PREVP);
+               ASSERT(dqp->q_hash);
+               ASSERT(!list_empty(&dqp->q_mplist));
+
                /*
                 * Try to grab the flush lock. If this dquot is in the process of
                 * getting flushed to disk, we don't want to reclaim it.
                 */
-               if (! xfs_qm_dqflock_nowait(dqp)) {
+               if (!xfs_dqflock_nowait(dqp)) {
                        xfs_dqunlock(dqp);
-                       dqp = dqp->dq_flnext;
                        continue;
                }
 
@@ -2093,16 +2019,23 @@ xfs_qm_shake_freelist(
                 * dirty dquots.
                 */
                if (XFS_DQ_IS_DIRTY(dqp)) {
-                       xfs_dqtrace_entry(dqp, "DQSHAKE: DQDIRTY");
+                       int     error;
+
+                       trace_xfs_dqreclaim_dirty(dqp);
+
                        /*
                         * We flush it delayed write, so don't bother
-                        * releasing the mplock.
+                        * releasing the freelist lock.
                         */
-                       (void) xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
+                       error = xfs_qm_dqflush(dqp, 0);
+                       if (error) {
+                               xfs_fs_cmn_err(CE_WARN, mp,
+                       "xfs_qm_dqreclaim: dquot %p flush failed", dqp);
+                       }
                        xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
-                       dqp = dqp->dq_flnext;
                        continue;
                }
+
                /*
                 * We're trying to get the hashlock out of order. This races
                 * with dqlookup; so, we giveup and goto the next dquot if
@@ -2110,55 +2043,75 @@ xfs_qm_shake_freelist(
                 * a dqlookup process that holds the hashlock that is
                 * waiting for the freelist lock.
                 */
-               if (! xfs_qm_dqhashlock_nowait(dqp)) {
-                       xfs_dqfunlock(dqp);
-                       xfs_dqunlock(dqp);
-                       dqp = dqp->dq_flnext;
-                       continue;
+               if (!mutex_trylock(&dqp->q_hash->qh_lock)) {
+                       restarts++;
+                       goto dqfunlock;
                }
+
                /*
                 * This races with dquot allocation code as well as dqflush_all
                 * and reclaim code. So, if we failed to grab the mplist lock,
                 * giveup everything and start over.
                 */
-               hash = dqp->q_hash;
-               ASSERT(hash);
-               if (! xfs_qm_mplist_nowait(dqp->q_mount)) {
-                       /* XXX put a sentinel so that we can come back here */
+               if (!mutex_trylock(&mp->m_quotainfo->qi_dqlist_lock)) {
+                       restarts++;
+                       mutex_unlock(&dqp->q_hash->qh_lock);
                        xfs_dqfunlock(dqp);
                        xfs_dqunlock(dqp);
-                       XFS_DQ_HASH_UNLOCK(hash);
-                       xfs_qm_freelist_unlock(xfs_Gqm);
-                       if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
-                               return nreclaimed;
-                       goto tryagain;
+                       mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
+                       if (restarts++ >= XFS_QM_RECLAIM_MAX_RESTARTS)
+                               return NULL;
+                       goto startagain;
                }
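
Dropping every lock and restarting the scan is safe but could in principle loop forever under heavy contention, so the restart counter caps the retries; once XFS_QM_RECLAIM_MAX_RESTARTS is reached the function simply reports failure. A small sketch of that bounded-restart shape (the limit value and try_once() are made up for illustration):

        #include <stdbool.h>
        #include <stdio.h>
        #include <stdlib.h>

        #define MAX_RESTARTS    4       /* illustrative, not the XFS value */

        static bool try_once(void)
        {
                /* stand-in for one scan attempt that may hit lock contention */
                return rand() % 3 == 0;
        }

        static bool reclaim_with_restarts(void)
        {
                int restarts = 0;

                for (;;) {
                        if (try_once())
                                return true;    /* reclaimed something */
                        if (restarts++ >= MAX_RESTARTS)
                                return false;   /* give up rather than livelock */
                        /* all locks were dropped; start again from the top */
                }
        }

        int main(void)
        {
                printf("%s\n", reclaim_with_restarts() ? "reclaimed" : "gave up");
                return 0;
        }
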
-               xfs_dqtrace_entry(dqp, "DQSHAKE: UNLINKING");
-#ifdef QUOTADEBUG
-               cmn_err(CE_DEBUG, "Shake 0x%p, ID 0x%x\n",
-                       dqp, be32_to_cpu(dqp->q_core.d_id));
-#endif
+
                ASSERT(dqp->q_nrefs == 0);
-               nextdqp = dqp->dq_flnext;
-               XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
-               XQM_HASHLIST_REMOVE(hash, dqp);
+               list_del_init(&dqp->q_mplist);
+               mp->m_quotainfo->qi_dquots--;
+               mp->m_quotainfo->qi_dqreclaims++;
+               list_del_init(&dqp->q_hashlist);
+               dqp->q_hash->qh_version++;
+               list_del_init(&dqp->q_freelist);
+               xfs_Gqm->qm_dqfrlist_cnt--;
+               dqpout = dqp;
+               mutex_unlock(&mp->m_quotainfo->qi_dqlist_lock);
+               mutex_unlock(&dqp->q_hash->qh_lock);
+dqfunlock:
                xfs_dqfunlock(dqp);
-               xfs_qm_mplist_unlock(dqp->q_mount);
-               XFS_DQ_HASH_UNLOCK(hash);
-
- off_freelist:
-               XQM_FREELIST_REMOVE(dqp);
                xfs_dqunlock(dqp);
-               nreclaimed++;
-               XQM_STATS_INC(xqmstats.xs_qm_dqshake_reclaims);
+               if (dqpout)
+                       break;
+               if (restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
+                       return NULL;
+       }
+       mutex_unlock(&xfs_Gqm->qm_dqfrlist_lock);
+       return dqpout;
+}
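
The successful path above detaches the dquot from all three lists (per-mount, hash chain, freelist) while every relevant lock is held, bumps the hash version so concurrent lookups notice the change, and only then unwinds the locks in reverse order. A compact userspace sketch of that "unlink from several lists under their locks" sequence, using a toy list_del_init() and hypothetical types:

        #include <pthread.h>
        #include <stdio.h>

        /* Toy circular list, shaped like the kernel's struct list_head. */
        struct list_head { struct list_head *next, *prev; };

        static void list_init(struct list_head *h) { h->next = h->prev = h; }
        static void list_del_init(struct list_head *e)
        {
                e->prev->next = e->next;
                e->next->prev = e->prev;
                list_init(e);
        }

        struct dq { struct list_head mplist, hashlist, freelist; };

        static pthread_mutex_t mplist_lock   = PTHREAD_MUTEX_INITIALIZER;
        static pthread_mutex_t hashlist_lock = PTHREAD_MUTEX_INITIALIZER;
        static pthread_mutex_t freelist_lock = PTHREAD_MUTEX_INITIALIZER;
        static unsigned long hash_version;

        /* Caller already holds freelist_lock, hashlist_lock and mplist_lock. */
        static void unhash_all(struct dq *dqp)
        {
                list_del_init(&dqp->mplist);    /* off the per-mount list    */
                list_del_init(&dqp->hashlist);  /* off the hash chain        */
                hash_version++;                 /* invalidate racing lookups */
                list_del_init(&dqp->freelist);  /* off the global freelist   */
        }

        int main(void)
        {
                struct dq d;

                list_init(&d.mplist); list_init(&d.hashlist); list_init(&d.freelist);
                pthread_mutex_lock(&freelist_lock);
                pthread_mutex_lock(&hashlist_lock);
                pthread_mutex_lock(&mplist_lock);
                unhash_all(&d);
                pthread_mutex_unlock(&mplist_lock);     /* release in reverse order */
                pthread_mutex_unlock(&hashlist_lock);
                pthread_mutex_unlock(&freelist_lock);
                printf("detached; hash version %lu\n", hash_version);
                return 0;
        }
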
+
+/*
+ * Traverse the freelist of dquots and attempt to reclaim a maximum of
+ * 'howmany' dquots. This operation races with dqlookup(), and attempts to
+ * favor the lookup function ...
+ */
+STATIC int
+xfs_qm_shake_freelist(
+       int     howmany)
+{
+       int             nreclaimed = 0;
+       xfs_dquot_t     *dqp;
+
+       if (howmany <= 0)
+               return 0;
+
+       while (nreclaimed < howmany) {
+               dqp = xfs_qm_dqreclaim_one();
+               if (!dqp)
+                       return nreclaimed;
                xfs_qm_dqdestroy(dqp);
-               dqp = nextdqp;
+               nreclaimed++;
        }
-       xfs_qm_freelist_unlock(xfs_Gqm);
        return nreclaimed;
 }
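
With the per-dquot work factored into xfs_qm_dqreclaim_one(), the shaker itself reduces to "reclaim one, destroy it, repeat until the quota is met or the freelist runs dry". A trivially compilable sketch of that driver loop (the two helpers are stubs, not the XFS functions):

        #include <stdio.h>
        #include <stdlib.h>

        /* Stubs standing in for the reclaim and destroy steps. */
        static void *reclaim_one(void)
        {
                static int left = 5;            /* pretend five free dquots exist */

                return left-- > 0 ? malloc(1) : NULL;
        }
        static void destroy(void *dqp) { free(dqp); }

        static int shake_freelist(int howmany)
        {
                int nreclaimed = 0;

                if (howmany <= 0)
                        return 0;

                while (nreclaimed < howmany) {
                        void *dqp = reclaim_one();

                        if (!dqp)
                                break;          /* freelist exhausted or too contended */
                        destroy(dqp);
                        nreclaimed++;
                }
                return nreclaimed;
        }

        int main(void)
        {
                printf("reclaimed %d dquots\n", shake_freelist(8));
                return 0;
        }
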
 
-
 /*
  * The kmem_shake interface is invoked when memory is running low.
  */
@@ -2173,7 +2126,7 @@ xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask)
        if (!xfs_Gqm)
                return 0;
 
-       nfree = xfs_Gqm->qm_dqfreelist.qh_nelems; /* free dquots */
+       nfree = xfs_Gqm->qm_dqfrlist_cnt; /* free dquots */
        /* incore dquots in all f/s's */
        ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree;
 
@@ -2189,121 +2142,6 @@ xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask)
 }
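
xfs_qm_shake() is the memory-shrinker hook: it derives how many dquots are actually in use (total minus free) and decides how hard to shake the freelist. The exact XFS heuristic is not shown in this hunk, so the sketch below uses a made-up balance policy purely to illustrate the bookkeeping:

        #include <stdio.h>

        /* Illustrative counters; in XFS these live in the global quota manager. */
        static int total_dquots = 128;
        static int free_dquots  = 96;

        static int shake_freelist(int howmany) { return howmany; }     /* stub */

        /*
         * Shrinker-style callback: nr_to_scan == 0 just reports the size of
         * the free pool; otherwise reclaim by a made-up balance heuristic.
         */
        static int shake(int nr_to_scan)
        {
                int nfree   = free_dquots;              /* free dquots          */
                int ndqused = total_dquots - nfree;     /* incore dquots in use */
                int excess  = nfree - ndqused;

                if (nr_to_scan == 0)
                        return nfree;

                /* made-up policy: only shake the surplus of free over used */
                if (excess <= 0)
                        return 0;
                return shake_freelist(excess < nr_to_scan ? excess : nr_to_scan);
        }

        int main(void)
        {
                printf("pool=%d reclaimed=%d\n", shake(0), shake(16));
                return 0;
        }
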
 
 
-/*
- * Just pop the least recently used dquot off the freelist and
- * recycle it. The returned dquot is locked.
- */
-STATIC xfs_dquot_t *
-xfs_qm_dqreclaim_one(void)
-{
-       xfs_dquot_t     *dqpout;
-       xfs_dquot_t     *dqp;
-       int             restarts;
-       int             nflushes;
-
-       restarts = 0;
-       dqpout = NULL;
-       nflushes = 0;
-
-       /* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
- startagain:
-       xfs_qm_freelist_lock(xfs_Gqm);
-
-       FOREACH_DQUOT_IN_FREELIST(dqp, &(xfs_Gqm->qm_dqfreelist)) {
-               xfs_dqlock(dqp);
-
-               /*
-                * We are racing with dqlookup here. Naturally we don't
-                * want to reclaim a dquot that lookup wants. We release the
-                * freelist lock and start over, so that lookup will grab
-                * both the dquot and the freelistlock.
-                */
-               if (dqp->dq_flags & XFS_DQ_WANT) {
-                       ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
-                       xfs_dqtrace_entry(dqp, "DQRECLAIM: DQWANT");
-                       xfs_dqunlock(dqp);
-                       xfs_qm_freelist_unlock(xfs_Gqm);
-                       if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
-                               return NULL;
-                       XQM_STATS_INC(xqmstats.xs_qm_dqwants);
-                       goto startagain;
-               }
-
-               /*
-                * If the dquot is inactive, we are assured that it is
-                * not on the mplist or the hashlist, and that makes our
-                * life easier.
-                */
-               if (dqp->dq_flags & XFS_DQ_INACTIVE) {
-                       ASSERT(dqp->q_mount == NULL);
-                       ASSERT(! XFS_DQ_IS_DIRTY(dqp));
-                       ASSERT(dqp->HL_PREVP == NULL);
-                       ASSERT(dqp->MPL_PREVP == NULL);
-                       XQM_FREELIST_REMOVE(dqp);
-                       xfs_dqunlock(dqp);
-                       dqpout = dqp;
-                       XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
-                       break;
-               }
-
-               ASSERT(dqp->q_hash);
-               ASSERT(dqp->MPL_PREVP);
-
-               /*
-                * Try to grab the flush lock. If this dquot is in the process of
-                * getting flushed to disk, we don't want to reclaim it.
-                */
-               if (! xfs_qm_dqflock_nowait(dqp)) {
-                       xfs_dqunlock(dqp);
-                       continue;
-               }
-
-               /*
-                * We have the flush lock so we know that this is not in the
-                * process of being flushed. So, if this is dirty, flush it
-                * DELWRI so that we don't get a freelist infested with
-                * dirty dquots.
-                */
-               if (XFS_DQ_IS_DIRTY(dqp)) {
-                       xfs_dqtrace_entry(dqp, "DQRECLAIM: DQDIRTY");
-                       /*
-                        * We flush it delayed write, so don't bother
-                        * releasing the freelist lock.
-                        */
-                       (void) xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
-                       xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
-                       continue;
-               }
-
-               if (! xfs_qm_mplist_nowait(dqp->q_mount)) {
-                       xfs_dqfunlock(dqp);
-                       xfs_dqunlock(dqp);
-                       continue;
-               }
-
-               if (! xfs_qm_dqhashlock_nowait(dqp))
-                       goto mplistunlock;
-
-               ASSERT(dqp->q_nrefs == 0);
-               xfs_dqtrace_entry(dqp, "DQRECLAIM: UNLINKING");
-               XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
-               XQM_HASHLIST_REMOVE(dqp->q_hash, dqp);
-               XQM_FREELIST_REMOVE(dqp);
-               dqpout = dqp;
-               XFS_DQ_HASH_UNLOCK(dqp->q_hash);
- mplistunlock:
-               xfs_qm_mplist_unlock(dqp->q_mount);
-               xfs_dqfunlock(dqp);
-               xfs_dqunlock(dqp);
-               if (dqpout)
-                       break;
-       }
-
-       xfs_qm_freelist_unlock(xfs_Gqm);
-       return dqpout;
-}
-
-
 /*------------------------------------------------------------------*/
 
 /*
@@ -2378,9 +2216,9 @@ xfs_qm_write_sb_changes(
        }
 
        xfs_mod_sb(tp, flags);
-       (void) xfs_trans_commit(tp, 0);
+       error = xfs_trans_commit(tp, 0);
 
-       return 0;
+       return error;
 }
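
The change above stops discarding the return value of xfs_trans_commit(): the superblock-update helper now hands any commit failure back to its caller instead of reporting unconditional success. The general shape, as a tiny standalone sketch with stubbed transaction helpers:

        #include <errno.h>
        #include <stdio.h>

        struct trans { int doomed; };

        static void mod_sb(struct trans *tp, unsigned flags) { (void)tp; (void)flags; }
        static int  trans_commit(struct trans *tp) { return tp->doomed ? -EIO : 0; }

        /* Before: "(void) trans_commit(tp); return 0;" silently hid I/O errors. */
        static int write_sb_changes(struct trans *tp, unsigned flags)
        {
                int error;

                mod_sb(tp, flags);
                error = trans_commit(tp);       /* propagate, don't swallow */
                return error;
        }

        int main(void)
        {
                struct trans tp = { .doomed = 1 };

                printf("write_sb_changes: %d\n", write_sb_changes(&tp, 0));
                return 0;
        }
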
 
 
@@ -2399,20 +2237,20 @@ xfs_qm_write_sb_changes(
  */
 int
 xfs_qm_vop_dqalloc(
-       xfs_mount_t     *mp,
-       xfs_inode_t     *ip,
-       uid_t           uid,
-       gid_t           gid,
-       prid_t          prid,
-       uint            flags,
-       xfs_dquot_t     **O_udqpp,
-       xfs_dquot_t     **O_gdqpp)
+       struct xfs_inode        *ip,
+       uid_t                   uid,
+       gid_t                   gid,
+       prid_t                  prid,
+       uint                    flags,
+       struct xfs_dquot        **O_udqpp,
+       struct xfs_dquot        **O_gdqpp)
 {
-       int             error;
-       xfs_dquot_t     *uq, *gq;
-       uint            lockflags;
+       struct xfs_mount        *mp = ip->i_mount;
+       struct xfs_dquot        *uq, *gq;
+       int                     error;
+       uint                    lockflags;
 
-       if (!XFS_IS_QUOTA_ON(mp))
+       if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return 0;
 
        lockflags = XFS_ILOCK_EXCL;
@@ -2426,8 +2264,8 @@ xfs_qm_vop_dqalloc(
         * if necessary. The dquot(s) will not be locked.
         */
        if (XFS_NOT_DQATTACHED(mp, ip)) {
-               if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_DQALLOC |
-                                           XFS_QMOPT_ILOCKED))) {
+               error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
+               if (error) {
                        xfs_iunlock(ip, lockflags);
                        return error;
                }
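
Because the inode lock is already held at this point, the patch switches to xfs_qm_dqattach_locked() and must drop that lock itself on failure before returning. A minimal sketch of the "call a *_locked variant and unwind the lock on error" pattern (stub names; a pthread mutex in place of the inode lock):

        #include <pthread.h>
        #include <stdio.h>

        struct inode { pthread_mutex_t ilock; int attached; };

        /* Stub for the *_locked variant: the caller must already hold ip->ilock. */
        static int dqattach_locked(struct inode *ip)
        {
                ip->attached = 1;
                return 0;               /* or a negative errno on failure */
        }

        static int dqalloc(struct inode *ip)
        {
                int error;

                pthread_mutex_lock(&ip->ilock);
                if (!ip->attached) {
                        error = dqattach_locked(ip);
                        if (error) {
                                pthread_mutex_unlock(&ip->ilock);   /* unwind first */
                                return error;
                        }
                }
                /* ... look up or allocate the dquots here ... */
                pthread_mutex_unlock(&ip->ilock);
                return 0;
        }

        int main(void)
        {
                struct inode ip = { .ilock = PTHREAD_MUTEX_INITIALIZER };

                printf("dqalloc: %d\n", dqalloc(&ip));
                return 0;
        }
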
@@ -2520,7 +2358,7 @@ xfs_qm_vop_dqalloc(
                }
        }
        if (uq)
-               xfs_dqtrace_entry_ino(uq, "DQALLOC", ip);
+               trace_xfs_dquot_dqalloc(ip);
 
        xfs_iunlock(ip, lockflags);
        if (O_udqpp)
@@ -2549,7 +2387,8 @@ xfs_qm_vop_chown(
        uint            bfield = XFS_IS_REALTIME_INODE(ip) ?
                                 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
 
-       ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
+
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
 
        /* old dquot */
@@ -2588,13 +2427,13 @@ xfs_qm_vop_chown_reserve(
        xfs_dquot_t     *gdqp,
        uint            flags)
 {
-       int             error;
-       xfs_mount_t     *mp;
+       xfs_mount_t     *mp = ip->i_mount;
        uint            delblks, blkflags, prjflags = 0;
        xfs_dquot_t     *unresudq, *unresgdq, *delblksudq, *delblksgdq;
+       int             error;
 
-       ASSERT(XFS_ISLOCKED_INODE(ip));
-       mp = ip->i_mount;
+
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
        ASSERT(XFS_IS_QUOTA_RUNNING(mp));
 
        delblks = ip->i_delayed_blks;
@@ -2662,28 +2501,23 @@ xfs_qm_vop_chown_reserve(
 
 int
 xfs_qm_vop_rename_dqattach(
-       xfs_inode_t     **i_tab)
+       struct xfs_inode        **i_tab)
 {
-       xfs_inode_t     *ip;
-       int             i;
-       int             error;
+       struct xfs_mount        *mp = i_tab[0]->i_mount;
+       int                     i;
 
-       ip = i_tab[0];
-
-       if (! XFS_IS_QUOTA_ON(ip->i_mount))
+       if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return 0;
 
-       if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) {
-               error = xfs_qm_dqattach(ip, 0);
-               if (error)
-                       return error;
-       }
-       for (i = 1; (i < 4 && i_tab[i]); i++) {
+       for (i = 0; (i < 4 && i_tab[i]); i++) {
+               struct xfs_inode        *ip = i_tab[i];
+               int                     error;
+
                /*
                 * Watch out for duplicate entries in the table.
                 */
-               if ((ip = i_tab[i]) != i_tab[i-1]) {
-                       if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) {
+               if (i == 0 || ip != i_tab[i-1]) {
+                       if (XFS_NOT_DQATTACHED(mp, ip)) {
                                error = xfs_qm_dqattach(ip, 0);
                                if (error)
                                        return error;
@@ -2694,17 +2528,19 @@ xfs_qm_vop_rename_dqattach(
 }
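
The rewritten loop attaches dquots for every inode in the rename table, starting at index 0 and skipping an entry only when it repeats the previous slot, so each distinct inode is attached exactly once. A self-contained sketch of that duplicate-skipping walk (attach() is a stub):

        #include <stdio.h>

        struct inode { int ino; int attached; };

        static int attach(struct inode *ip)
        {
                ip->attached = 1;
                return 0;               /* negative errno on failure */
        }

        static int rename_dqattach(struct inode **tab, int n)
        {
                for (int i = 0; i < n && tab[i]; i++) {
                        /* watch out for duplicate entries in the table */
                        if (i == 0 || tab[i] != tab[i - 1]) {
                                if (!tab[i]->attached) {
                                        int error = attach(tab[i]);

                                        if (error)
                                                return error;
                                }
                        }
                }
                return 0;
        }

        int main(void)
        {
                struct inode a = { .ino = 1 }, b = { .ino = 2 };
                struct inode *tab[4] = { &a, &b, &b, NULL };

                printf("rename_dqattach: %d (a=%d b=%d)\n",
                       rename_dqattach(tab, 4), a.attached, b.attached);
                return 0;
        }
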
 
 void
-xfs_qm_vop_dqattach_and_dqmod_newinode(
-       xfs_trans_t     *tp,
-       xfs_inode_t     *ip,
-       xfs_dquot_t     *udqp,
-       xfs_dquot_t     *gdqp)
+xfs_qm_vop_create_dqattach(
+       struct xfs_trans        *tp,
+       struct xfs_inode        *ip,
+       struct xfs_dquot        *udqp,
+       struct xfs_dquot        *gdqp)
 {
-       if (!XFS_IS_QUOTA_ON(tp->t_mountp))
+       struct xfs_mount        *mp = tp->t_mountp;
+
+       if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return;
 
-       ASSERT(XFS_ISLOCKED_INODE_EXCL(ip));
-       ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));
+       ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+       ASSERT(XFS_IS_QUOTA_RUNNING(mp));
 
        if (udqp) {
                xfs_dqlock(udqp);
@@ -2712,7 +2548,7 @@ xfs_qm_vop_dqattach_and_dqmod_newinode(
                xfs_dqunlock(udqp);
                ASSERT(ip->i_udquot == NULL);
                ip->i_udquot = udqp;
-               ASSERT(XFS_IS_UQUOTA_ON(tp->t_mountp));
+               ASSERT(XFS_IS_UQUOTA_ON(mp));
                ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
                xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
        }
@@ -2722,105 +2558,11 @@ xfs_qm_vop_dqattach_and_dqmod_newinode(
                xfs_dqunlock(gdqp);
                ASSERT(ip->i_gdquot == NULL);
                ip->i_gdquot = gdqp;
-               ASSERT(XFS_IS_OQUOTA_ON(tp->t_mountp));
-               ASSERT((XFS_IS_GQUOTA_ON(tp->t_mountp) ?
+               ASSERT(XFS_IS_OQUOTA_ON(mp));
+               ASSERT((XFS_IS_GQUOTA_ON(mp) ?
                        ip->i_d.di_gid : ip->i_d.di_projid) ==
                                be32_to_cpu(gdqp->q_core.d_id));
                xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
        }
 }
 
-/* ------------- list stuff -----------------*/
-STATIC void
-xfs_qm_freelist_init(xfs_frlist_t *ql)
-{
-       ql->qh_next = ql->qh_prev = (xfs_dquot_t *) ql;
-       mutex_init(&ql->qh_lock);
-       ql->qh_version = 0;
-       ql->qh_nelems = 0;
-}
-
-STATIC void
-xfs_qm_freelist_destroy(xfs_frlist_t *ql)
-{
-       xfs_dquot_t     *dqp, *nextdqp;
-
-       mutex_lock(&ql->qh_lock);
-       for (dqp = ql->qh_next;
-            dqp != (xfs_dquot_t *)ql; ) {
-               xfs_dqlock(dqp);
-               nextdqp = dqp->dq_flnext;
-#ifdef QUOTADEBUG
-               cmn_err(CE_DEBUG, "FREELIST destroy 0x%p", dqp);
-#endif
-               XQM_FREELIST_REMOVE(dqp);
-               xfs_dqunlock(dqp);
-               xfs_qm_dqdestroy(dqp);
-               dqp = nextdqp;
-       }
-       mutex_unlock(&ql->qh_lock);
-       mutex_destroy(&ql->qh_lock);
-
-       ASSERT(ql->qh_nelems == 0);
-}
-
-STATIC void
-xfs_qm_freelist_insert(xfs_frlist_t *ql, xfs_dquot_t *dq)
-{
-       dq->dq_flnext = ql->qh_next;
-       dq->dq_flprev = (xfs_dquot_t *)ql;
-       ql->qh_next = dq;
-       dq->dq_flnext->dq_flprev = dq;
-       xfs_Gqm->qm_dqfreelist.qh_nelems++;
-       xfs_Gqm->qm_dqfreelist.qh_version++;
-}
-
-void
-xfs_qm_freelist_unlink(xfs_dquot_t *dq)
-{
-       xfs_dquot_t *next = dq->dq_flnext;
-       xfs_dquot_t *prev = dq->dq_flprev;
-
-       next->dq_flprev = prev;
-       prev->dq_flnext = next;
-       dq->dq_flnext = dq->dq_flprev = dq;
-       xfs_Gqm->qm_dqfreelist.qh_nelems--;
-       xfs_Gqm->qm_dqfreelist.qh_version++;
-}
-
-void
-xfs_qm_freelist_append(xfs_frlist_t *ql, xfs_dquot_t *dq)
-{
-       xfs_qm_freelist_insert((xfs_frlist_t *)ql->qh_prev, dq);
-}
-
-STATIC int
-xfs_qm_dqhashlock_nowait(
-       xfs_dquot_t *dqp)
-{
-       int locked;
-
-       locked = mutex_trylock(&((dqp)->q_hash->qh_lock));
-       return locked;
-}
-
-int
-xfs_qm_freelist_lock_nowait(
-       xfs_qm_t *xqm)
-{
-       int locked;
-
-       locked = mutex_trylock(&(xqm->qm_dqfreelist.qh_lock));
-       return locked;
-}
-
-STATIC int
-xfs_qm_mplist_nowait(
-       xfs_mount_t     *mp)
-{
-       int locked;
-
-       ASSERT(mp->m_quotainfo);
-       locked = mutex_trylock(&(XFS_QI_MPLLOCK(mp)));
-       return locked;
-}
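
The helpers deleted above hand-rolled a circular doubly linked freelist with explicit next/prev pointer surgery; elsewhere in the patch they give way to a plain list_head and the generic list_add()/list_del_init() operations. A userspace sketch showing that the open-coded insert and unlink are exactly what the generic list primitives do (toy re-implementation for illustration, not the kernel headers):

        #include <stdio.h>

        /* Toy equivalents of the kernel's list primitives. */
        struct list_head { struct list_head *next, *prev; };

        static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

        static void list_add(struct list_head *new, struct list_head *head)
        {
                /* the same pointer surgery xfs_qm_freelist_insert() open-coded */
                new->next = head->next;
                new->prev = head;
                head->next->prev = new;
                head->next = new;
        }

        static void list_del_init(struct list_head *e)
        {
                /* the same as xfs_qm_freelist_unlink(), minus the counters */
                e->next->prev = e->prev;
                e->prev->next = e->next;
                INIT_LIST_HEAD(e);
        }

        static int list_empty(const struct list_head *h) { return h->next == h; }

        int main(void)
        {
                struct list_head freelist, dq;

                INIT_LIST_HEAD(&freelist);
                INIT_LIST_HEAD(&dq);
                list_add(&dq, &freelist);
                printf("on freelist: %d\n", !list_empty(&freelist));
                list_del_init(&dq);
                printf("on freelist: %d\n", !list_empty(&freelist));
                return 0;
        }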