[XFS] kill struct xfs_mount_args
[safe/jmp/linux-2.6] / fs / xfs / quota / xfs_qm.c
1 /*
2  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_bit.h"
21 #include "xfs_log.h"
22 #include "xfs_inum.h"
23 #include "xfs_trans.h"
24 #include "xfs_sb.h"
25 #include "xfs_ag.h"
26 #include "xfs_dir2.h"
27 #include "xfs_alloc.h"
28 #include "xfs_dmapi.h"
29 #include "xfs_quota.h"
30 #include "xfs_mount.h"
31 #include "xfs_bmap_btree.h"
32 #include "xfs_alloc_btree.h"
33 #include "xfs_ialloc_btree.h"
34 #include "xfs_dir2_sf.h"
35 #include "xfs_attr_sf.h"
36 #include "xfs_dinode.h"
37 #include "xfs_inode.h"
38 #include "xfs_btree.h"
39 #include "xfs_ialloc.h"
40 #include "xfs_itable.h"
41 #include "xfs_rtalloc.h"
42 #include "xfs_error.h"
43 #include "xfs_bmap.h"
44 #include "xfs_rw.h"
45 #include "xfs_acl.h"
46 #include "xfs_attr.h"
47 #include "xfs_buf_item.h"
48 #include "xfs_trans_space.h"
49 #include "xfs_utils.h"
50 #include "xfs_qm.h"
51
52 /*
53  * The global quota manager. There is only one of these for the entire
54  * system, _not_ one per file system. XQM keeps track of the overall
55  * quota functionality, including maintaining the freelist and hash
56  * tables of dquots.
57  */
/* Serializes creation, teardown and refcounting of the xfs_Gqm singleton. */
58 mutex_t         xfs_Gqm_lock;
/* The singleton quota manager; NULL until the first quota-enabled mount. */
59 struct xfs_qm   *xfs_Gqm;
/* Rough system-wide dquot estimate, derived from hash size in xfs_Gqm_init. */
60 uint            ndquot;
61
/* Slab caches shared by all mounts; created once, then reused (see init). */
62 kmem_zone_t     *qm_dqzone;
63 kmem_zone_t     *qm_dqtrxzone;
64
/* All-zero credentials for callers that need a cred_t but don't care. */
65 static cred_t   xfs_zerocr;
/* Forward declarations for file-local helpers defined later in this file. */
67 STATIC void     xfs_qm_list_init(xfs_dqlist_t *, char *, int);
68 STATIC void     xfs_qm_list_destroy(xfs_dqlist_t *);
69
70 STATIC void     xfs_qm_freelist_init(xfs_frlist_t *);
71 STATIC void     xfs_qm_freelist_destroy(xfs_frlist_t *);
72 STATIC int      xfs_qm_mplist_nowait(xfs_mount_t *);
73 STATIC int      xfs_qm_dqhashlock_nowait(xfs_dquot_t *);
74
75 STATIC int      xfs_qm_init_quotainos(xfs_mount_t *);
76 STATIC int      xfs_qm_init_quotainfo(xfs_mount_t *);
77 STATIC int      xfs_qm_shake(int, gfp_t);
78
/*
 * Low-memory callback: the VM calls xfs_qm_shake() through this to ask
 * us to release cached dquots.  Registered in xfs_Gqm_init(),
 * unregistered in xfs_qm_destroy().
 */
79 static struct shrinker xfs_qm_shaker = {
80         .shrink = xfs_qm_shake,
81         .seeks = DEFAULT_SEEKS,
82 };
83
84 #ifdef DEBUG
85 extern mutex_t  qcheck_lock;
86 #endif
87
/*
 * Debug helper: dump every dquot on a hash/free list, following the
 * given NXT link field, with id/type/counts/refcount for each entry.
 * Compiles to a no-op in non-QUOTADEBUG builds.  (Macro rather than a
 * function because the link-field name is a parameter.)
 */
88 #ifdef QUOTADEBUG
89 #define XQM_LIST_PRINT(l, NXT, title) \
90 { \
91         xfs_dquot_t     *dqp; int i = 0; \
92         cmn_err(CE_DEBUG, "%s (#%d)", title, (int) (l)->qh_nelems); \
93         for (dqp = (l)->qh_next; dqp != NULL; dqp = dqp->NXT) { \
94                 cmn_err(CE_DEBUG, "   %d.  \"%d (%s)\"   " \
95                                   "bcnt = %d, icnt = %d, refs = %d", \
96                         ++i, (int) be32_to_cpu(dqp->q_core.d_id), \
97                         DQFLAGTO_TYPESTR(dqp),       \
98                         (int) be64_to_cpu(dqp->q_core.d_bcount), \
99                         (int) be64_to_cpu(dqp->q_core.d_icount), \
100                         (int) dqp->q_nrefs);  } \
101 }
102 #else
103 #define XQM_LIST_PRINT(l, NXT, title) do { } while (0)
104 #endif
105
106 /*
107  * Initialize the XQM structure.
108  * Note that there is not one quota manager per file system.
109  */
110 STATIC struct xfs_qm *
111 xfs_Gqm_init(void)
112 {
113         xfs_dqhash_t    *udqhash, *gdqhash;
114         xfs_qm_t        *xqm;
115         size_t          hsize;
116         uint            i;
117
118         /*
119          * Initialize the dquot hash tables.
120          */
            /*
             * kmem_zalloc_greedy() picks a size between the LOW/HIGH
             * bounds and reports the byte count it got back in hsize;
             * the group table is then sized to match the user table.
             * NOTE(review): KM_MAYFAIL means udqhash may be NULL on
             * allocation failure, yet only the ASSERTs below check it
             * — confirm this is acceptable in production builds.
             */
121         udqhash = kmem_zalloc_greedy(&hsize,
122                                      XFS_QM_HASHSIZE_LOW * sizeof(xfs_dqhash_t),
123                                      XFS_QM_HASHSIZE_HIGH * sizeof(xfs_dqhash_t),
124                                      KM_SLEEP | KM_MAYFAIL | KM_LARGE);
125         gdqhash = kmem_zalloc(hsize, KM_SLEEP | KM_LARGE);
            /* bytes -> bucket count; estimate ~256 dquots per bucket */
126         hsize /= sizeof(xfs_dqhash_t);
127         ndquot = hsize << 8;
128
129         xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP);
130         xqm->qm_dqhashmask = hsize - 1;
131         xqm->qm_usr_dqhtable = udqhash;
132         xqm->qm_grp_dqhtable = gdqhash;
133         ASSERT(xqm->qm_usr_dqhtable != NULL);
134         ASSERT(xqm->qm_grp_dqhtable != NULL);
135
136         for (i = 0; i < hsize; i++) {
137                 xfs_qm_list_init(&(xqm->qm_usr_dqhtable[i]), "uxdqh", i);
138                 xfs_qm_list_init(&(xqm->qm_grp_dqhtable[i]), "gxdqh", i);
139         }
140
141         /*
142          * Freelist of all dquots of all file systems
143          */
144         xfs_qm_freelist_init(&(xqm->qm_dqfreelist));
145
146         /*
147          * dquot zone. we register our own low-memory callback.
148          */
            /* the zone is global: create it only on the very first init */
149         if (!qm_dqzone) {
150                 xqm->qm_dqzone = kmem_zone_init(sizeof(xfs_dquot_t),
151                                                 "xfs_dquots");
152                 qm_dqzone = xqm->qm_dqzone;
153         } else
154                 xqm->qm_dqzone = qm_dqzone;
155
            /* hook our reclaim callback into the VM's shrinker machinery */
156         register_shrinker(&xfs_qm_shaker);
157
158         /*
159          * The t_dqinfo portion of transactions.
160          */
161         if (!qm_dqtrxzone) {
162                 xqm->qm_dqtrxzone = kmem_zone_init(sizeof(xfs_dquot_acct_t),
163                                                    "xfs_dqtrx");
164                 qm_dqtrxzone = xqm->qm_dqtrxzone;
165         } else
166                 xqm->qm_dqtrxzone = qm_dqtrxzone;
167
168         atomic_set(&xqm->qm_totaldquots, 0);
169         xqm->qm_dqfree_ratio = XFS_QM_DQFREE_RATIO;
170         xqm->qm_nrefs = 0;
171 #ifdef DEBUG
172         mutex_init(&qcheck_lock);
173 #endif
174         return xqm;
175 }
176
177 /*
178  * Destroy the global quota manager when its reference count goes to zero.
 * Called from xfs_qm_rele_quotafs_ref() with the global XQM lock held.
 * Note: the shared zones (qm_dqzone/qm_dqtrxzone) are deliberately left
 * intact for reuse by a later xfs_Gqm_init().
179  */
180 STATIC void
181 xfs_qm_destroy(
182         struct xfs_qm   *xqm)
183 {
184         int             hsize, i;
185
186         ASSERT(xqm != NULL);
187         ASSERT(xqm->qm_nrefs == 0);
            /* stop VM reclaim callbacks before dismantling the hash tables */
188         unregister_shrinker(&xfs_qm_shaker);
189         hsize = xqm->qm_dqhashmask + 1;
190         for (i = 0; i < hsize; i++) {
191                 xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
192                 xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i]));
193         }
194         kmem_free(xqm->qm_usr_dqhtable);
195         kmem_free(xqm->qm_grp_dqhtable);
196         xqm->qm_usr_dqhtable = NULL;
197         xqm->qm_grp_dqhtable = NULL;
198         xqm->qm_dqhashmask = 0;
199         xfs_qm_freelist_destroy(&(xqm->qm_dqfreelist));
200 #ifdef DEBUG
201         mutex_destroy(&qcheck_lock);
202 #endif
203         kmem_free(xqm);
204 }
205
206 /*
207  * Called at mount time to let XQM know that another file system is
208  * starting quotas. This isn't crucial information as the individual mount
209  * structures are pretty independent, but it helps the XQM keep a
210  * global view of what's going on.
 * Lazily creates the global quota manager on first use.  Always returns 0.
211  */
212 /* ARGSUSED */
213 STATIC int
214 xfs_qm_hold_quotafs_ref(
215         struct xfs_mount *mp)
216 {
217         /*
218          * Need to lock the xfs_Gqm structure for things like this. For example,
219          * the structure could disappear between the entry to this routine and
220          * a HOLD operation if not locked.
221          */
            /*
             * NOTE(review): this is taken while xfs_Gqm may still be NULL,
             * so XFS_QM_LOCK() presumably resolves to the global
             * xfs_Gqm_lock rather than dereferencing its argument —
             * verify against the macro definition in xfs_qm.h.
             */
222         XFS_QM_LOCK(xfs_Gqm);
223
224         if (xfs_Gqm == NULL)
225                 xfs_Gqm = xfs_Gqm_init();
226         /*
227          * We can keep a list of all filesystems with quotas mounted for
228          * debugging and statistical purposes, but ...
229          * Just take a reference and get out.
230          */
231         XFS_QM_HOLD(xfs_Gqm);
232         XFS_QM_UNLOCK(xfs_Gqm);
233
234         return 0;
235 }
236
237
238 /*
239  * Release the reference that a filesystem took at mount time,
240  * so that we know when we need to destroy the entire quota manager.
241  */
242 /* ARGSUSED */
243 STATIC void
244 xfs_qm_rele_quotafs_ref(
245         struct xfs_mount *mp)
246 {
247         xfs_dquot_t     *dqp, *nextdqp;
248
249         ASSERT(xfs_Gqm);
250         ASSERT(xfs_Gqm->qm_nrefs > 0);
251
252         /*
253          * Go thru the freelist and destroy all inactive dquots.
254          */
255         xfs_qm_freelist_lock(xfs_Gqm);
256
257         for (dqp = xfs_Gqm->qm_dqfreelist.qh_next;
258              dqp != (xfs_dquot_t *)&(xfs_Gqm->qm_dqfreelist); ) {
259                 xfs_dqlock(dqp);
260                 nextdqp = dqp->dq_flnext;
261                 if (dqp->dq_flags & XFS_DQ_INACTIVE) {
262                         ASSERT(dqp->q_mount == NULL);
263                         ASSERT(! XFS_DQ_IS_DIRTY(dqp));
264                         ASSERT(dqp->HL_PREVP == NULL);
265                         ASSERT(dqp->MPL_PREVP == NULL);
266                         XQM_FREELIST_REMOVE(dqp);
267                         xfs_dqunlock(dqp);
268                         xfs_qm_dqdestroy(dqp);
269                 } else {
270                         xfs_dqunlock(dqp);
271                 }
272                 dqp = nextdqp;
273         }
274         xfs_qm_freelist_unlock(xfs_Gqm);
275
276         /*
277          * Destroy the entire XQM. If somebody mounts with quotaon, this'll
278          * be restarted.
279          */
280         XFS_QM_LOCK(xfs_Gqm);
281         XFS_QM_RELE(xfs_Gqm);
282         if (xfs_Gqm->qm_nrefs == 0) {
283                 xfs_qm_destroy(xfs_Gqm);
284                 xfs_Gqm = NULL;
285         }
286         XFS_QM_UNLOCK(xfs_Gqm);
287 }
288
289 /*
290  * Just destroy the quotainfo structure.
291  */
292 void
293 xfs_qm_unmount_quotadestroy(
294         xfs_mount_t     *mp)
295 {
296         if (mp->m_quotainfo)
297                 xfs_qm_destroy_quotainfo(mp);
298 }
299
300
301 /*
302  * This is called from xfs_mountfs to start quotas and initialize all
303  * necessary data structures like quotainfo.  This is also responsible for
304  * running a quotacheck as necessary.  We are guaranteed that the superblock
305  * is consistently read in at this point.
306  *
307  * If we fail here, the mount will continue with quota turned off. We don't
308  * need to inidicate success or failure at all.
309  */
310 void
311 xfs_qm_mount_quotas(
312         xfs_mount_t     *mp)
313 {
314         int             error = 0;
315         uint            sbf;
316
317         /*
318          * If quotas on realtime volumes is not supported, we disable
319          * quotas immediately.
320          */
321         if (mp->m_sb.sb_rextents) {
322                 cmn_err(CE_NOTE,
323                         "Cannot turn on quotas for realtime filesystem %s",
324                         mp->m_fsname);
325                 mp->m_qflags = 0;
326                 goto write_changes;
327         }
328
329         ASSERT(XFS_IS_QUOTA_RUNNING(mp));
330
331         /*
332          * Allocate the quotainfo structure inside the mount struct, and
333          * create quotainode(s), and change/rev superblock if necessary.
334          */
335         error = xfs_qm_init_quotainfo(mp);
336         if (error) {
337                 /*
338                  * We must turn off quotas.
339                  */
340                 ASSERT(mp->m_quotainfo == NULL);
341                 mp->m_qflags = 0;
342                 goto write_changes;
343         }
344         /*
345          * If any of the quotas are not consistent, do a quotacheck.
346          */
347         if (XFS_QM_NEED_QUOTACHECK(mp)) {
348                 error = xfs_qm_quotacheck(mp);
349                 if (error) {
350                         /* Quotacheck failed and disabled quotas. */
351                         return;
352                 }
353         }
354         /* 
355          * If one type of quotas is off, then it will lose its
356          * quotachecked status, since we won't be doing accounting for
357          * that type anymore.
358          */
359         if (!XFS_IS_UQUOTA_ON(mp))
360                 mp->m_qflags &= ~XFS_UQUOTA_CHKD;
361         if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp)))
362                 mp->m_qflags &= ~XFS_OQUOTA_CHKD;
363
364  write_changes:
365         /*
366          * We actually don't have to acquire the m_sb_lock at all.
367          * This can only be called from mount, and that's single threaded. XXX
368          */
369         spin_lock(&mp->m_sb_lock);
370         sbf = mp->m_sb.sb_qflags;
371         mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
372         spin_unlock(&mp->m_sb_lock);
373
374         if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
375                 if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
376                         /*
377                          * We could only have been turning quotas off.
378                          * We aren't in very good shape actually because
379                          * the incore structures are convinced that quotas are
380                          * off, but the on disk superblock doesn't know that !
381                          */
382                         ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
383                         xfs_fs_cmn_err(CE_ALERT, mp,
384                                 "XFS mount_quotas: Superblock update failed!");
385                 }
386         }
387
388         if (error) {
389                 xfs_fs_cmn_err(CE_WARN, mp,
390                         "Failed to initialize disk quotas.");
391         }
392         return;
393 }
394
395 /*
396  * Called from the vfsops layer.
 * Detaches dquots from the special inodes, synchronously flushes the
 * quota inodes to disk, then releases the quotainfo's references on them.
 * Returns 0 or an XFS error code (notably EFSCORRUPTED from xfs_iflush).
397  */
398 int
399 xfs_qm_unmount_quotas(
400         xfs_mount_t     *mp)
401 {
402         xfs_inode_t     *uqp, *gqp;
403         int             error = 0;
404
405         /*
406          * Release the dquots that root inode, et al might be holding,
407          * before we flush quotas and blow away the quotainfo structure.
408          */
409         ASSERT(mp->m_rootip);
410         xfs_qm_dqdetach(mp->m_rootip);
411         if (mp->m_rbmip)
412                 xfs_qm_dqdetach(mp->m_rbmip);
413         if (mp->m_rsumip)
414                 xfs_qm_dqdetach(mp->m_rsumip);
415
416         /*
417          * Flush out the quota inodes.
418          */
419         uqp = gqp = NULL;
420         if (mp->m_quotainfo) {
421                 if ((uqp = mp->m_quotainfo->qi_uquotaip) != NULL) {
422                         xfs_ilock(uqp, XFS_ILOCK_EXCL);
423                         xfs_iflock(uqp);
424                         error = xfs_iflush(uqp, XFS_IFLUSH_SYNC);
425                         xfs_iunlock(uqp, XFS_ILOCK_EXCL);
426                         if (unlikely(error == EFSCORRUPTED)) {
427                                 XFS_ERROR_REPORT("xfs_qm_unmount_quotas(1)",
428                                                  XFS_ERRLEVEL_LOW, mp);
429                                 goto out;
430                         }
431                 }
432                 if ((gqp = mp->m_quotainfo->qi_gquotaip) != NULL) {
433                         xfs_ilock(gqp, XFS_ILOCK_EXCL);
434                         xfs_iflock(gqp);
435                         error = xfs_iflush(gqp, XFS_IFLUSH_SYNC);
436                         xfs_iunlock(gqp, XFS_ILOCK_EXCL);
437                         if (unlikely(error == EFSCORRUPTED)) {
438                                 XFS_ERROR_REPORT("xfs_qm_unmount_quotas(2)",
439                                                  XFS_ERRLEVEL_LOW, mp);
440                                 goto out;
441                         }
442                 }
443         }
            /*
             * NOTE(review): the EFSCORRUPTED paths jump straight to "out"
             * and skip these IRELEs, leaving the quotainfo's inode
             * references held — confirm that is intentional for a
             * corrupted filesystem.
             */
444         if (uqp) {
445                  IRELE(uqp);
446                  mp->m_quotainfo->qi_uquotaip = NULL;
447         }
448         if (gqp) {
449                 IRELE(gqp);
450                 mp->m_quotainfo->qi_gquotaip = NULL;
451         }
452 out:
453         return XFS_ERROR(error);
454 }
455
456 /*
457  * Flush all dquots of the given file system to disk. The dquots are
458  * _not_ purged from memory here, just their data written to disk.
 * Returns 0 on success or the first xfs_qm_dqflush() error encountered.
459  */
460 STATIC int
461 xfs_qm_dqflush_all(
462         xfs_mount_t     *mp,
463         int             flags)
464 {
465         int             recl;
466         xfs_dquot_t     *dqp;
467         int             niters;
468         int             error;
469
470         if (mp->m_quotainfo == NULL)
471                 return 0;
            /* NOTE(review): niters is set here but never used below */
472         niters = 0;
473 again:
474         xfs_qm_mplist_lock(mp);
475         FOREACH_DQUOT_IN_MP(dqp, mp) {
476                 xfs_dqlock(dqp);
477                 if (! XFS_DQ_IS_DIRTY(dqp)) {
478                         xfs_dqunlock(dqp);
479                         continue;
480                 }
481                 xfs_dqtrace_entry(dqp, "FLUSHALL: DQDIRTY");
482                 /* XXX a sentinel would be better */
                /*
                 * Snapshot the reclaim counter so we can tell, after
                 * dropping the mplist lock for the flush, whether the
                 * list changed underneath us and we must restart.
                 */
483                 recl = XFS_QI_MPLRECLAIMS(mp);
484                 if (!xfs_dqflock_nowait(dqp)) {
485                         /*
486                          * If we can't grab the flush lock then check
487                          * to see if the dquot has been flushed delayed
488                          * write.  If so, grab its buffer and send it
489                          * out immediately.  We'll be able to acquire
490                          * the flush lock when the I/O completes.
491                          */
492                         xfs_qm_dqflock_pushbuf_wait(dqp);
493                 }
494                 /*
495                  * Let go of the mplist lock. We don't want to hold it
496                  * across a disk write.
497                  */
498                 xfs_qm_mplist_unlock(mp);
499                 error = xfs_qm_dqflush(dqp, flags);
500                 xfs_dqunlock(dqp);
501                 if (error)
502                         return error;
503
504                 xfs_qm_mplist_lock(mp);
505                 if (recl != XFS_QI_MPLRECLAIMS(mp)) {
506                         xfs_qm_mplist_unlock(mp);
507                         /* XXX restart limit */
508                         goto again;
509                 }
510         }
511
512         xfs_qm_mplist_unlock(mp);
513         /* return ! busy */
514         return 0;
515 }
516 /*
517  * Release the group dquot pointers the user dquots may be
518  * carrying around as a hint. mplist is locked on entry and exit.
 * The hint reference was taken in dqget; dropping it here lets the
 * group dquots be purged/reclaimed independently of their users.
519  */
520 STATIC void
521 xfs_qm_detach_gdquots(
522         xfs_mount_t     *mp)
523 {
524         xfs_dquot_t     *dqp, *gdqp;
525         int             nrecl;
526
527  again:
528         ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
529         dqp = XFS_QI_MPLNEXT(mp);
530         while (dqp) {
531                 xfs_dqlock(dqp);
                /* detach the hint under the dquot lock, release it after */
532                 if ((gdqp = dqp->q_gdquot)) {
533                         xfs_dqlock(gdqp);
534                         dqp->q_gdquot = NULL;
535                 }
536                 xfs_dqunlock(dqp);
537
538                 if (gdqp) {
539                         /*
540                          * Can't hold the mplist lock across a dqput.
541                          * XXXmust convert to marker based iterations here.
542                          */
543                         nrecl = XFS_QI_MPLRECLAIMS(mp);
544                         xfs_qm_mplist_unlock(mp);
545                         xfs_qm_dqput(gdqp);
546
547                         xfs_qm_mplist_lock(mp);
                        /* list changed while unlocked: restart from the top */
548                         if (nrecl != XFS_QI_MPLRECLAIMS(mp))
549                                 goto again;
550                 }
551                 dqp = dqp->MPL_NEXT;
552         }
553 }
554
555 /*
556  * Go through all the incore dquots of this file system and take them
557  * off the mplist and hashlist, if the dquot type matches the dqtype
558  * parameter. This is used when turning off quota accounting for
559  * users and/or groups, as well as when the filesystem is unmounting.
 *
 * Returns the accumulated xfs_qm_dqpurge() results — presumably the
 * number of dquots that were too busy to purge this pass; the caller
 * (xfs_qm_dqpurge_all) retries while this is non-zero.
560  */
561 STATIC int
562 xfs_qm_dqpurge_int(
563         xfs_mount_t     *mp,
564         uint            flags) /* QUOTAOFF/UMOUNTING/UQUOTA/PQUOTA/GQUOTA */
565 {
566         xfs_dquot_t     *dqp;
567         uint            dqtype;
568         int             nrecl;
569         xfs_dquot_t     *nextdqp;
570         int             nmisses;
571
572         if (mp->m_quotainfo == NULL)
573                 return 0;
574
            /* translate the QMOPT selection flags into dquot-type bits */
575         dqtype = (flags & XFS_QMOPT_UQUOTA) ? XFS_DQ_USER : 0;
576         dqtype |= (flags & XFS_QMOPT_PQUOTA) ? XFS_DQ_PROJ : 0;
577         dqtype |= (flags & XFS_QMOPT_GQUOTA) ? XFS_DQ_GROUP : 0;
578
579         xfs_qm_mplist_lock(mp);
580
581         /*
582          * In the first pass through all incore dquots of this filesystem,
583          * we release the group dquot pointers the user dquots may be
584          * carrying around as a hint. We need to do this irrespective of
585          * what's being turned off.
586          */
587         xfs_qm_detach_gdquots(mp);
588
589       again:
590         nmisses = 0;
591         ASSERT(XFS_QM_IS_MPLIST_LOCKED(mp));
592         /*
593          * Try to get rid of all of the unwanted dquots. The idea is to
594          * get them off mplist and hashlist, but leave them on freelist.
595          */
596         dqp = XFS_QI_MPLNEXT(mp);
597         while (dqp) {
598                 /*
599                  * It's OK to look at the type without taking dqlock here.
600                  * We're holding the mplist lock here, and that's needed for
601                  * a dqreclaim.
602                  */
603                 if ((dqp->dq_flags & dqtype) == 0) {
604                         dqp = dqp->MPL_NEXT;
605                         continue;
606                 }
607
                /*
                 * Lock order is hashlock before mplist lock, so if the
                 * trylock fails we must drop the mplist lock, take the
                 * hash lock blocking, and re-validate via the reclaim
                 * counter.
                 */
608                 if (! xfs_qm_dqhashlock_nowait(dqp)) {
609                         nrecl = XFS_QI_MPLRECLAIMS(mp);
610                         xfs_qm_mplist_unlock(mp);
611                         XFS_DQ_HASH_LOCK(dqp->q_hash);
612                         xfs_qm_mplist_lock(mp);
613
614                         /*
615                          * XXXTheoretically, we can get into a very long
616                          * ping pong game here.
617                          * No one can be adding dquots to the mplist at
618                          * this point, but somebody might be taking things off.
619                          */
620                         if (nrecl != XFS_QI_MPLRECLAIMS(mp)) {
621                                 XFS_DQ_HASH_UNLOCK(dqp->q_hash);
622                                 goto again;
623                         }
624                 }
625
626                 /*
627                  * Take the dquot off the mplist and hashlist. It may remain on
628                  * freelist in INACTIVE state.
629                  */
630                 nextdqp = dqp->MPL_NEXT;
631                 nmisses += xfs_qm_dqpurge(dqp);
632                 dqp = nextdqp;
633         }
634         xfs_qm_mplist_unlock(mp);
635         return nmisses;
636 }
637
638 int
639 xfs_qm_dqpurge_all(
640         xfs_mount_t     *mp,
641         uint            flags)
642 {
643         int             ndquots;
644
645         /*
646          * Purge the dquot cache.
647          * None of the dquots should really be busy at this point.
648          */
649         if (mp->m_quotainfo) {
650                 while ((ndquots = xfs_qm_dqpurge_int(mp, flags))) {
651                         delay(ndquots * 10);
652                 }
653         }
654         return 0;
655 }
656
/*
 * Attach one dquot (of the given type/id) to the inode, storing it via
 * IO_idqpp (&i_udquot or &i_gdquot).  Fast paths: the dquot may already
 * be in the inode, or cached as a hint inside udqhint; otherwise fall
 * back to xfs_qm_dqget().  @doalloc: allocate the dquot on disk if it
 * doesn't exist; @dolock: return the dquot(s) locked.  Caller holds the
 * inode ILOCK_EXCL.  Returns 0 or an error from xfs_qm_dqget()
 * (e.g. ENOENT, ESRCH).
 */
657 STATIC int
658 xfs_qm_dqattach_one(
659         xfs_inode_t     *ip,
660         xfs_dqid_t      id,
661         uint            type,
662         uint            doalloc,
663         uint            dolock,
664         xfs_dquot_t     *udqhint, /* hint */
665         xfs_dquot_t     **IO_idqpp)
666 {
667         xfs_dquot_t     *dqp;
668         int             error;
669
670         ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
671         error = 0;
672         /*
673          * See if we already have it in the inode itself. IO_idqpp is
674          * &i_udquot or &i_gdquot. This made the code look weird, but
675          * made the logic a lot simpler.
676          */
677         if ((dqp = *IO_idqpp)) {
678                 if (dolock)
679                         xfs_dqlock(dqp);
680                 xfs_dqtrace_entry(dqp, "DQATTACH: found in ip");
681                 goto done;
682         }
683
684         /*
685          * udqhint is the i_udquot field in inode, and is non-NULL only
686          * when the type arg is group/project. Its purpose is to save a
687          * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
688          * the user dquot.
689          */
690         ASSERT(!udqhint || type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
            /* if dolock, the caller already handed us udqhint locked */
691         if (udqhint && !dolock)
692                 xfs_dqlock(udqhint);
693
694         /*
695          * No need to take dqlock to look at the id.
696          * The ID can't change until it gets reclaimed, and it won't
697          * be reclaimed as long as we have a ref from inode and we hold
698          * the ilock.
699          */
700         if (udqhint &&
701             (dqp = udqhint->q_gdquot) &&
702             (be32_to_cpu(dqp->q_core.d_id) == id)) {
703                 ASSERT(XFS_DQ_IS_LOCKED(udqhint));
                /* hint matches: take our own reference and hand it out */
704                 xfs_dqlock(dqp);
705                 XFS_DQHOLD(dqp);
706                 ASSERT(*IO_idqpp == NULL);
707                 *IO_idqpp = dqp;
708                 if (!dolock) {
709                         xfs_dqunlock(dqp);
710                         xfs_dqunlock(udqhint);
711                 }
712                 goto done;
713         }
714         /*
715          * We can't hold a dquot lock when we call the dqget code.
716          * We'll deadlock in no time, because of (not conforming to)
717          * lock ordering - the inodelock comes before any dquot lock,
718          * and we may drop and reacquire the ilock in xfs_qm_dqget().
719          */
720         if (udqhint)
721                 xfs_dqunlock(udqhint);
722         /*
723          * Find the dquot from somewhere. This bumps the
724          * reference count of dquot and returns it locked.
725          * This can return ENOENT if dquot didn't exist on
726          * disk and we didn't ask it to allocate;
727          * ESRCH if quotas got turned off suddenly.
728          */
729         if ((error = xfs_qm_dqget(ip->i_mount, ip, id, type,
730                                  doalloc|XFS_QMOPT_DOWARN, &dqp))) {
                /* error path: restore udqhint's locked state for the caller */
731                 if (udqhint && dolock)
732                         xfs_dqlock(udqhint);
733                 goto done;
734         }
735
736         xfs_dqtrace_entry(dqp, "DQATTACH: found by dqget");
737         /*
738          * dqget may have dropped and re-acquired the ilock, but it guarantees
739          * that the dquot returned is the one that should go in the inode.
740          */
741         *IO_idqpp = dqp;
742         ASSERT(dqp);
743         ASSERT(XFS_DQ_IS_LOCKED(dqp));
744         if (! dolock) {
745                 xfs_dqunlock(dqp);
746                 goto done;
747         }
748         if (! udqhint)
749                 goto done;
750
            /*
             * Both dquots must end up locked.  Trylock first to avoid a
             * lock-order inversion; on failure, drop and retake in the
             * udqhint-then-dqp order.
             */
751         ASSERT(udqhint);
752         ASSERT(dolock);
753         ASSERT(XFS_DQ_IS_LOCKED(dqp));
754         if (! xfs_qm_dqlock_nowait(udqhint)) {
755                 xfs_dqunlock(dqp);
756                 xfs_dqlock(udqhint);
757                 xfs_dqlock(dqp);
758         }
759       done:
760 #ifdef QUOTADEBUG
761         if (udqhint) {
762                 if (dolock)
763                         ASSERT(XFS_DQ_IS_LOCKED(udqhint));
764         }
765         if (! error) {
766                 if (dolock)
767                         ASSERT(XFS_DQ_IS_LOCKED(dqp));
768         }
769 #endif
770         return error;
771 }
772
773
774 /*
775  * Given a udquot and gdquot, attach a ptr to the group dquot in the
776  * udquot as a hint for future lookups. The idea sounds simple, but the
777  * execution isn't, because the udquot might have a group dquot attached
778  * already and getting rid of that gets us into lock ordering constraints.
779  * The process is complicated more by the fact that the dquots may or may not
780  * be locked on entry.
 * @locked: both dquots are already locked by the caller, and are left
 * locked on return; otherwise they are locked/unlocked internally.
781  */
782 STATIC void
783 xfs_qm_dqattach_grouphint(
784         xfs_dquot_t     *udq,
785         xfs_dquot_t     *gdq,
786         uint            locked)
787 {
788         xfs_dquot_t     *tmp;
789
790 #ifdef QUOTADEBUG
791         if (locked) {
792                 ASSERT(XFS_DQ_IS_LOCKED(udq));
793                 ASSERT(XFS_DQ_IS_LOCKED(gdq));
794         }
795 #endif
796         if (! locked)
797                 xfs_dqlock(udq);
798
799         if ((tmp = udq->q_gdquot)) {
                /* already hinting at this gdquot: nothing to do */
800                 if (tmp == gdq) {
801                         if (! locked)
802                                 xfs_dqunlock(udq);
803                         return;
804                 }
805
806                 udq->q_gdquot = NULL;
807                 /*
808                  * We can't keep any dqlocks when calling dqrele,
809                  * because the freelist lock comes before dqlocks.
810                  */
811                 xfs_dqunlock(udq);
812                 if (locked)
813                         xfs_dqunlock(gdq);
814                 /*
815                  * we took a hard reference once upon a time in dqget,
816                  * so give it back when the udquot no longer points at it
817                  * dqput() does the unlocking of the dquot.
818                  */
819                 xfs_qm_dqrele(tmp);
820
821                 xfs_dqlock(udq);
822                 xfs_dqlock(gdq);
823
824         } else {
825                 ASSERT(XFS_DQ_IS_LOCKED(udq));
826                 if (! locked) {
827                         xfs_dqlock(gdq);
828                 }
829         }
830
831         ASSERT(XFS_DQ_IS_LOCKED(udq));
832         ASSERT(XFS_DQ_IS_LOCKED(gdq));
833         /*
834          * Somebody could have attached a gdquot here,
835          * when we dropped the uqlock. If so, just do nothing.
836          */
837         if (udq->q_gdquot == NULL) {
                /* take a reference on behalf of the hint pointer */
838                 XFS_DQHOLD(gdq);
839                 udq->q_gdquot = gdq;
840         }
841         if (! locked) {
842                 xfs_dqunlock(gdq);
843                 xfs_dqunlock(udq);
844         }
845 }
846
847
848 /*
849  * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
850  * into account.
851  * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
852  * If XFS_QMOPT_DQLOCK, the dquot(s) will be returned locked. This option pretty
853  * much made this code a complete mess, but it has been pretty useful.
854  * If XFS_QMOPT_ILOCKED, then inode sent is already locked EXCL.
855  * Inode may get unlocked and relocked in here, and the caller must deal with
856  * the consequences.
857  */
858 int
859 xfs_qm_dqattach(
860         xfs_inode_t     *ip,
861         uint            flags)
862 {
863         xfs_mount_t     *mp = ip->i_mount;
864         uint            nquotas = 0;
865         int             error = 0;
866
867         if ((! XFS_IS_QUOTA_ON(mp)) ||
868             (! XFS_NOT_DQATTACHED(mp, ip)) ||
869             (ip->i_ino == mp->m_sb.sb_uquotino) ||
870             (ip->i_ino == mp->m_sb.sb_gquotino))
871                 return 0;
872
873         ASSERT((flags & XFS_QMOPT_ILOCKED) == 0 ||
874                xfs_isilocked(ip, XFS_ILOCK_EXCL));
875
876         if (! (flags & XFS_QMOPT_ILOCKED))
877                 xfs_ilock(ip, XFS_ILOCK_EXCL);
878
879         if (XFS_IS_UQUOTA_ON(mp)) {
880                 error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
881                                                 flags & XFS_QMOPT_DQALLOC,
882                                                 flags & XFS_QMOPT_DQLOCK,
883                                                 NULL, &ip->i_udquot);
884                 if (error)
885                         goto done;
886                 nquotas++;
887         }
888
889         ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
890         if (XFS_IS_OQUOTA_ON(mp)) {
891                 error = XFS_IS_GQUOTA_ON(mp) ?
892                         xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
893                                                 flags & XFS_QMOPT_DQALLOC,
894                                                 flags & XFS_QMOPT_DQLOCK,
895                                                 ip->i_udquot, &ip->i_gdquot) :
896                         xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQ_PROJ,
897                                                 flags & XFS_QMOPT_DQALLOC,
898                                                 flags & XFS_QMOPT_DQLOCK,
899                                                 ip->i_udquot, &ip->i_gdquot);
900                 /*
901                  * Don't worry about the udquot that we may have
902                  * attached above. It'll get detached, if not already.
903                  */
904                 if (error)
905                         goto done;
906                 nquotas++;
907         }
908
909         /*
910          * Attach this group quota to the user quota as a hint.
911          * This WON'T, in general, result in a thrash.
912          */
913         if (nquotas == 2) {
914                 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
915                 ASSERT(ip->i_udquot);
916                 ASSERT(ip->i_gdquot);
917
918                 /*
919                  * We may or may not have the i_udquot locked at this point,
920                  * but this check is OK since we don't depend on the i_gdquot to
921                  * be accurate 100% all the time. It is just a hint, and this
922                  * will succeed in general.
923                  */
924                 if (ip->i_udquot->q_gdquot == ip->i_gdquot)
925                         goto done;
926                 /*
927                  * Attach i_gdquot to the gdquot hint inside the i_udquot.
928                  */
929                 xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot,
930                                          flags & XFS_QMOPT_DQLOCK);
931         }
932
933       done:
934
935 #ifdef QUOTADEBUG
936         if (! error) {
937                 if (ip->i_udquot) {
938                         if (flags & XFS_QMOPT_DQLOCK)
939                                 ASSERT(XFS_DQ_IS_LOCKED(ip->i_udquot));
940                 }
941                 if (ip->i_gdquot) {
942                         if (flags & XFS_QMOPT_DQLOCK)
943                                 ASSERT(XFS_DQ_IS_LOCKED(ip->i_gdquot));
944                 }
945                 if (XFS_IS_UQUOTA_ON(mp))
946                         ASSERT(ip->i_udquot);
947                 if (XFS_IS_OQUOTA_ON(mp))
948                         ASSERT(ip->i_gdquot);
949         }
950 #endif
951
952         if (! (flags & XFS_QMOPT_ILOCKED))
953                 xfs_iunlock(ip, XFS_ILOCK_EXCL);
954
955 #ifdef QUOTADEBUG
956         else
957                 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
958 #endif
959         return error;
960 }
961
962 /*
963  * Release dquots (and their references) if any.
964  * The inode should be locked EXCL except when this's called by
965  * xfs_ireclaim.
966  */
967 void
968 xfs_qm_dqdetach(
969         xfs_inode_t     *ip)
970 {
971         if (!(ip->i_udquot || ip->i_gdquot))
972                 return;
973
974         ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
975         ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
976         if (ip->i_udquot) {
977                 xfs_dqtrace_entry_ino(ip->i_udquot, "DQDETTACH", ip);
978                 xfs_qm_dqrele(ip->i_udquot);
979                 ip->i_udquot = NULL;
980         }
981         if (ip->i_gdquot) {
982                 xfs_dqtrace_entry_ino(ip->i_gdquot, "DQDETTACH", ip);
983                 xfs_qm_dqrele(ip->i_gdquot);
984                 ip->i_gdquot = NULL;
985         }
986 }
987
988 /*
989  * This is called to sync quotas. We can be told to use non-blocking
990  * semantics by either the SYNC_BDFLUSH flag or the absence of the
991  * SYNC_WAIT flag.
992  */
993 int
994 xfs_qm_sync(
995         xfs_mount_t     *mp,
996         int             flags)
997 {
998         int             recl, restarts;
999         xfs_dquot_t     *dqp;
1000         uint            flush_flags;
1001         boolean_t       nowait;
1002         int             error;
1003
1004         if (! XFS_IS_QUOTA_ON(mp))
1005                 return 0;
1006
1007         restarts = 0;
1008         /*
1009          * We won't block unless we are asked to.
1010          */
1011         nowait = (boolean_t)(flags & SYNC_BDFLUSH || (flags & SYNC_WAIT) == 0);
1012
1013   again:
1014         xfs_qm_mplist_lock(mp);
1015         /*
1016          * dqpurge_all() also takes the mplist lock and iterate thru all dquots
1017          * in quotaoff. However, if the QUOTA_ACTIVE bits are not cleared
1018          * when we have the mplist lock, we know that dquots will be consistent
1019          * as long as we have it locked.
1020          */
1021         if (! XFS_IS_QUOTA_ON(mp)) {
1022                 xfs_qm_mplist_unlock(mp);
1023                 return 0;
1024         }
1025         FOREACH_DQUOT_IN_MP(dqp, mp) {
1026                 /*
1027                  * If this is vfs_sync calling, then skip the dquots that
1028                  * don't 'seem' to be dirty. ie. don't acquire dqlock.
1029                  * This is very similar to what xfs_sync does with inodes.
1030                  */
1031                 if (flags & SYNC_BDFLUSH) {
1032                         if (! XFS_DQ_IS_DIRTY(dqp))
1033                                 continue;
1034                 }
1035
1036                 if (nowait) {
1037                         /*
1038                          * Try to acquire the dquot lock. We are NOT out of
1039                          * lock order, but we just don't want to wait for this
1040                          * lock, unless somebody wanted us to.
1041                          */
1042                         if (! xfs_qm_dqlock_nowait(dqp))
1043                                 continue;
1044                 } else {
1045                         xfs_dqlock(dqp);
1046                 }
1047
1048                 /*
1049                  * Now, find out for sure if this dquot is dirty or not.
1050                  */
1051                 if (! XFS_DQ_IS_DIRTY(dqp)) {
1052                         xfs_dqunlock(dqp);
1053                         continue;
1054                 }
1055
1056                 /* XXX a sentinel would be better */
1057                 recl = XFS_QI_MPLRECLAIMS(mp);
1058                 if (!xfs_dqflock_nowait(dqp)) {
1059                         if (nowait) {
1060                                 xfs_dqunlock(dqp);
1061                                 continue;
1062                         }
1063                         /*
1064                          * If we can't grab the flush lock then if the caller
1065                          * really wanted us to give this our best shot, so
1066                          * see if we can give a push to the buffer before we wait
1067                          * on the flush lock. At this point, we know that
1068                          * even though the dquot is being flushed,
1069                          * it has (new) dirty data.
1070                          */
1071                         xfs_qm_dqflock_pushbuf_wait(dqp);
1072                 }
1073                 /*
1074                  * Let go of the mplist lock. We don't want to hold it
1075                  * across a disk write
1076                  */
1077                 flush_flags = (nowait) ? XFS_QMOPT_DELWRI : XFS_QMOPT_SYNC;
1078                 xfs_qm_mplist_unlock(mp);
1079                 xfs_dqtrace_entry(dqp, "XQM_SYNC: DQFLUSH");
1080                 error = xfs_qm_dqflush(dqp, flush_flags);
1081                 xfs_dqunlock(dqp);
1082                 if (error && XFS_FORCED_SHUTDOWN(mp))
1083                         return 0;       /* Need to prevent umount failure */
1084                 else if (error)
1085                         return error;
1086
1087                 xfs_qm_mplist_lock(mp);
1088                 if (recl != XFS_QI_MPLRECLAIMS(mp)) {
1089                         if (++restarts >= XFS_QM_SYNC_MAX_RESTARTS)
1090                                 break;
1091
1092                         xfs_qm_mplist_unlock(mp);
1093                         goto again;
1094                 }
1095         }
1096
1097         xfs_qm_mplist_unlock(mp);
1098         return 0;
1099 }
1100
1101
1102 /*
1103  * This initializes all the quota information that's kept in the
1104  * mount structure
1105  */
1106 STATIC int
1107 xfs_qm_init_quotainfo(
1108         xfs_mount_t     *mp)
1109 {
1110         xfs_quotainfo_t *qinf;
1111         int             error;
1112         xfs_dquot_t     *dqp;
1113
1114         ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1115
1116         /*
1117          * Tell XQM that we exist as soon as possible.
1118          */
1119         if ((error = xfs_qm_hold_quotafs_ref(mp))) {
1120                 return error;
1121         }
1122
1123         qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
1124
1125         /*
1126          * See if quotainodes are setup, and if not, allocate them,
1127          * and change the superblock accordingly.
1128          */
1129         if ((error = xfs_qm_init_quotainos(mp))) {
1130                 kmem_free(qinf);
1131                 mp->m_quotainfo = NULL;
1132                 return error;
1133         }
1134
1135         xfs_qm_list_init(&qinf->qi_dqlist, "mpdqlist", 0);
1136         qinf->qi_dqreclaims = 0;
1137
1138         /* mutex used to serialize quotaoffs */
1139         mutex_init(&qinf->qi_quotaofflock);
1140
1141         /* Precalc some constants */
1142         qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
1143         ASSERT(qinf->qi_dqchunklen);
1144         qinf->qi_dqperchunk = BBTOB(qinf->qi_dqchunklen);
1145         do_div(qinf->qi_dqperchunk, sizeof(xfs_dqblk_t));
1146
1147         mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
1148
1149         /*
1150          * We try to get the limits from the superuser's limits fields.
1151          * This is quite hacky, but it is standard quota practice.
1152          * We look at the USR dquot with id == 0 first, but if user quotas
1153          * are not enabled we goto the GRP dquot with id == 0.
1154          * We don't really care to keep separate default limits for user
1155          * and group quotas, at least not at this point.
1156          */
1157         error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)0,
1158                              XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER : 
1159                              (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
1160                                 XFS_DQ_PROJ),
1161                              XFS_QMOPT_DQSUSER|XFS_QMOPT_DOWARN,
1162                              &dqp);
1163         if (! error) {
1164                 xfs_disk_dquot_t        *ddqp = &dqp->q_core;
1165
1166                 /*
1167                  * The warnings and timers set the grace period given to
1168                  * a user or group before he or she can not perform any
1169                  * more writing. If it is zero, a default is used.
1170                  */
1171                 qinf->qi_btimelimit = ddqp->d_btimer ?
1172                         be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
1173                 qinf->qi_itimelimit = ddqp->d_itimer ?
1174                         be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
1175                 qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
1176                         be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
1177                 qinf->qi_bwarnlimit = ddqp->d_bwarns ?
1178                         be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
1179                 qinf->qi_iwarnlimit = ddqp->d_iwarns ?
1180                         be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
1181                 qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
1182                         be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
1183                 qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
1184                 qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
1185                 qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
1186                 qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
1187                 qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
1188                 qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
1189  
1190                 /*
1191                  * We sent the XFS_QMOPT_DQSUSER flag to dqget because
1192                  * we don't want this dquot cached. We haven't done a
1193                  * quotacheck yet, and quotacheck doesn't like incore dquots.
1194                  */
1195                 xfs_qm_dqdestroy(dqp);
1196         } else {
1197                 qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
1198                 qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
1199                 qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
1200                 qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
1201                 qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
1202                 qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
1203         }
1204
1205         return 0;
1206 }
1207
1208
1209 /*
1210  * Gets called when unmounting a filesystem or when all quotas get
1211  * turned off.
1212  * This purges the quota inodes, destroys locks and frees itself.
1213  */
1214 void
1215 xfs_qm_destroy_quotainfo(
1216         xfs_mount_t     *mp)
1217 {
1218         xfs_quotainfo_t *qi;
1219
1220         qi = mp->m_quotainfo;
1221         ASSERT(qi != NULL);
1222         ASSERT(xfs_Gqm != NULL);
1223
1224         /*
1225          * Release the reference that XQM kept, so that we know
1226          * when the XQM structure should be freed. We cannot assume
1227          * that xfs_Gqm is non-null after this point.
1228          */
1229         xfs_qm_rele_quotafs_ref(mp);
1230
1231         xfs_qm_list_destroy(&qi->qi_dqlist);
1232
1233         if (qi->qi_uquotaip) {
1234                 IRELE(qi->qi_uquotaip);
1235                 qi->qi_uquotaip = NULL; /* paranoia */
1236         }
1237         if (qi->qi_gquotaip) {
1238                 IRELE(qi->qi_gquotaip);
1239                 qi->qi_gquotaip = NULL;
1240         }
1241         mutex_destroy(&qi->qi_quotaofflock);
1242         kmem_free(qi);
1243         mp->m_quotainfo = NULL;
1244 }
1245
1246
1247
1248 /* ------------------- PRIVATE STATIC FUNCTIONS ----------------------- */
1249
1250 /* ARGSUSED */
1251 STATIC void
1252 xfs_qm_list_init(
1253         xfs_dqlist_t    *list,
1254         char            *str,
1255         int             n)
1256 {
1257         mutex_init(&list->qh_lock);
1258         list->qh_next = NULL;
1259         list->qh_version = 0;
1260         list->qh_nelems = 0;
1261 }
1262
1263 STATIC void
1264 xfs_qm_list_destroy(
1265         xfs_dqlist_t    *list)
1266 {
1267         mutex_destroy(&(list->qh_lock));
1268 }
1269
1270
1271 /*
1272  * Stripped down version of dqattach. This doesn't attach, or even look at the
1273  * dquots attached to the inode. The rationale is that there won't be any
1274  * attached at the time this is called from quotacheck.
1275  */
1276 STATIC int
1277 xfs_qm_dqget_noattach(
1278         xfs_inode_t     *ip,
1279         xfs_dquot_t     **O_udqpp,
1280         xfs_dquot_t     **O_gdqpp)
1281 {
1282         int             error;
1283         xfs_mount_t     *mp;
1284         xfs_dquot_t     *udqp, *gdqp;
1285
1286         ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1287         mp = ip->i_mount;
1288         udqp = NULL;
1289         gdqp = NULL;
1290
1291         if (XFS_IS_UQUOTA_ON(mp)) {
1292                 ASSERT(ip->i_udquot == NULL);
1293                 /*
1294                  * We want the dquot allocated if it doesn't exist.
1295                  */
1296                 if ((error = xfs_qm_dqget(mp, ip, ip->i_d.di_uid, XFS_DQ_USER,
1297                                          XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN,
1298                                          &udqp))) {
1299                         /*
1300                          * Shouldn't be able to turn off quotas here.
1301                          */
1302                         ASSERT(error != ESRCH);
1303                         ASSERT(error != ENOENT);
1304                         return error;
1305                 }
1306                 ASSERT(udqp);
1307         }
1308
1309         if (XFS_IS_OQUOTA_ON(mp)) {
1310                 ASSERT(ip->i_gdquot == NULL);
1311                 if (udqp)
1312                         xfs_dqunlock(udqp);
1313                 error = XFS_IS_GQUOTA_ON(mp) ?
1314                                 xfs_qm_dqget(mp, ip,
1315                                              ip->i_d.di_gid, XFS_DQ_GROUP,
1316                                              XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN,
1317                                              &gdqp) :
1318                                 xfs_qm_dqget(mp, ip,
1319                                              ip->i_d.di_projid, XFS_DQ_PROJ,
1320                                              XFS_QMOPT_DQALLOC|XFS_QMOPT_DOWARN,
1321                                              &gdqp);
1322                 if (error) {
1323                         if (udqp)
1324                                 xfs_qm_dqrele(udqp);
1325                         ASSERT(error != ESRCH);
1326                         ASSERT(error != ENOENT);
1327                         return error;
1328                 }
1329                 ASSERT(gdqp);
1330
1331                 /* Reacquire the locks in the right order */
1332                 if (udqp) {
1333                         if (! xfs_qm_dqlock_nowait(udqp)) {
1334                                 xfs_dqunlock(gdqp);
1335                                 xfs_dqlock(udqp);
1336                                 xfs_dqlock(gdqp);
1337                         }
1338                 }
1339         }
1340
1341         *O_udqpp = udqp;
1342         *O_gdqpp = gdqp;
1343
1344 #ifdef QUOTADEBUG
1345         if (udqp) ASSERT(XFS_DQ_IS_LOCKED(udqp));
1346         if (gdqp) ASSERT(XFS_DQ_IS_LOCKED(gdqp));
1347 #endif
1348         return 0;
1349 }
1350
1351 /*
1352  * Create an inode and return with a reference already taken, but unlocked
1353  * This is how we create quota inodes
1354  */
1355 STATIC int
1356 xfs_qm_qino_alloc(
1357         xfs_mount_t     *mp,
1358         xfs_inode_t     **ip,
1359         __int64_t       sbfields,
1360         uint            flags)
1361 {
1362         xfs_trans_t     *tp;
1363         int             error;
1364         int             committed;
1365
1366         tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
1367         if ((error = xfs_trans_reserve(tp,
1368                                       XFS_QM_QINOCREATE_SPACE_RES(mp),
1369                                       XFS_CREATE_LOG_RES(mp), 0,
1370                                       XFS_TRANS_PERM_LOG_RES,
1371                                       XFS_CREATE_LOG_COUNT))) {
1372                 xfs_trans_cancel(tp, 0);
1373                 return error;
1374         }
1375
1376         if ((error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0,
1377                                    &xfs_zerocr, 0, 1, ip, &committed))) {
1378                 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
1379                                  XFS_TRANS_ABORT);
1380                 return error;
1381         }
1382
1383         /*
1384          * Keep an extra reference to this quota inode. This inode is
1385          * locked exclusively and joined to the transaction already.
1386          */
1387         ASSERT(xfs_isilocked(*ip, XFS_ILOCK_EXCL));
1388         IHOLD(*ip);
1389
1390         /*
1391          * Make the changes in the superblock, and log those too.
1392          * sbfields arg may contain fields other than *QUOTINO;
1393          * VERSIONNUM for example.
1394          */
1395         spin_lock(&mp->m_sb_lock);
1396         if (flags & XFS_QMOPT_SBVERSION) {
1397 #if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
1398                 unsigned oldv = mp->m_sb.sb_versionnum;
1399 #endif
1400                 ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
1401                 ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
1402                                    XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
1403                        (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
1404                         XFS_SB_GQUOTINO | XFS_SB_QFLAGS));
1405
1406                 xfs_sb_version_addquota(&mp->m_sb);
1407                 mp->m_sb.sb_uquotino = NULLFSINO;
1408                 mp->m_sb.sb_gquotino = NULLFSINO;
1409
1410                 /* qflags will get updated _after_ quotacheck */
1411                 mp->m_sb.sb_qflags = 0;
1412 #if defined(DEBUG) && defined(XFS_LOUD_RECOVERY)
1413                 cmn_err(CE_NOTE,
1414                         "Old superblock version %x, converting to %x.",
1415                         oldv, mp->m_sb.sb_versionnum);
1416 #endif
1417         }
1418         if (flags & XFS_QMOPT_UQUOTA)
1419                 mp->m_sb.sb_uquotino = (*ip)->i_ino;
1420         else
1421                 mp->m_sb.sb_gquotino = (*ip)->i_ino;
1422         spin_unlock(&mp->m_sb_lock);
1423         xfs_mod_sb(tp, sbfields);
1424
1425         if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
1426                 xfs_fs_cmn_err(CE_ALERT, mp, "XFS qino_alloc failed!");
1427                 return error;
1428         }
1429         return 0;
1430 }
1431
1432
1433 STATIC void
1434 xfs_qm_reset_dqcounts(
1435         xfs_mount_t     *mp,
1436         xfs_buf_t       *bp,
1437         xfs_dqid_t      id,
1438         uint            type)
1439 {
1440         xfs_disk_dquot_t        *ddq;
1441         int                     j;
1442
1443         xfs_buftrace("RESET DQUOTS", bp);
1444         /*
1445          * Reset all counters and timers. They'll be
1446          * started afresh by xfs_qm_quotacheck.
1447          */
1448 #ifdef DEBUG
1449         j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
1450         do_div(j, sizeof(xfs_dqblk_t));
1451         ASSERT(XFS_QM_DQPERBLK(mp) == j);
1452 #endif
1453         ddq = (xfs_disk_dquot_t *)XFS_BUF_PTR(bp);
1454         for (j = 0; j < XFS_QM_DQPERBLK(mp); j++) {
1455                 /*
1456                  * Do a sanity check, and if needed, repair the dqblk. Don't
1457                  * output any warnings because it's perfectly possible to
1458                  * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
1459                  */
1460                 (void) xfs_qm_dqcheck(ddq, id+j, type, XFS_QMOPT_DQREPAIR,
1461                                       "xfs_quotacheck");
1462                 ddq->d_bcount = 0;
1463                 ddq->d_icount = 0;
1464                 ddq->d_rtbcount = 0;
1465                 ddq->d_btimer = 0;
1466                 ddq->d_itimer = 0;
1467                 ddq->d_rtbtimer = 0;
1468                 ddq->d_bwarns = 0;
1469                 ddq->d_iwarns = 0;
1470                 ddq->d_rtbwarns = 0;
1471                 ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1);
1472         }
1473 }
1474
1475 STATIC int
1476 xfs_qm_dqiter_bufs(
1477         xfs_mount_t     *mp,
1478         xfs_dqid_t      firstid,
1479         xfs_fsblock_t   bno,
1480         xfs_filblks_t   blkcnt,
1481         uint            flags)
1482 {
1483         xfs_buf_t       *bp;
1484         int             error;
1485         int             notcommitted;
1486         int             incr;
1487         int             type;
1488
1489         ASSERT(blkcnt > 0);
1490         notcommitted = 0;
1491         incr = (blkcnt > XFS_QM_MAX_DQCLUSTER_LOGSZ) ?
1492                 XFS_QM_MAX_DQCLUSTER_LOGSZ : blkcnt;
1493         type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
1494                 (flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
1495         error = 0;
1496
1497         /*
1498          * Blkcnt arg can be a very big number, and might even be
1499          * larger than the log itself. So, we have to break it up into
1500          * manageable-sized transactions.
1501          * Note that we don't start a permanent transaction here; we might
1502          * not be able to get a log reservation for the whole thing up front,
1503          * and we don't really care to either, because we just discard
1504          * everything if we were to crash in the middle of this loop.
1505          */
1506         while (blkcnt--) {
1507                 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
1508                               XFS_FSB_TO_DADDR(mp, bno),
1509                               (int)XFS_QI_DQCHUNKLEN(mp), 0, &bp);
1510                 if (error)
1511                         break;
1512
1513                 xfs_qm_reset_dqcounts(mp, bp, firstid, type);
1514                 xfs_bdwrite(mp, bp);
1515                 /*
1516                  * goto the next block.
1517                  */
1518                 bno++;
1519                 firstid += XFS_QM_DQPERBLK(mp);
1520         }
1521         return error;
1522 }
1523
1524 /*
1525  * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
1526  * caller supplied function for every chunk of dquots that we find.
1527  */
1528 STATIC int
1529 xfs_qm_dqiterate(
1530         xfs_mount_t     *mp,
1531         xfs_inode_t     *qip,
1532         uint            flags)
1533 {
1534         xfs_bmbt_irec_t         *map;
1535         int                     i, nmaps;       /* number of map entries */
1536         int                     error;          /* return value */
1537         xfs_fileoff_t           lblkno;
1538         xfs_filblks_t           maxlblkcnt;
1539         xfs_dqid_t              firstid;
1540         xfs_fsblock_t           rablkno;
1541         xfs_filblks_t           rablkcnt;
1542
1543         error = 0;
1544         /*
1545          * This looks racy, but we can't keep an inode lock across a
1546          * trans_reserve. But, this gets called during quotacheck, and that
1547          * happens only at mount time which is single threaded.
1548          */
1549         if (qip->i_d.di_nblocks == 0)
1550                 return 0;
1551
1552         map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);
1553
1554         lblkno = 0;
1555         maxlblkcnt = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
1556         do {
1557                 nmaps = XFS_DQITER_MAP_SIZE;
1558                 /*
1559                  * We aren't changing the inode itself. Just changing
1560                  * some of its data. No new blocks are added here, and
1561                  * the inode is never added to the transaction.
1562                  */
1563                 xfs_ilock(qip, XFS_ILOCK_SHARED);
1564                 error = xfs_bmapi(NULL, qip, lblkno,
1565                                   maxlblkcnt - lblkno,
1566                                   XFS_BMAPI_METADATA,
1567                                   NULL,
1568                                   0, map, &nmaps, NULL, NULL);
1569                 xfs_iunlock(qip, XFS_ILOCK_SHARED);
1570                 if (error)
1571                         break;
1572
1573                 ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1574                 for (i = 0; i < nmaps; i++) {
1575                         ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1576                         ASSERT(map[i].br_blockcount);
1577
1578
1579                         lblkno += map[i].br_blockcount;
1580
1581                         if (map[i].br_startblock == HOLESTARTBLOCK)
1582                                 continue;
1583
1584                         firstid = (xfs_dqid_t) map[i].br_startoff *
1585                                 XFS_QM_DQPERBLK(mp);
1586                         /*
1587                          * Do a read-ahead on the next extent.
1588                          */
1589                         if ((i+1 < nmaps) &&
1590                             (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1591                                 rablkcnt =  map[i+1].br_blockcount;
1592                                 rablkno = map[i+1].br_startblock;
1593                                 while (rablkcnt--) {
1594                                         xfs_baread(mp->m_ddev_targp,
1595                                                XFS_FSB_TO_DADDR(mp, rablkno),
1596                                                (int)XFS_QI_DQCHUNKLEN(mp));
1597                                         rablkno++;
1598                                 }
1599                         }
1600                         /*
1601                          * Iterate thru all the blks in the extent and
1602                          * reset the counters of all the dquots inside them.
1603                          */
1604                         if ((error = xfs_qm_dqiter_bufs(mp,
1605                                                        firstid,
1606                                                        map[i].br_startblock,
1607                                                        map[i].br_blockcount,
1608                                                        flags))) {
1609                                 break;
1610                         }
1611                 }
1612
1613                 if (error)
1614                         break;
1615         } while (nmaps > 0);
1616
1617         kmem_free(map);
1618
1619         return error;
1620 }
1621
1622 /*
1623  * Called by dqusage_adjust in doing a quotacheck.
1624  * Given the inode, and a dquot (either USR or GRP, doesn't matter),
1625  * this updates its incore copy as well as the buffer copy. This is
1626  * so that once the quotacheck is done, we can just log all the buffers,
1627  * as opposed to logging numerous updates to individual dquots.
1628  */
1629 STATIC void
1630 xfs_qm_quotacheck_dqadjust(
1631         xfs_dquot_t             *dqp,
1632         xfs_qcnt_t              nblks,
1633         xfs_qcnt_t              rtblks)
1634 {
1635         ASSERT(XFS_DQ_IS_LOCKED(dqp));
1636         xfs_dqtrace_entry(dqp, "QCHECK DQADJUST");
1637         /*
1638          * Adjust the inode count and the block count to reflect this inode's
1639          * resource usage.
1640          */
1641         be64_add_cpu(&dqp->q_core.d_icount, 1);
1642         dqp->q_res_icount++;
1643         if (nblks) {
1644                 be64_add_cpu(&dqp->q_core.d_bcount, nblks);
1645                 dqp->q_res_bcount += nblks;
1646         }
1647         if (rtblks) {
1648                 be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
1649                 dqp->q_res_rtbcount += rtblks;
1650         }
1651
1652         /*
1653          * Set default limits, adjust timers (since we changed usages)
1654          */
1655         if (! XFS_IS_SUSER_DQUOT(dqp)) {
1656                 xfs_qm_adjust_dqlimits(dqp->q_mount, &dqp->q_core);
1657                 xfs_qm_adjust_dqtimers(dqp->q_mount, &dqp->q_core);
1658         }
1659
1660         dqp->dq_flags |= XFS_DQ_DIRTY;
1661 }
1662
1663 STATIC int
1664 xfs_qm_get_rtblks(
1665         xfs_inode_t     *ip,
1666         xfs_qcnt_t      *O_rtblks)
1667 {
1668         xfs_filblks_t   rtblks;                 /* total rt blks */
1669         xfs_extnum_t    idx;                    /* extent record index */
1670         xfs_ifork_t     *ifp;                   /* inode fork pointer */
1671         xfs_extnum_t    nextents;               /* number of extent entries */
1672         int             error;
1673
1674         ASSERT(XFS_IS_REALTIME_INODE(ip));
1675         ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1676         if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1677                 if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
1678                         return error;
1679         }
1680         rtblks = 0;
1681         nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
1682         for (idx = 0; idx < nextents; idx++)
1683                 rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
1684         *O_rtblks = (xfs_qcnt_t)rtblks;
1685         return 0;
1686 }
1687
/*
 * Callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 *
 * Returns 0 on success; on failure *res tells bulkstat whether to skip
 * this inode (BULKSTAT_RV_NOTHING) or abandon the walk entirely
 * (BULKSTAT_RV_GIVEUP), which in turn makes quotacheck disable quotas.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	void		*private_data,	/* not used */
	xfs_daddr_t	bno,		/* starting block of inode cluster */
	int		*ubused,	/* not used */
	void		*dip,		/* on-disk inode pointer (not used) */
	int		*res)		/* result code value */
{
	xfs_inode_t	*ip;
	xfs_dquot_t	*udqp, *gdqp;
	xfs_qcnt_t	nblks, rtblks;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
		*res = BULKSTAT_RV_NOTHING;
		return XFS_ERROR(EINVAL);
	}

	/*
	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
	 * interface expects the inode to be exclusively locked because that's
	 * the case in all other instances. It's OK that we do this because
	 * quotacheck is done only at mount time.
	 */
	if ((error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip, bno))) {
		*res = BULKSTAT_RV_NOTHING;
		return error;
	}

	/*
	 * Obtain the locked dquots. In case of an error (eg. allocation
	 * fails for ENOSPC), we return the negative of the error number
	 * to bulkstat, so that it can get propagated to quotacheck() and
	 * making us disable quotas for the file system.
	 */
	if ((error = xfs_qm_dqget_noattach(ip, &udqp, &gdqp))) {
		xfs_iput(ip, XFS_ILOCK_EXCL);
		*res = BULKSTAT_RV_GIVEUP;
		return error;
	}

	rtblks = 0;
	if (! XFS_IS_REALTIME_INODE(ip)) {
		/* Non-realtime: everything in di_nblocks is data device usage. */
		nblks = (xfs_qcnt_t)ip->i_d.di_nblocks;
	} else {
		/*
		 * Walk thru the extent list and count the realtime blocks.
		 * On failure, drop the ilock and dquot references acquired
		 * above before giving up on the whole walk.
		 */
		if ((error = xfs_qm_get_rtblks(ip, &rtblks))) {
			xfs_iput(ip, XFS_ILOCK_EXCL);
			if (udqp)
				xfs_qm_dqput(udqp);
			if (gdqp)
				xfs_qm_dqput(gdqp);
			*res = BULKSTAT_RV_GIVEUP;
			return error;
		}
		/* di_nblocks counts both; subtract rt blocks to get data blocks. */
		nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
	}
	ASSERT(ip->i_delayed_blks == 0);

	/*
	 * We can't release the inode while holding its dquot locks.
	 * The inode can go into inactive and might try to acquire the dquotlocks.
	 * So, just unlock here and do a vn_rele at the end.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(udqp);
		xfs_qm_quotacheck_dqadjust(udqp, nblks, rtblks);
		xfs_qm_dqput(udqp);
	}
	if (XFS_IS_OQUOTA_ON(mp)) {
		ASSERT(gdqp);
		xfs_qm_quotacheck_dqadjust(gdqp, nblks, rtblks);
		xfs_qm_dqput(gdqp);
	}
	/*
	 * Now release the inode. This will send it to 'inactive', and
	 * possibly even free blocks.
	 */
	IRELE(ip);

	/*
	 * Goto next inode.
	 */
	*res = BULKSTAT_RV_DIDONE;
	return 0;
}
1805
/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 *
 * Called at mount time only; returns 0 on success, or an error after
 * quotas have been torn down and the on-disk quota flags reset.
 */
int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int		done, count, error;
	xfs_ino_t	lastino;
	size_t		structsz;
	xfs_inode_t	*uip, *gip;
	uint		flags;		/* *_CHKD bits to set on success */

	count = INT_MAX;
	structsz = 1;
	lastino = 0;
	flags = 0;

	ASSERT(XFS_QI_UQIP(mp) || XFS_QI_GQIP(mp));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * There should be no cached dquots. The (simplistic) quotacheck
	 * algorithm doesn't like that.
	 */
	ASSERT(XFS_QI_MPLNDQUOTS(mp) == 0);

	cmn_err(CE_NOTE, "XFS quotacheck %s: Please wait.", mp->m_fsname);

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	if ((uip = XFS_QI_UQIP(mp))) {
		if ((error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA)))
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	if ((gip = XFS_QI_GQIP(mp))) {
		/* the "other" quota inode holds either group or project dquots */
		if ((error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
					XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA)))
			goto error_return;
		flags |= XFS_OQUOTA_CHKD;
	}

	do {
		/*
		 * Iterate thru all the inodes in the file system,
		 * adjusting the corresponding dquot counters in core.
		 */
		if ((error = xfs_bulkstat(mp, &lastino, &count,
				     xfs_qm_dqusage_adjust, NULL,
				     structsz, NULL, BULKSTAT_FG_IGET, &done)))
			break;

	} while (! done);

	/*
	 * We've made all the changes that we need to make incore.
	 * Flush them down to disk buffers if everything was updated
	 * successfully.
	 */
	if (!error)
		error = xfs_qm_dqflush_all(mp, XFS_QMOPT_DELWRI);

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_QUOTAOFF);
		goto error_return;
	}

	/*
	 * We didn't log anything, because if we crashed, we'll have to
	 * start the quotacheck from scratch anyway. However, we must make
	 * sure that our dquot changes are secure before we put the
	 * quotacheck'd stamp on the superblock. So, here we do a synchronous
	 * flush.
	 */
	XFS_bflush(mp->m_ddev_targp);

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~(XFS_OQUOTA_CHKD | XFS_UQUOTA_CHKD);
	mp->m_qflags |= flags;

	XQM_LIST_PRINT(&(XFS_QI_MPL_LIST(mp)), MPL_NEXT, "++++ Mp list +++");

 error_return:
	if (error) {
		cmn_err(CE_WARN, "XFS quotacheck %s: Unsuccessful (Error %d): "
			"Disabling quotas.",
			mp->m_fsname, error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		ASSERT(xfs_Gqm != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			cmn_err(CE_WARN, "XFS quotacheck %s: "
				"Failed to reset quota flags.", mp->m_fsname);
		}
	} else {
		cmn_err(CE_NOTE, "XFS quotacheck %s: Done.", mp->m_fsname);
	}
	return (error);
}
1925
/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 *
 * Existing quota inodes (recorded in the superblock) are igotten; any
 * that are enabled but missing are allocated via xfs_qm_qino_alloc().
 * On success the quotainfo's UQIP/GQIP pointers are filled in.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	xfs_inode_t	*uip, *gip;
	int		error;
	__int64_t	sbflags;	/* superblock fields to log on alloc */
	uint		flags;		/* XFS_QMOPT_* passed to qino_alloc */

	ASSERT(mp->m_quotainfo);
	uip = gip = NULL;
	sbflags = 0;
	flags = 0;

	/*
	 * Get the uquota and gquota inodes
	 */
	if (xfs_sb_version_hasquota(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip, 0)))
				return XFS_ERROR(error);
		}
		if (XFS_IS_OQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip, 0))) {
				/* drop the uquota inode reference taken above */
				if (uip)
					IRELE(uip);
				return XFS_ERROR(error);
			}
		}
	} else {
		/* superblock predates quota support: upgrade its version */
		flags |= XFS_QMOPT_SBVERSION;
		sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			    XFS_SB_GQUOTINO | XFS_SB_QFLAGS);
	}

	/*
	 * Create the two inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below.  If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		if ((error = xfs_qm_qino_alloc(mp, &uip,
					      sbflags | XFS_SB_UQUOTINO,
					      flags | XFS_QMOPT_UQUOTA)))
			return XFS_ERROR(error);

		/* the version bump (if any) was logged by the call above */
		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_OQUOTA_ON(mp) && gip == NULL) {
		flags |= (XFS_IS_GQUOTA_ON(mp) ?
				XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
		error = xfs_qm_qino_alloc(mp, &gip,
					  sbflags | XFS_SB_GQUOTINO, flags);
		if (error) {
			if (uip)
				IRELE(uip);

			return XFS_ERROR(error);
		}
	}

	XFS_QI_UQIP(mp) = uip;
	XFS_QI_GQIP(mp) = gip;

	return 0;
}
2003
2004
2005 /*
2006  * Traverse the freelist of dquots and attempt to reclaim a maximum of
2007  * 'howmany' dquots. This operation races with dqlookup(), and attempts to
2008  * favor the lookup function ...
2009  * XXXsup merge this with qm_reclaim_one().
2010  */
2011 STATIC int
2012 xfs_qm_shake_freelist(
2013         int howmany)
2014 {
2015         int             nreclaimed;
2016         xfs_dqhash_t    *hash;
2017         xfs_dquot_t     *dqp, *nextdqp;
2018         int             restarts;
2019         int             nflushes;
2020
2021         if (howmany <= 0)
2022                 return 0;
2023
2024         nreclaimed = 0;
2025         restarts = 0;
2026         nflushes = 0;
2027
2028 #ifdef QUOTADEBUG
2029         cmn_err(CE_DEBUG, "Shake free 0x%x", howmany);
2030 #endif
2031         /* lock order is : hashchainlock, freelistlock, mplistlock */
2032  tryagain:
2033         xfs_qm_freelist_lock(xfs_Gqm);
2034
2035         for (dqp = xfs_Gqm->qm_dqfreelist.qh_next;
2036              ((dqp != (xfs_dquot_t *) &xfs_Gqm->qm_dqfreelist) &&
2037               nreclaimed < howmany); ) {
2038                 xfs_dqlock(dqp);
2039
2040                 /*
2041                  * We are racing with dqlookup here. Naturally we don't
2042                  * want to reclaim a dquot that lookup wants.
2043                  */
2044                 if (dqp->dq_flags & XFS_DQ_WANT) {
2045                         xfs_dqunlock(dqp);
2046                         xfs_qm_freelist_unlock(xfs_Gqm);
2047                         if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
2048                                 return nreclaimed;
2049                         XQM_STATS_INC(xqmstats.xs_qm_dqwants);
2050                         goto tryagain;
2051                 }
2052
2053                 /*
2054                  * If the dquot is inactive, we are assured that it is
2055                  * not on the mplist or the hashlist, and that makes our
2056                  * life easier.
2057                  */
2058                 if (dqp->dq_flags & XFS_DQ_INACTIVE) {
2059                         ASSERT(dqp->q_mount == NULL);
2060                         ASSERT(! XFS_DQ_IS_DIRTY(dqp));
2061                         ASSERT(dqp->HL_PREVP == NULL);
2062                         ASSERT(dqp->MPL_PREVP == NULL);
2063                         XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
2064                         nextdqp = dqp->dq_flnext;
2065                         goto off_freelist;
2066                 }
2067
2068                 ASSERT(dqp->MPL_PREVP);
2069                 /*
2070                  * Try to grab the flush lock. If this dquot is in the process of
2071                  * getting flushed to disk, we don't want to reclaim it.
2072                  */
2073                 if (!xfs_dqflock_nowait(dqp)) {
2074                         xfs_dqunlock(dqp);
2075                         dqp = dqp->dq_flnext;
2076                         continue;
2077                 }
2078
2079                 /*
2080                  * We have the flush lock so we know that this is not in the
2081                  * process of being flushed. So, if this is dirty, flush it
2082                  * DELWRI so that we don't get a freelist infested with
2083                  * dirty dquots.
2084                  */
2085                 if (XFS_DQ_IS_DIRTY(dqp)) {
2086                         int     error;
2087                         xfs_dqtrace_entry(dqp, "DQSHAKE: DQDIRTY");
2088                         /*
2089                          * We flush it delayed write, so don't bother
2090                          * releasing the mplock.
2091                          */
2092                         error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
2093                         if (error) {
2094                                 xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
2095                         "xfs_qm_dqflush_all: dquot %p flush failed", dqp);
2096                         }
2097                         xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
2098                         dqp = dqp->dq_flnext;
2099                         continue;
2100                 }
2101                 /*
2102                  * We're trying to get the hashlock out of order. This races
2103                  * with dqlookup; so, we giveup and goto the next dquot if
2104                  * we couldn't get the hashlock. This way, we won't starve
2105                  * a dqlookup process that holds the hashlock that is
2106                  * waiting for the freelist lock.
2107                  */
2108                 if (! xfs_qm_dqhashlock_nowait(dqp)) {
2109                         xfs_dqfunlock(dqp);
2110                         xfs_dqunlock(dqp);
2111                         dqp = dqp->dq_flnext;
2112                         continue;
2113                 }
2114                 /*
2115                  * This races with dquot allocation code as well as dqflush_all
2116                  * and reclaim code. So, if we failed to grab the mplist lock,
2117                  * giveup everything and start over.
2118                  */
2119                 hash = dqp->q_hash;
2120                 ASSERT(hash);
2121                 if (! xfs_qm_mplist_nowait(dqp->q_mount)) {
2122                         /* XXX put a sentinel so that we can come back here */
2123                         xfs_dqfunlock(dqp);
2124                         xfs_dqunlock(dqp);
2125                         XFS_DQ_HASH_UNLOCK(hash);
2126                         xfs_qm_freelist_unlock(xfs_Gqm);
2127                         if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
2128                                 return nreclaimed;
2129                         goto tryagain;
2130                 }
2131                 xfs_dqtrace_entry(dqp, "DQSHAKE: UNLINKING");
2132 #ifdef QUOTADEBUG
2133                 cmn_err(CE_DEBUG, "Shake 0x%p, ID 0x%x\n",
2134                         dqp, be32_to_cpu(dqp->q_core.d_id));
2135 #endif
2136                 ASSERT(dqp->q_nrefs == 0);
2137                 nextdqp = dqp->dq_flnext;
2138                 XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
2139                 XQM_HASHLIST_REMOVE(hash, dqp);
2140                 xfs_dqfunlock(dqp);
2141                 xfs_qm_mplist_unlock(dqp->q_mount);
2142                 XFS_DQ_HASH_UNLOCK(hash);
2143
2144  off_freelist:
2145                 XQM_FREELIST_REMOVE(dqp);
2146                 xfs_dqunlock(dqp);
2147                 nreclaimed++;
2148                 XQM_STATS_INC(xqmstats.xs_qm_dqshake_reclaims);
2149                 xfs_qm_dqdestroy(dqp);
2150                 dqp = nextdqp;
2151         }
2152         xfs_qm_freelist_unlock(xfs_Gqm);
2153         return nreclaimed;
2154 }
2155
2156
2157 /*
2158  * The kmem_shake interface is invoked when memory is running low.
2159  */
2160 /* ARGSUSED */
2161 STATIC int
2162 xfs_qm_shake(int nr_to_scan, gfp_t gfp_mask)
2163 {
2164         int     ndqused, nfree, n;
2165
2166         if (!kmem_shake_allow(gfp_mask))
2167                 return 0;
2168         if (!xfs_Gqm)
2169                 return 0;
2170
2171         nfree = xfs_Gqm->qm_dqfreelist.qh_nelems; /* free dquots */
2172         /* incore dquots in all f/s's */
2173         ndqused = atomic_read(&xfs_Gqm->qm_totaldquots) - nfree;
2174
2175         ASSERT(ndqused >= 0);
2176
2177         if (nfree <= ndqused && nfree < ndquot)
2178                 return 0;
2179
2180         ndqused *= xfs_Gqm->qm_dqfree_ratio;    /* target # of free dquots */
2181         n = nfree - ndqused - ndquot;           /* # over target */
2182
2183         return xfs_qm_shake_freelist(MAX(nfree, n));
2184 }
2185
2186
2187 /*
2188  * Just pop the least recently used dquot off the freelist and
2189  * recycle it. The returned dquot is locked.
2190  */
2191 STATIC xfs_dquot_t *
2192 xfs_qm_dqreclaim_one(void)
2193 {
2194         xfs_dquot_t     *dqpout;
2195         xfs_dquot_t     *dqp;
2196         int             restarts;
2197         int             nflushes;
2198
2199         restarts = 0;
2200         dqpout = NULL;
2201         nflushes = 0;
2202
2203         /* lockorder: hashchainlock, freelistlock, mplistlock, dqlock, dqflock */
2204  startagain:
2205         xfs_qm_freelist_lock(xfs_Gqm);
2206
2207         FOREACH_DQUOT_IN_FREELIST(dqp, &(xfs_Gqm->qm_dqfreelist)) {
2208                 xfs_dqlock(dqp);
2209
2210                 /*
2211                  * We are racing with dqlookup here. Naturally we don't
2212                  * want to reclaim a dquot that lookup wants. We release the
2213                  * freelist lock and start over, so that lookup will grab
2214                  * both the dquot and the freelistlock.
2215                  */
2216                 if (dqp->dq_flags & XFS_DQ_WANT) {
2217                         ASSERT(! (dqp->dq_flags & XFS_DQ_INACTIVE));
2218                         xfs_dqtrace_entry(dqp, "DQRECLAIM: DQWANT");
2219                         xfs_dqunlock(dqp);
2220                         xfs_qm_freelist_unlock(xfs_Gqm);
2221                         if (++restarts >= XFS_QM_RECLAIM_MAX_RESTARTS)
2222                                 return NULL;
2223                         XQM_STATS_INC(xqmstats.xs_qm_dqwants);
2224                         goto startagain;
2225                 }
2226
2227                 /*
2228                  * If the dquot is inactive, we are assured that it is
2229                  * not on the mplist or the hashlist, and that makes our
2230                  * life easier.
2231                  */
2232                 if (dqp->dq_flags & XFS_DQ_INACTIVE) {
2233                         ASSERT(dqp->q_mount == NULL);
2234                         ASSERT(! XFS_DQ_IS_DIRTY(dqp));
2235                         ASSERT(dqp->HL_PREVP == NULL);
2236                         ASSERT(dqp->MPL_PREVP == NULL);
2237                         XQM_FREELIST_REMOVE(dqp);
2238                         xfs_dqunlock(dqp);
2239                         dqpout = dqp;
2240                         XQM_STATS_INC(xqmstats.xs_qm_dqinact_reclaims);
2241                         break;
2242                 }
2243
2244                 ASSERT(dqp->q_hash);
2245                 ASSERT(dqp->MPL_PREVP);
2246
2247                 /*
2248                  * Try to grab the flush lock. If this dquot is in the process of
2249                  * getting flushed to disk, we don't want to reclaim it.
2250                  */
2251                 if (!xfs_dqflock_nowait(dqp)) {
2252                         xfs_dqunlock(dqp);
2253                         continue;
2254                 }
2255
2256                 /*
2257                  * We have the flush lock so we know that this is not in the
2258                  * process of being flushed. So, if this is dirty, flush it
2259                  * DELWRI so that we don't get a freelist infested with
2260                  * dirty dquots.
2261                  */
2262                 if (XFS_DQ_IS_DIRTY(dqp)) {
2263                         int     error;
2264                         xfs_dqtrace_entry(dqp, "DQRECLAIM: DQDIRTY");
2265                         /*
2266                          * We flush it delayed write, so don't bother
2267                          * releasing the freelist lock.
2268                          */
2269                         error = xfs_qm_dqflush(dqp, XFS_QMOPT_DELWRI);
2270                         if (error) {
2271                                 xfs_fs_cmn_err(CE_WARN, dqp->q_mount,
2272                         "xfs_qm_dqreclaim: dquot %p flush failed", dqp);
2273                         }
2274                         xfs_dqunlock(dqp); /* dqflush unlocks dqflock */
2275                         continue;
2276                 }
2277
2278                 if (! xfs_qm_mplist_nowait(dqp->q_mount)) {
2279                         xfs_dqfunlock(dqp);
2280                         xfs_dqunlock(dqp);
2281                         continue;
2282                 }
2283
2284                 if (! xfs_qm_dqhashlock_nowait(dqp))
2285                         goto mplistunlock;
2286
2287                 ASSERT(dqp->q_nrefs == 0);
2288                 xfs_dqtrace_entry(dqp, "DQRECLAIM: UNLINKING");
2289                 XQM_MPLIST_REMOVE(&(XFS_QI_MPL_LIST(dqp->q_mount)), dqp);
2290                 XQM_HASHLIST_REMOVE(dqp->q_hash, dqp);
2291                 XQM_FREELIST_REMOVE(dqp);
2292                 dqpout = dqp;
2293                 XFS_DQ_HASH_UNLOCK(dqp->q_hash);
2294  mplistunlock:
2295                 xfs_qm_mplist_unlock(dqp->q_mount);
2296                 xfs_dqfunlock(dqp);
2297                 xfs_dqunlock(dqp);
2298                 if (dqpout)
2299                         break;
2300         }
2301
2302         xfs_qm_freelist_unlock(xfs_Gqm);
2303         return dqpout;
2304 }
2305
2306
2307 /*------------------------------------------------------------------*/
2308
2309 /*
2310  * Return a new incore dquot. Depending on the number of
2311  * dquots in the system, we either allocate a new one on the kernel heap,
2312  * or reclaim a free one.
2313  * Return value is B_TRUE if we allocated a new dquot, B_FALSE if we managed
2314  * to reclaim an existing one from the freelist.
2315  */
2316 boolean_t
2317 xfs_qm_dqalloc_incore(
2318         xfs_dquot_t **O_dqpp)
2319 {
2320         xfs_dquot_t     *dqp;
2321
2322         /*
2323          * Check against high water mark to see if we want to pop
2324          * a nincompoop dquot off the freelist.
2325          */
2326         if (atomic_read(&xfs_Gqm->qm_totaldquots) >= ndquot) {
2327                 /*
2328                  * Try to recycle a dquot from the freelist.
2329                  */
2330                 if ((dqp = xfs_qm_dqreclaim_one())) {
2331                         XQM_STATS_INC(xqmstats.xs_qm_dqreclaims);
2332                         /*
2333                          * Just zero the core here. The rest will get
2334                          * reinitialized by caller. XXX we shouldn't even
2335                          * do this zero ...
2336                          */
2337                         memset(&dqp->q_core, 0, sizeof(dqp->q_core));
2338                         *O_dqpp = dqp;
2339                         return B_FALSE;
2340                 }
2341                 XQM_STATS_INC(xqmstats.xs_qm_dqreclaim_misses);
2342         }
2343
2344         /*
2345          * Allocate a brand new dquot on the kernel heap and return it
2346          * to the caller to initialize.
2347          */
2348         ASSERT(xfs_Gqm->qm_dqzone != NULL);
2349         *O_dqpp = kmem_zone_zalloc(xfs_Gqm->qm_dqzone, KM_SLEEP);
2350         atomic_inc(&xfs_Gqm->qm_totaldquots);
2351
2352         return B_TRUE;
2353 }
2354
2355
2356 /*
2357  * Start a transaction and write the incore superblock changes to
2358  * disk. flags parameter indicates which fields have changed.
2359  */
2360 int
2361 xfs_qm_write_sb_changes(
2362         xfs_mount_t     *mp,
2363         __int64_t       flags)
2364 {
2365         xfs_trans_t     *tp;
2366         int             error;
2367
2368 #ifdef QUOTADEBUG
2369         cmn_err(CE_NOTE, "Writing superblock quota changes :%s", mp->m_fsname);
2370 #endif
2371         tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
2372         if ((error = xfs_trans_reserve(tp, 0,
2373                                       mp->m_sb.sb_sectsize + 128, 0,
2374                                       0,
2375                                       XFS_DEFAULT_LOG_COUNT))) {
2376                 xfs_trans_cancel(tp, 0);
2377                 return error;
2378         }
2379
2380         xfs_mod_sb(tp, flags);
2381         error = xfs_trans_commit(tp, 0);
2382
2383         return error;
2384 }
2385
2386
2387 /* --------------- utility functions for vnodeops ---------------- */
2388
2389
/*
 * Given an inode, a uid and gid (from cred_t) make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in   : inode (unlocked)
 * out  : udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,
	uid_t		uid,
	gid_t		gid,
	prid_t		prid,
	uint		flags,		/* XFS_QMOPT_{U,G,P}QUOTA | XFS_QMOPT_INHERIT */
	xfs_dquot_t	**O_udqpp,	/* out: user dquot; may be NULL if not wanted */
	xfs_dquot_t	**O_gdqpp)	/* out: group OR project dquot; may be NULL */
{
	int		error;
	xfs_dquot_t	*uq, *gq;
	uint		lockflags;

	/* Nothing to do if quotas are not enabled on this mount. */
	if (!XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	/* New inode inheriting its gid from the parent directory. */
	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = ip->i_d.di_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		if ((error = xfs_qm_dqattach(ip, XFS_QMOPT_DQALLOC |
					    XFS_QMOPT_ILOCKED))) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	uq = gq = NULL;
	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		if (ip->i_d.di_uid != uid) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid,
						 XFS_DQ_USER,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &uq))) {
				ASSERT(error != ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order: dqget returned
			 * the dquot locked (hence the unlock below); drop it
			 * before re-taking the ilock. Shared is enough now.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to caller
			 */
			ASSERT(ip->i_udquot);
			uq = ip->i_udquot;
			xfs_dqlock(uq);
			XFS_DQHOLD(uq);
			xfs_dqunlock(uq);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		if (ip->i_d.di_gid != gid) {
			/* Same drop-ilock/dqget/relock dance as the uid case. */
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid,
						 XFS_DQ_GROUP,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq))) {
				/* Drop the user dquot reference taken above. */
				if (uq)
					xfs_qm_dqrele(uq);
				ASSERT(error != ENOENT);
				return error;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/* Inode already holds the right group dquot; hold it. */
			ASSERT(ip->i_gdquot);
			gq = ip->i_gdquot;
			xfs_dqlock(gq);
			XFS_DQHOLD(gq);
			xfs_dqunlock(gq);
		}
	} else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		/* Project quota shares the "group" dquot slot (gq/i_gdquot). */
		if (ip->i_d.di_projid != prid) {
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
						 XFS_DQ_PROJ,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq))) {
				if (uq)
					xfs_qm_dqrele(uq);
				ASSERT(error != ENOENT);
				return (error);
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = ip->i_gdquot;
			xfs_dqlock(gq);
			XFS_DQHOLD(gq);
			xfs_dqunlock(gq);
		}
	}
	if (uq)
		xfs_dqtrace_entry_ino(uq, "DQALLOC", ip);

	xfs_iunlock(ip, lockflags);
	/* Hand the references to the caller, or drop them if not wanted. */
	if (O_udqpp)
		*O_udqpp = uq;
	else if (uq)
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else if (gq)
		xfs_qm_dqrele(gq);
	return 0;
}
2536
2537 /*
2538  * Actually transfer ownership, and do dquot modifications.
2539  * These were already reserved.
2540  */
2541 xfs_dquot_t *
2542 xfs_qm_vop_chown(
2543         xfs_trans_t     *tp,
2544         xfs_inode_t     *ip,
2545         xfs_dquot_t     **IO_olddq,
2546         xfs_dquot_t     *newdq)
2547 {
2548         xfs_dquot_t     *prevdq;
2549         uint            bfield = XFS_IS_REALTIME_INODE(ip) ?
2550                                  XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
2551
2552         ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2553         ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
2554
2555         /* old dquot */
2556         prevdq = *IO_olddq;
2557         ASSERT(prevdq);
2558         ASSERT(prevdq != newdq);
2559
2560         xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
2561         xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
2562
2563         /* the sparkling new dquot */
2564         xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
2565         xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
2566
2567         /*
2568          * Take an extra reference, because the inode
2569          * is going to keep this dquot pointer even
2570          * after the trans_commit.
2571          */
2572         xfs_dqlock(newdq);
2573         XFS_DQHOLD(newdq);
2574         xfs_dqunlock(newdq);
2575         *IO_olddq = newdq;
2576
2577         return prevdq;
2578 }
2579
2580 /*
2581  * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
2582  */
2583 int
2584 xfs_qm_vop_chown_reserve(
2585         xfs_trans_t     *tp,
2586         xfs_inode_t     *ip,
2587         xfs_dquot_t     *udqp,
2588         xfs_dquot_t     *gdqp,
2589         uint            flags)
2590 {
2591         int             error;
2592         xfs_mount_t     *mp;
2593         uint            delblks, blkflags, prjflags = 0;
2594         xfs_dquot_t     *unresudq, *unresgdq, *delblksudq, *delblksgdq;
2595
2596         ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2597         mp = ip->i_mount;
2598         ASSERT(XFS_IS_QUOTA_RUNNING(mp));
2599
2600         delblks = ip->i_delayed_blks;
2601         delblksudq = delblksgdq = unresudq = unresgdq = NULL;
2602         blkflags = XFS_IS_REALTIME_INODE(ip) ?
2603                         XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
2604
2605         if (XFS_IS_UQUOTA_ON(mp) && udqp &&
2606             ip->i_d.di_uid != (uid_t)be32_to_cpu(udqp->q_core.d_id)) {
2607                 delblksudq = udqp;
2608                 /*
2609                  * If there are delayed allocation blocks, then we have to
2610                  * unreserve those from the old dquot, and add them to the
2611                  * new dquot.
2612                  */
2613                 if (delblks) {
2614                         ASSERT(ip->i_udquot);
2615                         unresudq = ip->i_udquot;
2616                 }
2617         }
2618         if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) {
2619                 if (XFS_IS_PQUOTA_ON(ip->i_mount) &&
2620                      ip->i_d.di_projid != be32_to_cpu(gdqp->q_core.d_id))
2621                         prjflags = XFS_QMOPT_ENOSPC;
2622
2623                 if (prjflags ||
2624                     (XFS_IS_GQUOTA_ON(ip->i_mount) &&
2625                      ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id))) {
2626                         delblksgdq = gdqp;
2627                         if (delblks) {
2628                                 ASSERT(ip->i_gdquot);
2629                                 unresgdq = ip->i_gdquot;
2630                         }
2631                 }
2632         }
2633
2634         if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
2635                                 delblksudq, delblksgdq, ip->i_d.di_nblocks, 1,
2636                                 flags | blkflags | prjflags)))
2637                 return (error);
2638
2639         /*
2640          * Do the delayed blks reservations/unreservations now. Since, these
2641          * are done without the help of a transaction, if a reservation fails
2642          * its previous reservations won't be automatically undone by trans
2643          * code. So, we have to do it manually here.
2644          */
2645         if (delblks) {
2646                 /*
2647                  * Do the reservations first. Unreservation can't fail.
2648                  */
2649                 ASSERT(delblksudq || delblksgdq);
2650                 ASSERT(unresudq || unresgdq);
2651                 if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
2652                                 delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0,
2653                                 flags | blkflags | prjflags)))
2654                         return (error);
2655                 xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
2656                                 unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0,
2657                                 blkflags);
2658         }
2659
2660         return (0);
2661 }
2662
2663 int
2664 xfs_qm_vop_rename_dqattach(
2665         xfs_inode_t     **i_tab)
2666 {
2667         xfs_inode_t     *ip;
2668         int             i;
2669         int             error;
2670
2671         ip = i_tab[0];
2672
2673         if (! XFS_IS_QUOTA_ON(ip->i_mount))
2674                 return 0;
2675
2676         if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) {
2677                 error = xfs_qm_dqattach(ip, 0);
2678                 if (error)
2679                         return error;
2680         }
2681         for (i = 1; (i < 4 && i_tab[i]); i++) {
2682                 /*
2683                  * Watch out for duplicate entries in the table.
2684                  */
2685                 if ((ip = i_tab[i]) != i_tab[i-1]) {
2686                         if (XFS_NOT_DQATTACHED(ip->i_mount, ip)) {
2687                                 error = xfs_qm_dqattach(ip, 0);
2688                                 if (error)
2689                                         return error;
2690                         }
2691                 }
2692         }
2693         return 0;
2694 }
2695
/*
 * Attach the given dquots to a freshly created inode and charge the
 * inode to them (ICOUNT +1 each) within the given transaction.
 *
 * The caller holds references on udqp/gdqp (e.g. from
 * xfs_qm_vop_dqalloc); an extra reference is taken here for the
 * inode's own i_udquot/i_gdquot pointers, which are kept past the
 * transaction commit. Either dquot may be NULL.
 */
void
xfs_qm_vop_dqattach_and_dqmod_newinode(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	*udqp,
	xfs_dquot_t	*gdqp)
{
	if (!XFS_IS_QUOTA_ON(tp->t_mountp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp));

	if (udqp) {
		/* Extra reference for ip->i_udquot. */
		xfs_dqlock(udqp);
		XFS_DQHOLD(udqp);
		xfs_dqunlock(udqp);
		ASSERT(ip->i_udquot == NULL);
		ip->i_udquot = udqp;
		ASSERT(XFS_IS_UQUOTA_ON(tp->t_mountp));
		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp) {
		/* Extra reference for ip->i_gdquot. */
		xfs_dqlock(gdqp);
		XFS_DQHOLD(gdqp);
		xfs_dqunlock(gdqp);
		ASSERT(ip->i_gdquot == NULL);
		ip->i_gdquot = gdqp;
		ASSERT(XFS_IS_OQUOTA_ON(tp->t_mountp));
		/* gdqp is the group dquot if GQUOTA is on, else the project dquot. */
		ASSERT((XFS_IS_GQUOTA_ON(tp->t_mountp) ?
			ip->i_d.di_gid : ip->i_d.di_projid) ==
				be32_to_cpu(gdqp->q_core.d_id));
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}
2732
2733 /* ------------- list stuff -----------------*/
2734 STATIC void
2735 xfs_qm_freelist_init(xfs_frlist_t *ql)
2736 {
2737         ql->qh_next = ql->qh_prev = (xfs_dquot_t *) ql;
2738         mutex_init(&ql->qh_lock);
2739         ql->qh_version = 0;
2740         ql->qh_nelems = 0;
2741 }
2742
2743 STATIC void
2744 xfs_qm_freelist_destroy(xfs_frlist_t *ql)
2745 {
2746         xfs_dquot_t     *dqp, *nextdqp;
2747
2748         mutex_lock(&ql->qh_lock);
2749         for (dqp = ql->qh_next;
2750              dqp != (xfs_dquot_t *)ql; ) {
2751                 xfs_dqlock(dqp);
2752                 nextdqp = dqp->dq_flnext;
2753 #ifdef QUOTADEBUG
2754                 cmn_err(CE_DEBUG, "FREELIST destroy 0x%p", dqp);
2755 #endif
2756                 XQM_FREELIST_REMOVE(dqp);
2757                 xfs_dqunlock(dqp);
2758                 xfs_qm_dqdestroy(dqp);
2759                 dqp = nextdqp;
2760         }
2761         mutex_unlock(&ql->qh_lock);
2762         mutex_destroy(&ql->qh_lock);
2763
2764         ASSERT(ql->qh_nelems == 0);
2765 }
2766
2767 STATIC void
2768 xfs_qm_freelist_insert(xfs_frlist_t *ql, xfs_dquot_t *dq)
2769 {
2770         dq->dq_flnext = ql->qh_next;
2771         dq->dq_flprev = (xfs_dquot_t *)ql;
2772         ql->qh_next = dq;
2773         dq->dq_flnext->dq_flprev = dq;
2774         xfs_Gqm->qm_dqfreelist.qh_nelems++;
2775         xfs_Gqm->qm_dqfreelist.qh_version++;
2776 }
2777
2778 void
2779 xfs_qm_freelist_unlink(xfs_dquot_t *dq)
2780 {
2781         xfs_dquot_t *next = dq->dq_flnext;
2782         xfs_dquot_t *prev = dq->dq_flprev;
2783
2784         next->dq_flprev = prev;
2785         prev->dq_flnext = next;
2786         dq->dq_flnext = dq->dq_flprev = dq;
2787         xfs_Gqm->qm_dqfreelist.qh_nelems--;
2788         xfs_Gqm->qm_dqfreelist.qh_version++;
2789 }
2790
2791 void
2792 xfs_qm_freelist_append(xfs_frlist_t *ql, xfs_dquot_t *dq)
2793 {
2794         xfs_qm_freelist_insert((xfs_frlist_t *)ql->qh_prev, dq);
2795 }
2796
2797 STATIC int
2798 xfs_qm_dqhashlock_nowait(
2799         xfs_dquot_t *dqp)
2800 {
2801         int locked;
2802
2803         locked = mutex_trylock(&((dqp)->q_hash->qh_lock));
2804         return locked;
2805 }
2806
2807 int
2808 xfs_qm_freelist_lock_nowait(
2809         xfs_qm_t *xqm)
2810 {
2811         int locked;
2812
2813         locked = mutex_trylock(&(xqm->qm_dqfreelist.qh_lock));
2814         return locked;
2815 }
2816
2817 STATIC int
2818 xfs_qm_mplist_nowait(
2819         xfs_mount_t     *mp)
2820 {
2821         int locked;
2822
2823         ASSERT(mp->m_quotainfo);
2824         locked = mutex_trylock(&(XFS_QI_MPLLOCK(mp)));
2825         return locked;
2826 }