/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_mru_cache.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_utils.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_rw.h"

#include <linux/kthread.h>
#include <linux/freezer.h>
/*
 * Sync all the inodes in the given AG according to the
 * direction given by the flags.
 */
STATIC int
xfs_sync_inodes_ag(
	xfs_mount_t	*mp,
	int		ag,
	int		flags)
{
	xfs_perag_t	*pag = &mp->m_perag[ag];
	int		nr_found;
	uint32_t	first_index = 0;
	int		error = 0;
	int		last_error = 0;
	int		fflag = XFS_B_ASYNC;
	int		lock_flags = XFS_ILOCK_SHARED;

	if (flags & SYNC_DELWRI)
		fflag = XFS_B_DELWRI;
	if (flags & SYNC_WAIT)
		fflag = 0;		/* synchronous overrides all */

	if (flags & SYNC_DELWRI) {
		/*
		 * We need the I/O lock if we're going to call any of
		 * the flush/inval routines.
		 */
		lock_flags |= XFS_IOLOCK_SHARED;
	}

	do {
		struct inode	*inode;
		boolean_t	inode_refed;
		xfs_inode_t	*ip = NULL;
		/*
		 * use a gang lookup to find the next inode in the tree
		 * as the tree is sparse and a gang lookup walks to find
		 * the number of objects requested.
		 */
		read_lock(&pag->pag_ici_lock);
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
				(void **)&ip, first_index, 1);

		if (!nr_found) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}
		/*
		 * Update the index for the next lookup. Catch overflows
		 * into the next AG range which can occur if we have inodes
		 * in the last block of the AG and we are currently
		 * pointing to the last inode.
		 */
		first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
		if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}
		/*
		 * skip inodes in reclaim. Let xfs_syncsub do that for
		 * us so we don't need to worry.
		 */
		if (xfs_iflags_test(ip, (XFS_IRECLAIM|XFS_IRECLAIMABLE))) {
			read_unlock(&pag->pag_ici_lock);
			continue;
		}

		/* bad inodes are dealt with elsewhere */
		inode = VFS_I(ip);
		if (is_bad_inode(inode)) {
			read_unlock(&pag->pag_ici_lock);
			continue;
		}

		/* nothing to sync during shutdown */
		if (XFS_FORCED_SHUTDOWN(mp)) {
			read_unlock(&pag->pag_ici_lock);
			return 0;
		}
		/*
		 * If we can't get a reference on the VFS_I, the inode must be
		 * in reclaim. If we can get the inode lock without blocking,
		 * it is safe to flush the inode because we hold the tree lock
		 * and xfs_iextract will block right now. Hence if we lock the
		 * inode while holding the tree lock, xfs_ireclaim() is
		 * guaranteed to block on the inode lock we now hold and hence
		 * it is safe to reference the inode until we drop the inode
		 * locks completely.
		 */
		inode_refed = B_FALSE;
		if (igrab(inode)) {
			read_unlock(&pag->pag_ici_lock);
			xfs_ilock(ip, lock_flags);
			inode_refed = B_TRUE;
		} else {
			if (!xfs_ilock_nowait(ip, lock_flags)) {
				/* leave it to reclaim */
				read_unlock(&pag->pag_ici_lock);
				continue;
			}
			read_unlock(&pag->pag_ici_lock);
		}
		/*
		 * If we have to flush data or wait for I/O completion
		 * we need to drop the ilock that we currently hold.
		 * If we need to drop the lock, insert a marker if we
		 * have not already done so.
		 */
		if ((flags & SYNC_DELWRI) && VN_DIRTY(inode)) {
			xfs_iunlock(ip, XFS_ILOCK_SHARED);
			error = xfs_flush_pages(ip, 0, -1, fflag, FI_NONE);
			if (flags & SYNC_IOWAIT)
				vn_iowait(ip);
			xfs_ilock(ip, XFS_ILOCK_SHARED);
		}
		if ((flags & SYNC_ATTR) && !xfs_inode_clean(ip)) {
			if (flags & SYNC_WAIT) {
				xfs_iflock(ip);
				if (!xfs_inode_clean(ip))
					error = xfs_iflush(ip, XFS_IFLUSH_SYNC);
				else
					xfs_ifunlock(ip);
			} else if (xfs_iflock_nowait(ip)) {
				if (!xfs_inode_clean(ip))
					error = xfs_iflush(ip, XFS_IFLUSH_DELWRI);
				else
					xfs_ifunlock(ip);
			}
		}

		if (lock_flags)
			xfs_iunlock(ip, lock_flags);

		if (inode_refed) {
			IRELE(ip);
		}

		if (error)
			last_error = error;
		/*
		 * bail out if the filesystem is corrupted.
		 */
		if (error == EFSCORRUPTED)
			return XFS_ERROR(error);

	} while (nr_found);

	return last_error;
}
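
/*
 * Worked example for the overflow check above (illustrative): agino
 * values are AG-relative, so if ip->i_ino is the last inode number the
 * AG can express, XFS_INO_TO_AGINO(mp, ip->i_ino + 1) carries into the
 * AG bits and the AG-relative result wraps to 0.  The wrapped
 * first_index is then less than the current inode's agino, and the
 * walk terminates instead of rescanning this AG from the start.
 */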
int
xfs_sync_inodes(
	xfs_mount_t	*mp,
	int		flags)
{
	int		error;
	int		last_error;
	int		i;
	int		lflags = XFS_LOG_FORCE;

	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;
	error = 0;
	last_error = 0;

	if (flags & SYNC_WAIT)
		lflags |= XFS_LOG_SYNC;
	for (i = 0; i < mp->m_sb.sb_agcount; i++) {
		if (!mp->m_perag[i].pag_ici_init)
			continue;
		error = xfs_sync_inodes_ag(mp, i, flags);
		if (error)
			last_error = error;
		if (error == EFSCORRUPTED)
			break;
	}
	if (flags & SYNC_DELWRI)
		xfs_log_force(mp, 0, lflags);

	return XFS_ERROR(last_error);
}
STATIC int
xfs_commit_dummy_trans(
	struct xfs_mount	*mp,
	uint			log_flags)
{
	struct xfs_inode	*ip = mp->m_rootip;
	struct xfs_trans	*tp;
	int			error;

	/*
	 * Put a dummy transaction in the log to tell recovery
	 * that all others are OK.
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
	error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_ihold(tp, ip);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	/* XXX(hch): ignoring the error here.. */
	error = xfs_trans_commit(tp, 0);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_log_force(mp, 0, log_flags);
	return 0;
}
int
xfs_sync_fsdata(
	struct xfs_mount	*mp,
	int			flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf_log_item	*bip;
	int			error = 0;

	/*
	 * If this is xfssyncd() then only sync the superblock if we can
	 * lock it without sleeping and it is not pinned.
	 */
	if (flags & SYNC_BDFLUSH) {
		ASSERT(!(flags & SYNC_WAIT));

		bp = xfs_getsb(mp, XFS_BUF_TRYLOCK);
		if (!bp)
			goto out;

		bip = XFS_BUF_FSPRIVATE(bp, struct xfs_buf_log_item *);
		if (!bip || !xfs_buf_item_dirty(bip) || XFS_BUF_ISPINNED(bp))
			goto out_brelse;
	} else {
		bp = xfs_getsb(mp, 0);

		/*
		 * If the buffer is pinned then push on the log so we won't
		 * get stuck waiting in the write for someone, maybe
		 * ourselves, to flush the log.
		 *
		 * Even though we just pushed the log above, we did not have
		 * the superblock buffer locked at that point so it can
		 * become pinned in between there and here.
		 */
		if (XFS_BUF_ISPINNED(bp))
			xfs_log_force(mp, 0, XFS_LOG_FORCE);
	}

	if (flags & SYNC_WAIT)
		XFS_BUF_UNASYNC(bp);
	else
		XFS_BUF_ASYNC(bp);

	return xfs_bwrite(mp, bp);

 out_brelse:
	xfs_buf_relse(bp);
 out:
	return error;
}
/*
 * When remounting a filesystem read-only or freezing the filesystem, we have
 * two phases to execute. This first phase is syncing the data before we
 * quiesce the filesystem, and the second is flushing all the inodes out after
 * we've waited for all the transactions created by the first phase to
 * complete. The second phase ensures that the inodes are written to their
 * location on disk rather than just existing in transactions in the log. This
 * means after a quiesce there is no log replay required to write the inodes to
 * disk (this is the main difference between a sync and a quiesce).
 */
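
/*
 * Illustrative sketch of the expected call order (an assumption about
 * the freeze caller, e.g. the VFS freeze path; not code from this
 * file):
 */
#if 0
	xfs_quiesce_data(mp);	/* phase one: push data to disk */
	/* ... the VFS blocks new transactions here ... */
	xfs_quiesce_attr(mp);	/* phase two: drain and flush metadata */
#endif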
/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int	error;

	/* push non-blocking */
	xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_BDFLUSH);
	XFS_QM_DQSYNC(mp, SYNC_BDFLUSH);
	xfs_filestream_flush(mp);

	/* push and block */
	xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_WAIT|SYNC_IOWAIT);
	XFS_QM_DQSYNC(mp, SYNC_WAIT);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp, 0);

	/* flush data-only devices */
	if (mp->m_rtdev_targp)
		XFS_bflush(mp->m_rtdev_targp);

	return error;
}
STATIC void
xfs_quiesce_fs(
	struct xfs_mount	*mp)
{
	int	count = 0, pincount;

	xfs_flush_buftarg(mp->m_ddev_targp, 0);
	xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC);

	/*
	 * This loop must run at least twice. The first instance of the loop
	 * will flush most meta data but that will generate more meta data
	 * (typically directory updates), which then must be flushed and
	 * logged before we can write the unmount record.
	 */
	do {
		xfs_sync_inodes(mp, SYNC_ATTR|SYNC_WAIT);
		pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
		if (!pincount) {
			delay(50);
			count++;
		}
	} while (count < 2);
}
/*
 * Second stage of a quiesce. The data is already synced, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* flush inodes and push all remaining buffers out to disk */
	xfs_quiesce_fs(mp);

	ASSERT_ALWAYS(atomic_read(&mp->m_active_trans) == 0);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp, 1);
	if (error)
		xfs_fs_cmn_err(CE_WARN, mp,
				"xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	xfs_log_unmount_write(mp);
	xfs_unmountfs_writesb(mp);
}
/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
	struct xfs_mount *mp,
	void		*data,
	void		(*syncer)(struct xfs_mount *, void *))
{
	struct bhv_vfs_sync_work *work;

	work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
	INIT_LIST_HEAD(&work->w_list);
	work->w_syncer = syncer;
	work->w_data = data;
	work->w_mount = mp;
	spin_lock(&mp->m_sync_lock);
	list_add_tail(&work->w_list, &mp->m_sync_list);
	spin_unlock(&mp->m_sync_lock);
	wake_up_process(mp->m_sync_task);
}
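
/*
 * Usage sketch (illustrative; mirrors xfs_flush_inode() below): the
 * caller takes whatever reference the callback needs before queueing,
 * the callback drops it, and xfssyncd frees the work item itself:
 */
#if 0
	igrab(inode);		/* reference for the callback to drop */
	xfs_syncd_queue_work(mp, inode, xfs_flush_inode_work);
#endif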
/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inode_work(
	struct xfs_mount *mp,
	void		*arg)
{
	struct inode	*inode = arg;

	filemap_flush(inode->i_mapping);
	iput(inode);
}
void
xfs_flush_inode(
	xfs_inode_t	*ip)
{
	struct inode	*inode = VFS_I(ip);

	igrab(inode);
	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work);
	delay(msecs_to_jiffies(500));
}
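
/*
 * Sketch of the intended ENOSPC retry, with a hypothetical helper
 * standing in for the real allocation path (an assumption, not code
 * from this file):
 */
#if 0
	error = some_delalloc_attempt(ip);	/* hypothetical allocator */
	if (error == ENOSPC) {
		/* push this inode's delalloc pages, then retry once */
		xfs_flush_inode(ip);
		error = some_delalloc_attempt(ip);
	}
#endif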
/*
 * This is the "bigger hammer" version of xfs_flush_inode_work...
 * (IOW, "If at first you don't succeed, use a Bigger Hammer").
 */
STATIC void
xfs_flush_device_work(
	struct xfs_mount *mp,
	void		*arg)
{
	struct inode	*inode = arg;

	sync_blockdev(mp->m_super->s_bdev);
	iput(inode);
}
void
xfs_flush_device(
	xfs_inode_t	*ip)
{
	struct inode	*inode = VFS_I(ip);

	igrab(inode);
	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work);
	delay(msecs_to_jiffies(500));
	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}
/*
 * Every sync period we need to unpin all items, reclaim inodes, sync
 * quota and write out the superblock. We might need to cover the log
 * to indicate it is idle.
 */
STATIC void
xfs_sync_worker(
	struct xfs_mount *mp,
	void		*unused)
{
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
		xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
		/* dgc: errors ignored here */
		error = XFS_QM_DQSYNC(mp, SYNC_BDFLUSH);
		error = xfs_sync_fsdata(mp, SYNC_BDFLUSH);
		if (xfs_log_need_covered(mp))
			error = xfs_commit_dummy_trans(mp, XFS_LOG_FORCE);
	}
	mp->m_sync_seq++;
	wake_up(&mp->m_wait_single_sync_task);
}
STATIC int
xfssyncd(
	void			*arg)
{
	struct xfs_mount	*mp = arg;
	long			timeleft;
	bhv_vfs_sync_work_t	*work, *n;
	LIST_HEAD		(tmp);

	set_freezable();
	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
	for (;;) {
		timeleft = schedule_timeout_interruptible(timeleft);
		/* swsusp */
		try_to_freeze();
		if (kthread_should_stop() && list_empty(&mp->m_sync_list))
			break;

		spin_lock(&mp->m_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&mp->m_sync_list)) {
			if (!timeleft)
				timeleft = xfs_syncd_centisecs *
							msecs_to_jiffies(10);
			INIT_LIST_HEAD(&mp->m_sync_work.w_list);
			list_add_tail(&mp->m_sync_work.w_list,
					&mp->m_sync_list);
		}
		list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
			list_move(&work->w_list, &tmp);
		spin_unlock(&mp->m_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(mp, work->w_data);
			list_del(&work->w_list);
			if (work == &mp->m_sync_work)
				continue;
			kmem_free(work);
		}
	}

	return 0;
}
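
/*
 * Timing note for the loop above: xfs_syncd_centisecs is in units of
 * centiseconds, so one period is xfs_syncd_centisecs * msecs_to_jiffies(10)
 * jiffies.  As a worked example, assuming the usual default of 3000
 * centiseconds and HZ=1000: 3000 * 10 = 30000 jiffies, i.e. xfssyncd
 * wakes up every 30 seconds unless queued work arrives first.
 */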
int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	mp->m_sync_work.w_syncer = xfs_sync_worker;
	mp->m_sync_work.w_mount = mp;
	mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
	if (IS_ERR(mp->m_sync_task))
		return -PTR_ERR(mp->m_sync_task);
	return 0;
}
void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	kthread_stop(mp->m_sync_task);
}
int
xfs_reclaim_inode(
	xfs_inode_t	*ip,
	int		locked,
	int		sync_mode)
{
	xfs_perag_t	*pag = xfs_get_perag(ip->i_mount, ip->i_ino);

	/* The hash lock here protects a thread in xfs_iget_core from
	 * racing with us on linking the inode back with a vnode.
	 * Once we have the XFS_IRECLAIM flag set it will not touch
	 * us.
	 */
	write_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	if (__xfs_iflags_test(ip, XFS_IRECLAIM) ||
	    !__xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {
		spin_unlock(&ip->i_flags_lock);
		write_unlock(&pag->pag_ici_lock);
		if (locked) {
			xfs_ifunlock(ip);
			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	write_unlock(&pag->pag_ici_lock);
	xfs_put_perag(ip->i_mount, pag);
	/*
	 * If the inode is still dirty, then flush it out.  If the inode
	 * is not in the AIL, then it will be OK to flush it delwri as
	 * long as xfs_iflush() does not keep any references to the inode.
	 * We leave that decision up to xfs_iflush() since it has the
	 * knowledge of whether it's OK to simply do a delwri flush of
	 * the inode or whether we need to wait until the inode is
	 * pulled from the AIL.
	 * We get the flush lock regardless, though, just to make sure
	 * we don't free it while it is being flushed.
	 */
	if (!locked) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_iflock(ip);
	}

	/*
	 * In the case of a forced shutdown we rely on xfs_iflush() to
	 * wait for the inode to be unpinned before returning an error.
	 */
	if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) {
		/* synchronize with xfs_iflush_done */
		xfs_iflock(ip);
		xfs_ifunlock(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_ireclaim(ip);
	return 0;
}
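
/*
 * Calling convention sketch (illustrative): with locked == 0 the
 * function takes XFS_ILOCK_EXCL and the flush lock itself, so a
 * background walker can simply do:
 *
 *	xfs_reclaim_inode(ip, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
 *
 * With locked != 0 the caller must already hold both locks, and they
 * are released on the "already in reclaim" exit above.
 */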
/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_perag_t	*pag = xfs_get_perag(mp, ip->i_ino);

	read_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	radix_tree_tag_set(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	read_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);
}
void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
}
void
xfs_inode_clear_reclaim_tag(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_perag_t	*pag = xfs_get_perag(mp, ip->i_ino);

	read_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_clear_reclaim_tag(mp, pag, ip);
	spin_unlock(&ip->i_flags_lock);
	read_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);
}
STATIC void
xfs_reclaim_inodes_ag(
	xfs_mount_t	*mp,
	int		ag,
	int		noblock,
	int		mode)
{
	xfs_inode_t	*ip = NULL;
	xfs_perag_t	*pag = &mp->m_perag[ag];
	int		nr_found;
	uint32_t	first_index;
	int		skipped;

restart:
	first_index = 0;
	skipped = 0;
	do {
		/*
		 * use a gang lookup to find the next inode in the tree
		 * as the tree is sparse and a gang lookup walks to find
		 * the number of objects requested.
		 */
		read_lock(&pag->pag_ici_lock);
		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
					(void **)&ip, first_index, 1,
					XFS_ICI_RECLAIM_TAG);

		if (!nr_found) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}
		/*
		 * Update the index for the next lookup. Catch overflows
		 * into the next AG range which can occur if we have inodes
		 * in the last block of the AG and we are currently
		 * pointing to the last inode.
		 */
		first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
		if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}
		ASSERT(xfs_iflags_test(ip, (XFS_IRECLAIMABLE|XFS_IRECLAIM)));

		/* ignore if already under reclaim */
		if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
			read_unlock(&pag->pag_ici_lock);
			continue;
		}

		if (noblock) {
			if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
				read_unlock(&pag->pag_ici_lock);
				continue;
			}
			if (xfs_ipincount(ip) ||
			    !xfs_iflock_nowait(ip)) {
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
				read_unlock(&pag->pag_ici_lock);
				continue;
			}
		}
		read_unlock(&pag->pag_ici_lock);

		/*
		 * hmmm - this is an inode already in reclaim. Do
		 * we even bother catching it here?
		 */
		if (xfs_reclaim_inode(ip, noblock, mode))
			skipped++;
	} while (nr_found);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return;
}
int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		noblock,
	int		mode)
{
	int		i;

	for (i = 0; i < mp->m_sb.sb_agcount; i++) {
		if (!mp->m_perag[i].pag_ici_init)
			continue;
		xfs_reclaim_inodes_ag(mp, i, noblock, mode);
	}
	return 0;
}