/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_mru_cache.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_utils.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_rw.h"

#include <linux/kthread.h>
#include <linux/freezer.h>
/*
 * Sync all the inodes in the given AG according to the
 * direction given by the flags.
 */
STATIC int
xfs_sync_inodes_ag(
	xfs_mount_t	*mp,
	int		ag,
	int		flags)
{
	xfs_perag_t	*pag = &mp->m_perag[ag];
	int		nr_found;
	uint32_t	first_index = 0;
	int		error = 0;
	int		last_error = 0;
	int		fflag = XFS_B_ASYNC;

	if (flags & SYNC_DELWRI)
		fflag = XFS_B_DELWRI;
	if (flags & SYNC_WAIT)
		fflag = 0;		/* synchronous overrides all */

	do {
		struct inode	*inode;
		xfs_inode_t	*ip = NULL;
		int		lock_flags = XFS_ILOCK_SHARED;
		/*
		 * use a gang lookup to find the next inode in the tree
		 * as the tree is sparse and a gang lookup walks to find
		 * the number of objects requested.
		 */
		read_lock(&pag->pag_ici_lock);
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
				(void**)&ip, first_index, 1);

		if (!nr_found) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}
		/*
		 * Update the index for the next lookup. Catch overflows
		 * into the next AG range which can occur if we have inodes
		 * in the last block of the AG and we are currently
		 * pointing to the last inode.
		 */
		first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
		if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}
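		/*
		 * (Concretely: if ip is the last possible inode in this
		 * AG, i_ino + 1 belongs to the next AG, so its AG-relative
		 * inode number wraps to a small value. first_index then
		 * compares lower than the current inode's AG-relative
		 * number and we stop the walk instead of rescanning.)
		 */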
		/* nothing to sync during shutdown */
		if (XFS_FORCED_SHUTDOWN(mp)) {
			read_unlock(&pag->pag_ici_lock);
			return 0;
		}

		/*
		 * If we can't get a reference on the inode, it must be
		 * in reclaim. Leave it for the reclaim code to flush.
		 */
		inode = VFS_I(ip);
		if (!igrab(inode)) {
			read_unlock(&pag->pag_ici_lock);
			continue;
		}
		read_unlock(&pag->pag_ici_lock);
		/* bad inodes are dealt with elsewhere */
		if (is_bad_inode(inode)) {
			IRELE(ip);
			continue;
		}
		/*
		 * If we have to flush data or wait for I/O completion
		 * we need to hold the iolock.
		 */
		if ((flags & SYNC_DELWRI) && VN_DIRTY(inode)) {
			xfs_ilock(ip, XFS_IOLOCK_SHARED);
			lock_flags |= XFS_IOLOCK_SHARED;
			error = xfs_flush_pages(ip, 0, -1, fflag, FI_NONE);
			if (flags & SYNC_IOWAIT)
				vn_iowait(ip);
		}
		xfs_ilock(ip, XFS_ILOCK_SHARED);
		if ((flags & SYNC_ATTR) && !xfs_inode_clean(ip)) {
			if (flags & SYNC_WAIT) {
				xfs_iflock(ip);
				if (!xfs_inode_clean(ip))
					error = xfs_iflush(ip, XFS_IFLUSH_SYNC);
				else
					xfs_ifunlock(ip);
			} else if (xfs_iflock_nowait(ip)) {
				if (!xfs_inode_clean(ip))
					error = xfs_iflush(ip, XFS_IFLUSH_DELWRI);
				else
					xfs_ifunlock(ip);
			}
		}
		xfs_iput(ip, lock_flags);

		if (error)
			last_error = error;
		/*
		 * bail out if the filesystem is corrupted.
		 */
		if (error == EFSCORRUPTED)
			return XFS_ERROR(error);

	} while (nr_found);

	return last_error;
}
int
xfs_sync_inodes(
	xfs_mount_t	*mp,
	int		flags)
{
	int		error;
	int		last_error;
	int		i;
	int		lflags = XFS_LOG_FORCE;

	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;
	error = 0;
	last_error = 0;

	if (flags & SYNC_WAIT)
		lflags |= XFS_LOG_SYNC;

	for (i = 0; i < mp->m_sb.sb_agcount; i++) {
		if (!mp->m_perag[i].pag_ici_init)
			continue;
		error = xfs_sync_inodes_ag(mp, i, flags);
		if (error)
			last_error = error;
		if (error == EFSCORRUPTED)
			break;
	}
	if (flags & SYNC_DELWRI)
		xfs_log_force(mp, 0, lflags);

	return XFS_ERROR(last_error);
}
STATIC int
xfs_commit_dummy_trans(
	struct xfs_mount	*mp,
	uint			log_flags)
{
	struct xfs_inode	*ip = mp->m_rootip;
	struct xfs_trans	*tp;
	int			error;

	/*
	 * Put a dummy transaction in the log to tell recovery
	 * that all others are OK.
	 */
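	/*
	 * Logging the root inode core without actually changing it gives
	 * the transaction something harmless to record; once the commit
	 * is forced to disk, log recovery can treat the log as covered
	 * (idle), with nothing left to replay.
	 */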
	tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
	error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_ihold(tp, ip);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	/* XXX(hch): ignoring the error here.. */
	error = xfs_trans_commit(tp, 0);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_log_force(mp, 0, log_flags);
	return 0;
}
int
xfs_sync_fsdata(
	struct xfs_mount	*mp,
	int			flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf_log_item	*bip;
	int			error = 0;

	/*
	 * If this is xfssyncd() then only sync the superblock if we can
	 * lock it without sleeping and it is not pinned.
	 */
	if (flags & SYNC_BDFLUSH) {
		ASSERT(!(flags & SYNC_WAIT));

		bp = xfs_getsb(mp, XFS_BUF_TRYLOCK);
		if (!bp)
			goto out;

		bip = XFS_BUF_FSPRIVATE(bp, struct xfs_buf_log_item *);
		if (!bip || !xfs_buf_item_dirty(bip) || XFS_BUF_ISPINNED(bp))
			goto out_brelse;
	} else {
		bp = xfs_getsb(mp, 0);

		/*
		 * If the buffer is pinned then push on the log so we won't
		 * get stuck waiting in the write for someone, maybe
		 * ourselves, to flush the log.
		 *
		 * Even though we just pushed the log above, we did not have
		 * the superblock buffer locked at that point so it can
		 * become pinned in between there and here.
		 */
		if (XFS_BUF_ISPINNED(bp))
			xfs_log_force(mp, 0, XFS_LOG_FORCE);
	}

	if (flags & SYNC_WAIT)
		XFS_BUF_UNASYNC(bp);
	else
		XFS_BUF_ASYNC(bp);

	return xfs_bwrite(mp, bp);

 out_brelse:
	xfs_buf_relse(bp);
 out:
	return error;
}
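/*
 * Callers use the flags to pick a tradeoff: SYNC_BDFLUSH makes the
 * periodic syncer back off if the superblock buffer is busy or pinned,
 * SYNC_WAIT forces a synchronous write, and 0 blocks for the buffer
 * lock but writes the superblock out asynchronously.
 */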
/*
 * When remounting a filesystem read-only or freezing the filesystem, we have
 * two phases to execute. This first phase is syncing the data before we
 * quiesce the filesystem, and the second is flushing all the inodes out after
 * we've waited for all the transactions created by the first phase to
 * complete. The second phase ensures that the inodes are written to their
 * location on disk rather than just existing in transactions in the log. This
 * means after a quiesce there is no log replay required to write the inodes to
 * disk (this is the main difference between a sync and a quiesce).
 */

/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int			error;

	/* push non-blocking */
	xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_BDFLUSH);
	XFS_QM_DQSYNC(mp, SYNC_BDFLUSH);
	xfs_filestream_flush(mp);

	/* push and block */
	xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_WAIT|SYNC_IOWAIT);
	XFS_QM_DQSYNC(mp, SYNC_WAIT);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp, 0);

	/* flush data-only devices */
	if (mp->m_rtdev_targp)
		XFS_bflush(mp->m_rtdev_targp);

	return error;
}
STATIC void
xfs_quiesce_fs(
	struct xfs_mount	*mp)
{
	int	count = 0, pincount;

	xfs_flush_buftarg(mp->m_ddev_targp, 0);
	xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC);

	/*
	 * This loop must run at least twice. The first instance of the loop
	 * will flush most metadata, but that will generate more metadata
	 * (typically directory updates), which then must be flushed and
	 * logged before we can write the unmount record.
	 */
	do {
		xfs_sync_inodes(mp, SYNC_ATTR|SYNC_WAIT);
		pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
		if (!pincount) {
			delay(50);
			count++;
		}
	} while (count < 2);
}
/*
 * Second stage of a quiesce. The data is already synced, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* flush inodes and push all remaining buffers out to disk */
	xfs_quiesce_fs(mp);

	ASSERT_ALWAYS(atomic_read(&mp->m_active_trans) == 0);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp, 1);
	if (error)
		xfs_fs_cmn_err(CE_WARN, mp,
				"xfs_quiesce_attr: failed to log sb changes. "
				"Frozen image may not be consistent.");
	xfs_log_unmount_write(mp);
	xfs_unmountfs_writesb(mp);
}
/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
	struct xfs_mount *mp,
	void		*data,
	void		(*syncer)(struct xfs_mount *, void *))
{
	struct bhv_vfs_sync_work *work;

	work = kmem_alloc(sizeof(struct bhv_vfs_sync_work), KM_SLEEP);
	INIT_LIST_HEAD(&work->w_list);
	work->w_syncer = syncer;
	work->w_data = data;
	work->w_mount = mp;
	spin_lock(&mp->m_sync_lock);
	list_add_tail(&work->w_list, &mp->m_sync_list);
	spin_unlock(&mp->m_sync_lock);
	wake_up_process(mp->m_sync_task);
}
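/*
 * A queued work item is freed by xfssyncd() after it has run, so callers
 * only have to keep *data alive until then; the inode flushers below take
 * an igrab() reference and drop it in the work function itself.
 */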
/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations. At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inode_work(
	struct xfs_mount *mp,
	void		*arg)
{
	struct inode	*inode = arg;
	filemap_flush(inode->i_mapping);
	iput(inode);
}
void
xfs_flush_inode(
	xfs_inode_t	*ip)
{
	struct inode	*inode = VFS_I(ip);

	igrab(inode);
	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inode_work);
	delay(msecs_to_jiffies(500));
}
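/*
 * Note that xfs_flush_inode() does not wait for the queued work to
 * complete; the fixed 500ms delay just gives writeback a chance to free
 * some space before the caller retries its allocation.
 */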
/*
 * This is the "bigger hammer" version of xfs_flush_inode_work...
 * (IOW, "If at first you don't succeed, use a Bigger Hammer").
 */
STATIC void
xfs_flush_device_work(
	struct xfs_mount *mp,
	void		*arg)
{
	struct inode	*inode = arg;
	sync_blockdev(mp->m_super->s_bdev);
	iput(inode);
}
void
xfs_flush_device(
	xfs_inode_t	*ip)
{
	struct inode	*inode = VFS_I(ip);

	igrab(inode);
	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_device_work);
	delay(msecs_to_jiffies(500));
	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}
/*
 * Every sync period we need to unpin all items, reclaim inodes, sync
 * quota and write out the superblock. We might need to cover the log
 * to indicate it is idle.
 */
STATIC void
xfs_sync_worker(
	struct xfs_mount *mp,
	void		*unused)
{
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
		xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
		/* dgc: errors ignored here */
		error = XFS_QM_DQSYNC(mp, SYNC_BDFLUSH);
		error = xfs_sync_fsdata(mp, SYNC_BDFLUSH);
		if (xfs_log_need_covered(mp))
			error = xfs_commit_dummy_trans(mp, XFS_LOG_FORCE);
	}
	mp->m_sync_seq++;
	wake_up(&mp->m_wait_single_sync_task);
}
STATIC int
xfssyncd(
	void			*arg)
{
	struct xfs_mount	*mp = arg;
	long			timeleft;
	bhv_vfs_sync_work_t	*work, *n;
	LIST_HEAD		(tmp);

	set_freezable();
	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
	for (;;) {
		timeleft = schedule_timeout_interruptible(timeleft);
		/* swsusp */
		try_to_freeze();
		if (kthread_should_stop() && list_empty(&mp->m_sync_list))
			break;

		spin_lock(&mp->m_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&mp->m_sync_list)) {
			if (!timeleft)
				timeleft = xfs_syncd_centisecs *
							msecs_to_jiffies(10);
			INIT_LIST_HEAD(&mp->m_sync_work.w_list);
			list_add_tail(&mp->m_sync_work.w_list,
					&mp->m_sync_list);
		}
		list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
			list_move(&work->w_list, &tmp);
		spin_unlock(&mp->m_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(mp, work->w_data);
			list_del(&work->w_list);
			if (work == &mp->m_sync_work)
				continue;
			kmem_free(work);
		}
	}

	return 0;
}
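/*
 * The embedded mp->m_sync_work item is requeued every sync period to run
 * xfs_sync_worker(); it is the one work item xfssyncd() must not
 * kmem_free(), hence the check in the loop above.
 */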
int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	mp->m_sync_work.w_syncer = xfs_sync_worker;
	mp->m_sync_work.w_mount = mp;
	mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
	if (IS_ERR(mp->m_sync_task))
		return -PTR_ERR(mp->m_sync_task);
	return 0;
}
void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	kthread_stop(mp->m_sync_task);
}
int
xfs_reclaim_inode(
	xfs_inode_t	*ip,
	int		locked,
	int		sync_mode)
{
	xfs_perag_t	*pag = xfs_get_perag(ip->i_mount, ip->i_ino);

	/* The hash lock here protects a thread in xfs_iget_core from
	 * racing with us on linking the inode back with a vnode.
	 * Once we have the XFS_IRECLAIM flag set it will not touch
	 * us.
	 */
	write_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	if (__xfs_iflags_test(ip, XFS_IRECLAIM) ||
	    !__xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {
		spin_unlock(&ip->i_flags_lock);
		write_unlock(&pag->pag_ici_lock);
		if (locked) {
			xfs_ifunlock(ip);
			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	write_unlock(&pag->pag_ici_lock);
	xfs_put_perag(ip->i_mount, pag);
	/*
	 * If the inode is still dirty, then flush it out. If the inode
	 * is not in the AIL, then it will be OK to flush it delwri as
	 * long as xfs_iflush() does not keep any references to the inode.
	 * We leave that decision up to xfs_iflush() since it has the
	 * knowledge of whether it's OK to simply do a delwri flush of
	 * the inode or whether we need to wait until the inode is
	 * pulled from the AIL.
	 * We get the flush lock regardless, though, just to make sure
	 * we don't free it while it is being flushed.
	 */
	if (!locked) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_iflock(ip);
	}

	/*
	 * In the case of a forced shutdown we rely on xfs_iflush() to
	 * wait for the inode to be unpinned before returning an error.
	 */
	if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) {
		/*
		 * synchronize with xfs_iflush_done: retaking the flush
		 * lock blocks until the flush I/O has completed and
		 * released it, so the inode cannot still be under
		 * writeback when we free it below.
		 */
		xfs_iflock(ip);
		xfs_ifunlock(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_ireclaim(ip);
	return 0;
}
/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_perag_t	*pag = xfs_get_perag(mp, ip->i_ino);

	read_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	radix_tree_tag_set(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	read_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);
}
void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
}
void
xfs_inode_clear_reclaim_tag(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_perag_t	*pag = xfs_get_perag(mp, ip->i_ino);

	read_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_clear_reclaim_tag(mp, pag, ip);
	spin_unlock(&ip->i_flags_lock);
	read_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);
}
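/*
 * The bare __xfs_inode_clear_reclaim_tag() variant exists for callers
 * that already hold pag_ici_lock and i_flags_lock; the wrapper above
 * acquires and drops those locks around it.
 */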
STATIC void
xfs_reclaim_inodes_ag(
	xfs_mount_t	*mp,
	int		ag,
	int		noblock,
	int		mode)
{
	xfs_inode_t	*ip = NULL;
	xfs_perag_t	*pag = &mp->m_perag[ag];
	int		nr_found;
	uint32_t	first_index;
	int		skipped;

restart:
	first_index = 0;
	skipped = 0;
	do {
		/*
		 * use a gang lookup to find the next inode in the tree
		 * as the tree is sparse and a gang lookup walks to find
		 * the number of objects requested.
		 */
		read_lock(&pag->pag_ici_lock);
		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
					(void**)&ip, first_index, 1,
					XFS_ICI_RECLAIM_TAG);

		if (!nr_found) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}
		/*
		 * Update the index for the next lookup. Catch overflows
		 * into the next AG range which can occur if we have inodes
		 * in the last block of the AG and we are currently
		 * pointing to the last inode.
		 */
		first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
		if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}
		ASSERT(xfs_iflags_test(ip, (XFS_IRECLAIMABLE|XFS_IRECLAIM)));

		/* ignore if already under reclaim */
		if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
			read_unlock(&pag->pag_ici_lock);
			continue;
		}

		if (noblock) {
			if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
				read_unlock(&pag->pag_ici_lock);
				continue;
			}
			if (xfs_ipincount(ip) ||
			    !xfs_iflock_nowait(ip)) {
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
				read_unlock(&pag->pag_ici_lock);
				continue;
			}
		}
		read_unlock(&pag->pag_ici_lock);

		/*
		 * hmmm - this is an inode already in reclaim. Do
		 * we even bother catching it here?
		 */
		if (xfs_reclaim_inode(ip, noblock, mode))
			skipped++;
	} while (nr_found);

	if (skipped) {
		delay(1);
		goto restart;
	}
}
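/*
 * xfs_reclaim_inode() returns nonzero when it loses a race and the inode
 * is already being reclaimed elsewhere; such inodes bump "skipped" above,
 * and the AG walk is restarted after a short delay so nothing tagged for
 * reclaim is left behind.
 */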
int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		noblock,
	int		mode)
{
	int		i;

	for (i = 0; i < mp->m_sb.sb_agcount; i++) {
		if (!mp->m_perag[i].pag_ici_init)
			continue;
		xfs_reclaim_inodes_ag(mp, i, noblock, mode);
	}
	return 0;
}