/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_dir2.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_mru_cache.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_utils.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_rw.h"
#include "xfs_quota.h"

#include <linux/kthread.h>
#include <linux/freezer.h>
/*
 * Sync all the inodes in the given AG according to the
 * direction given by the flags.
 */
STATIC int
xfs_sync_inodes_ag(
	xfs_mount_t	*mp,
	int		ag,
	int		flags)
{
	xfs_perag_t	*pag = &mp->m_perag[ag];
	int		nr_found;
	uint32_t	first_index = 0;
	int		error = 0;
	int		last_error = 0;

	do {
		struct inode	*inode;
		xfs_inode_t	*ip = NULL;
		int		lock_flags = XFS_ILOCK_SHARED;

		/*
		 * use a gang lookup to find the next inode in the tree
		 * as the tree is sparse and a gang lookup walks to find
		 * the number of objects requested.
		 */
		read_lock(&pag->pag_ici_lock);
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
				(void**)&ip, first_index, 1);
		if (!nr_found) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}

		/*
		 * Update the index for the next lookup. Catch overflows
		 * into the next AG range which can occur if we have inodes
		 * in the last block of the AG and we are currently
		 * pointing to the last inode.
		 */
		first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
		if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}

		/* nothing to sync during shutdown */
		if (XFS_FORCED_SHUTDOWN(mp)) {
			read_unlock(&pag->pag_ici_lock);
			return 0;
		}

		/*
		 * If we can't get a reference on the inode, it must be
		 * in reclaim. Leave it for the reclaim code to flush.
		 */
		inode = VFS_I(ip);
		if (!igrab(inode)) {
			read_unlock(&pag->pag_ici_lock);
			continue;
		}
		read_unlock(&pag->pag_ici_lock);

		/* avoid new or bad inodes */
		if (is_bad_inode(inode) ||
		    xfs_iflags_test(ip, XFS_INEW)) {
			IRELE(ip);
			continue;
		}

		/*
		 * If we have to flush data or wait for I/O completion
		 * we need to hold the iolock.
		 */
		if (flags & SYNC_DELWRI) {
			if (VN_DIRTY(inode)) {
				if (flags & SYNC_TRYLOCK) {
					if (xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED))
						lock_flags |= XFS_IOLOCK_SHARED;
				} else {
					xfs_ilock(ip, XFS_IOLOCK_SHARED);
					lock_flags |= XFS_IOLOCK_SHARED;
				}
				if (lock_flags & XFS_IOLOCK_SHARED) {
					error = xfs_flush_pages(ip, 0, -1,
							(flags & SYNC_WAIT) ? 0
								: XFS_B_ASYNC,
							FI_NONE);
				}
			}
			if (VN_CACHED(inode) && (flags & SYNC_IOWAIT))
				xfs_ioend_wait(ip);
		}
		xfs_ilock(ip, XFS_ILOCK_SHARED);

		if ((flags & SYNC_ATTR) && !xfs_inode_clean(ip)) {
			if (flags & SYNC_WAIT) {
				xfs_iflock(ip);
				if (!xfs_inode_clean(ip))
					error = xfs_iflush(ip, XFS_IFLUSH_SYNC);
				else
					xfs_ifunlock(ip);
			} else if (xfs_iflock_nowait(ip)) {
				if (!xfs_inode_clean(ip))
					error = xfs_iflush(ip, XFS_IFLUSH_DELWRI);
				else
					xfs_ifunlock(ip);
			}
		}
		xfs_iput(ip, lock_flags);

		if (error)
			last_error = error;
		/*
		 * bail out if the filesystem is corrupted.
		 */
		if (error == EFSCORRUPTED)
			return XFS_ERROR(error);
	} while (nr_found);

	return last_error;
}
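/*
 * Example (illustrative sketch, not kernel code): the cursor advance in
 * the walk above relies on unsigned 32-bit wraparound. If the inode just
 * visited has the largest possible agino, "agino + 1" wraps to a smaller
 * value, which is how the walk notices it would otherwise run on into
 * the next AG's number space:
 *
 *	#include <stdint.h>
 *	#include <assert.h>
 *
 *	int main(void)
 *	{
 *		uint32_t agino = UINT32_MAX;	// last possible agino
 *		uint32_t next = agino + 1;	// wraps to 0
 *
 *		assert(next < agino);	// wrap detected: stop the walk
 *		return 0;
 *	}
 */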
int
xfs_sync_inodes(
	xfs_mount_t	*mp,
	int		flags)
{
	int		error;
	int		last_error;
	int		i;
	int		lflags = XFS_LOG_FORCE;

	if (mp->m_flags & XFS_MOUNT_RDONLY)
		return 0;
	error = 0;
	last_error = 0;

	if (flags & SYNC_WAIT)
		lflags |= XFS_LOG_SYNC;

	for (i = 0; i < mp->m_sb.sb_agcount; i++) {
		if (!mp->m_perag[i].pag_ici_init)
			continue;
		error = xfs_sync_inodes_ag(mp, i, flags);
		if (error)
			last_error = error;
		if (error == EFSCORRUPTED)
			break;
	}
	if (flags & SYNC_DELWRI)
		xfs_log_force(mp, 0, lflags);

	return XFS_ERROR(last_error);
}
STATIC int
xfs_commit_dummy_trans(
	struct xfs_mount	*mp,
	uint			log_flags)
{
	struct xfs_inode	*ip = mp->m_rootip;
	struct xfs_trans	*tp;
	int			error;

	/*
	 * Put a dummy transaction in the log to tell recovery
	 * that all others are OK.
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
	error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_ihold(tp, ip);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	/* XXX(hch): ignoring the error here.. */
	error = xfs_trans_commit(tp, 0);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	xfs_log_force(mp, 0, log_flags);
	return 0;
}
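/*
 * Usage note (illustrative, taken from the sync worker later in this
 * file): when the log is otherwise idle, the periodic worker commits
 * this dummy transaction so log recovery can tell that the filesystem
 * was consistent at that point:
 *
 *	if (xfs_log_need_covered(mp))
 *		error = xfs_commit_dummy_trans(mp, XFS_LOG_FORCE);
 */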
int
xfs_sync_fsdata(
	struct xfs_mount	*mp,
	int			flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf_log_item	*bip;
	int			error = 0;

	/*
	 * If this is xfssyncd() then only sync the superblock if we can
	 * lock it without sleeping and it is not pinned.
	 */
	if (flags & SYNC_BDFLUSH) {
		ASSERT(!(flags & SYNC_WAIT));

		bp = xfs_getsb(mp, XFS_BUF_TRYLOCK);
		if (!bp)
			goto out;

		bip = XFS_BUF_FSPRIVATE(bp, struct xfs_buf_log_item *);
		if (!bip || !xfs_buf_item_dirty(bip) || XFS_BUF_ISPINNED(bp))
			goto out_brelse;
	} else {
		bp = xfs_getsb(mp, 0);

		/*
		 * If the buffer is pinned then push on the log so we won't
		 * get stuck waiting in the write for someone, maybe
		 * ourselves, to flush the log.
		 *
		 * Even though we just pushed the log above, we did not have
		 * the superblock buffer locked at that point so it can
		 * become pinned in between there and here.
		 */
		if (XFS_BUF_ISPINNED(bp))
			xfs_log_force(mp, 0, XFS_LOG_FORCE);
	}

	if (flags & SYNC_WAIT)
		XFS_BUF_UNASYNC(bp);
	else
		XFS_BUF_ASYNC(bp);

	return xfs_bwrite(mp, bp);

 out_brelse:
	xfs_buf_relse(bp);
 out:
	return error;
}
/*
 * When remounting a filesystem read-only or freezing the filesystem, we have
 * two phases to execute. This first phase is syncing the data before we
 * quiesce the filesystem, and the second is flushing all the inodes out after
 * we've waited for all the transactions created by the first phase to
 * complete. The second phase ensures that the inodes are written to their
 * location on disk rather than just existing in transactions in the log. This
 * means after a quiesce there is no log replay required to write the inodes to
 * disk (this is the main difference between a sync and a quiesce).
 */
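/*
 * Rough sketch (illustrative only, simplified; the real callers live in
 * the VFS-facing code, not in this file) of the two-phase ordering
 * described above:
 *
 *	xfs_quiesce_data(mp);	// phase 1: flush and wait on data
 *	// ... transactions from phase 1 drain ...
 *	xfs_quiesce_attr(mp);	// phase 2: flush inodes/metadata and
 *				// write the unmount record
 */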
/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int error;

	/* push non-blocking */
	xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_BDFLUSH);
	xfs_qm_sync(mp, SYNC_BDFLUSH);
	xfs_filestream_flush(mp);

	/* push and block */
	xfs_sync_inodes(mp, SYNC_DELWRI|SYNC_WAIT|SYNC_IOWAIT);
	xfs_qm_sync(mp, SYNC_WAIT);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp, 0);

	/* flush data-only devices */
	if (mp->m_rtdev_targp)
		XFS_bflush(mp->m_rtdev_targp);

	return error;
}
STATIC void
xfs_quiesce_fs(
	struct xfs_mount	*mp)
{
	int	count = 0, pincount;

	xfs_flush_buftarg(mp->m_ddev_targp, 0);
	xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC);

	/*
	 * This loop must run at least twice. The first instance of the loop
	 * will flush most metadata but that will generate more metadata
	 * (typically directory updates), which then must be flushed and
	 * logged before we can write the unmount record.
	 */
	do {
		xfs_sync_inodes(mp, SYNC_ATTR|SYNC_WAIT);
		pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
		if (!pincount) {
			delay(50);
			count++;
		}
	} while (count < 2);
}
/*
 * Second stage of a quiesce. The data is already synced, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* flush inodes and push all remaining buffers out to disk */
	xfs_quiesce_fs(mp);

	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp, 1);
	if (error)
		xfs_fs_cmn_err(CE_WARN, mp,
				"xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	xfs_log_unmount_write(mp);
	xfs_unmountfs_writesb(mp);
}
/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
	struct xfs_mount *mp,
	void		*data,
	void		(*syncer)(struct xfs_mount *, void *),
	struct completion *completion)
{
	struct xfs_sync_work *work;

	work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
	INIT_LIST_HEAD(&work->w_list);
	work->w_syncer = syncer;
	work->w_data = data;
	work->w_mount = mp;
	work->w_completion = completion;
	spin_lock(&mp->m_sync_lock);
	list_add_tail(&work->w_list, &mp->m_sync_list);
	spin_unlock(&mp->m_sync_lock);
	wake_up_process(mp->m_sync_task);
}
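/*
 * Usage sketch (illustrative): callers that need to block pair this
 * with an on-stack completion so they sleep until xfssyncd has run the
 * work item, as xfs_flush_inodes() below does. "my_syncer" and "data"
 * are placeholders for a caller-supplied callback and its argument:
 *
 *	DECLARE_COMPLETION_ONSTACK(completion);
 *
 *	xfs_syncd_queue_work(mp, data, my_syncer, &completion);
 *	wait_for_completion(&completion);
 */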
/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inodes_work(
	struct xfs_mount *mp,
	void		*arg)
{
	struct inode	*inode = arg;
	xfs_sync_inodes(mp, SYNC_DELWRI | SYNC_TRYLOCK);
	xfs_sync_inodes(mp, SYNC_DELWRI | SYNC_TRYLOCK | SYNC_IOWAIT);
	iput(inode);
}

void
xfs_flush_inodes(
	xfs_inode_t	*ip)
{
	struct inode	*inode = VFS_I(ip);
	DECLARE_COMPLETION_ONSTACK(completion);

	igrab(inode);
	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work,
			&completion);
	wait_for_completion(&completion);
	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}
/*
 * Every sync period we need to unpin all items, reclaim inodes, sync
 * quota and write out the superblock. We might need to cover the log
 * to indicate it is idle.
 */
STATIC void
xfs_sync_worker(
	struct xfs_mount *mp,
	void		*unused)
{
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
		xfs_reclaim_inodes(mp, 0, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
		/* dgc: errors ignored here */
		error = xfs_qm_sync(mp, SYNC_BDFLUSH);
		error = xfs_sync_fsdata(mp, SYNC_BDFLUSH);
		if (xfs_log_need_covered(mp))
			error = xfs_commit_dummy_trans(mp, XFS_LOG_FORCE);
	}
	mp->m_sync_seq++;
	wake_up(&mp->m_wait_single_sync_task);
}
STATIC int
xfssyncd(
	void			*arg)
{
	struct xfs_mount	*mp = arg;
	long			timeleft;
	xfs_sync_work_t		*work, *n;
	LIST_HEAD		(tmp);

	set_freezable();
	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
	for (;;) {
		timeleft = schedule_timeout_interruptible(timeleft);
		/* swsusp */
		try_to_freeze();
		if (kthread_should_stop() && list_empty(&mp->m_sync_list))
			break;

		spin_lock(&mp->m_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&mp->m_sync_list)) {
			if (!timeleft)
				timeleft = xfs_syncd_centisecs *
							msecs_to_jiffies(10);
			INIT_LIST_HEAD(&mp->m_sync_work.w_list);
			list_add_tail(&mp->m_sync_work.w_list,
					&mp->m_sync_list);
		}
		list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
			list_move(&work->w_list, &tmp);
		spin_unlock(&mp->m_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(mp, work->w_data);
			list_del(&work->w_list);
			if (work == &mp->m_sync_work)
				continue;
			if (work->w_completion)
				complete(work->w_completion);
			kmem_free(work);
		}
	}

	return 0;
}
int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	mp->m_sync_work.w_syncer = xfs_sync_worker;
	mp->m_sync_work.w_mount = mp;
	mp->m_sync_work.w_completion = NULL;
	mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
	if (IS_ERR(mp->m_sync_task))
		return -PTR_ERR(mp->m_sync_task);
	return 0;
}

void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	kthread_stop(mp->m_sync_task);
}
int
xfs_reclaim_inode(
	xfs_inode_t	*ip,
	int		locked,
	int		sync_mode)
{
	xfs_perag_t	*pag = xfs_get_perag(ip->i_mount, ip->i_ino);

	/* The hash lock here protects a thread in xfs_iget_core from
	 * racing with us on linking the inode back with a vnode.
	 * Once we have the XFS_IRECLAIM flag set it will not touch
	 * us.
	 */
	write_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	if (__xfs_iflags_test(ip, XFS_IRECLAIM) ||
	    !__xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {
		spin_unlock(&ip->i_flags_lock);
		write_unlock(&pag->pag_ici_lock);
		if (locked) {
			xfs_ifunlock(ip);
			xfs_iunlock(ip, XFS_ILOCK_EXCL);
		}
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	write_unlock(&pag->pag_ici_lock);
	xfs_put_perag(ip->i_mount, pag);

	/*
	 * If the inode is still dirty, then flush it out.  If the inode
	 * is not in the AIL, then it will be OK to flush it delwri as
	 * long as xfs_iflush() does not keep any references to the inode.
	 * We leave that decision up to xfs_iflush() since it has the
	 * knowledge of whether it's OK to simply do a delwri flush of
	 * the inode or whether we need to wait until the inode is
	 * pulled from the AIL.
	 * We get the flush lock regardless, though, just to make sure
	 * we don't free it while it is being flushed.
	 */
	if (!locked) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		xfs_iflock(ip);
	}

	/*
	 * In the case of a forced shutdown we rely on xfs_iflush() to
	 * wait for the inode to be unpinned before returning an error.
	 */
	if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) {
		/* synchronize with xfs_iflush_done */
		xfs_iflock(ip);
		xfs_ifunlock(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_ireclaim(ip);
	return 0;
}
/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_perag_t	*pag = xfs_get_perag(mp, ip->i_ino);

	read_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	radix_tree_tag_set(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	read_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);
}
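/*
 * Usage note (illustrative): the tag set here is what lets
 * xfs_reclaim_inodes_ag() below visit only reclaimable inodes via a
 * tagged gang lookup, rather than scanning every inode in the AG:
 *
 *	nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
 *				(void**)&ip, first_index, 1,
 *				XFS_ICI_RECLAIM_TAG);
 */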
void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
}
void
xfs_inode_clear_reclaim_tag(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_perag_t	*pag = xfs_get_perag(mp, ip->i_ino);

	read_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_clear_reclaim_tag(mp, pag, ip);
	spin_unlock(&ip->i_flags_lock);
	read_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);
}
STATIC void
xfs_reclaim_inodes_ag(
	xfs_mount_t	*mp,
	int		ag,
	int		noblock,
	int		mode)
{
	xfs_inode_t	*ip = NULL;
	xfs_perag_t	*pag = &mp->m_perag[ag];
	int		nr_found;
	uint32_t	first_index;
	int		skipped;

restart:
	first_index = 0;
	skipped = 0;
	do {
		/*
		 * use a gang lookup to find the next inode in the tree
		 * as the tree is sparse and a gang lookup walks to find
		 * the number of objects requested.
		 */
		read_lock(&pag->pag_ici_lock);
		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
					(void**)&ip, first_index, 1,
					XFS_ICI_RECLAIM_TAG);
		if (!nr_found) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}

		/*
		 * Update the index for the next lookup. Catch overflows
		 * into the next AG range which can occur if we have inodes
		 * in the last block of the AG and we are currently
		 * pointing to the last inode.
		 */
		first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
		if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}

		/* ignore if already under reclaim */
		if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
			read_unlock(&pag->pag_ici_lock);
			continue;
		}

		if (noblock) {
			if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
				read_unlock(&pag->pag_ici_lock);
				continue;
			}
			if (xfs_ipincount(ip) ||
			    !xfs_iflock_nowait(ip)) {
				xfs_iunlock(ip, XFS_ILOCK_EXCL);
				read_unlock(&pag->pag_ici_lock);
				continue;
			}
		}
		read_unlock(&pag->pag_ici_lock);

		/*
		 * hmmm - this is an inode already in reclaim. Do
		 * we even bother catching it here?
		 */
		if (xfs_reclaim_inode(ip, noblock, mode))
			skipped++;
	} while (nr_found);

	if (skipped) {
		delay(1);
		goto restart;
	}
}
int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		noblock,
	int		mode)
{
	int		i;

	for (i = 0; i < mp->m_sb.sb_agcount; i++) {
		if (!mp->m_perag[i].pag_ici_init)
			continue;
		xfs_reclaim_inodes_ag(mp, i, noblock, mode);
	}
	return 0;
}