/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs_types.h"
#include "xfs_trans.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_btree.h"
#include "xfs_dir2_sf.h"
#include "xfs_attr_sf.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_mru_cache.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_utils.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

STATIC struct xfs_inode *
xfs_inode_ag_lookup(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	uint32_t		*first_index,
	int			tag)
{
	int			nr_found;
	struct xfs_inode	*ip;

	/*
	 * Use a gang lookup to find the next inode in the tree
	 * as the tree is sparse and a gang lookup walks to find
	 * the number of objects requested.
	 */
	if (tag == XFS_ICI_NO_TAG) {
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
				(void **)&ip, *first_index, 1);
	} else {
		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
				(void **)&ip, *first_index, 1, tag);
	}
	if (!nr_found)
		return NULL;

	/*
	 * Update the index for the next lookup. Catch overflows
	 * into the next AG range which can occur if we have inodes
	 * in the last block of the AG and we are currently
	 * pointing to the last inode.
	 */
	*first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
	if (*first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
		return NULL;
	return ip;
}

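/*
 * Illustrative sketch, not part of the original file: how a caller can
 * drive xfs_inode_ag_lookup() to visit every cached inode in one AG.
 * The helper name is an assumption, and locking is elided for brevity;
 * the real walk below takes pag->pag_ici_lock around each lookup.
 */
STATIC void
example_walk_ag(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag)
{
	uint32_t		first_index = 0;
	struct xfs_inode	*ip;

	/* each lookup advances first_index past the inode it returned */
	while ((ip = xfs_inode_ag_lookup(mp, pag, &first_index,
					 XFS_ICI_NO_TAG)) != NULL) {
		/* per-inode processing would go here */
	}
}
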
STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	xfs_agnumber_t		ag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags,
	int			tag,
	int			exclusive)
{
	struct xfs_perag	*pag = &mp->m_perag[ag];
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;

restart:
	skipped = 0;
	first_index = 0;
	do {
		int		error = 0;
		struct xfs_inode *ip;

		if (exclusive)
			write_lock(&pag->pag_ici_lock);
		else
			read_lock(&pag->pag_ici_lock);
		ip = xfs_inode_ag_lookup(mp, pag, &first_index, tag);
		if (!ip) {
			if (exclusive)
				write_unlock(&pag->pag_ici_lock);
			else
				read_unlock(&pag->pag_ici_lock);
			break;
		}

		/* execute releases pag->pag_ici_lock */
		error = execute(ip, pag, flags);
		if (error == EAGAIN) {
			skipped++;
			continue;
		}
		if (error)
			last_error = error;

		/* bail out if the filesystem is corrupted. */
		if (error == EFSCORRUPTED)
			break;
	} while (1);

	if (skipped) {
		delay(1);
		goto restart;
	}

	xfs_put_perag(mp, pag);
	return last_error;
}

int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags,
	int			tag,
	int			exclusive)
{
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
		if (!mp->m_perag[ag].pag_ici_init)
			continue;
		error = xfs_inode_ag_walk(mp, ag, execute, flags, tag,
						exclusive);
		if (error) {
			last_error = error;
			if (error == EFSCORRUPTED)
				break;
		}
	}
	return XFS_ERROR(last_error);
}

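/*
 * Illustrative sketch, not part of the original file: the contract an
 * execute callback must follow.  It is entered with pag->pag_ici_lock
 * held (shared, or exclusive when the iterator was passed exclusive=1)
 * and must drop that lock before returning.  Returning EAGAIN causes
 * the walk to restart after the pass completes; EFSCORRUPTED aborts
 * the whole iteration.  The callback name is an assumption.
 */
STATIC int
example_execute(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	/* this sketch assumes a shared (non-exclusive) walk */
	read_unlock(&pag->pag_ici_lock);

	/* per-inode work would go here */
	return 0;
}
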
/* must be called with pag_ici_lock held and releases it */
STATIC int
xfs_sync_inode_valid(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag)
{
	struct inode		*inode = VFS_I(ip);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		read_unlock(&pag->pag_ici_lock);
		return EFSCORRUPTED;
	}

	/* If we can't get a reference on the inode, it must be in reclaim. */
	if (!igrab(inode)) {
		read_unlock(&pag->pag_ici_lock);
		return ENOENT;
	}
	read_unlock(&pag->pag_ici_lock);

	if (is_bad_inode(inode) || xfs_iflags_test(ip, XFS_INEW)) {
		IRELE(ip);
		return ENOENT;
	}

	return 0;
}

STATIC int
xfs_sync_inode_data(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	struct address_space	*mapping = inode->i_mapping;
	int			error = 0;

	error = xfs_sync_inode_valid(ip, pag);
	if (error)
		return error;

	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		goto out_wait;

	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
		if (flags & SYNC_TRYLOCK)
			goto out_wait;
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
						0 : XFS_B_ASYNC, FI_NONE);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

 out_wait:
	if (flags & SYNC_WAIT)
		xfs_ioend_wait(ip);
	IRELE(ip);
	return error;
}

STATIC int
xfs_sync_inode_attr(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	int			error = 0;

	error = xfs_sync_inode_valid(ip, pag);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_inode_clean(ip))
		goto out_unlock;
	if (!xfs_iflock_nowait(ip)) {
		if (!(flags & SYNC_WAIT))
			goto out_unlock;
		xfs_iflock(ip);
	}

	if (xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto out_unlock;
	}

	error = xfs_iflush(ip, (flags & SYNC_WAIT) ?
			   XFS_IFLUSH_SYNC : XFS_IFLUSH_DELWRI);

 out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);
	return error;
}

/*
 * Write out pagecache data for the whole filesystem.
 */
int
xfs_sync_data(
	struct xfs_mount	*mp,
	int			flags)
{
	int			error;

	ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);

	error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags,
				      XFS_ICI_NO_TAG, 0);
	if (error)
		return XFS_ERROR(error);

	xfs_log_force(mp, 0,
		      (flags & SYNC_WAIT) ?
		       XFS_LOG_FORCE | XFS_LOG_SYNC :
		       XFS_LOG_FORCE);
	return 0;
}

/*
 * Write out inode metadata (attributes) for the whole filesystem.
 */
int
xfs_sync_attr(
	struct xfs_mount	*mp,
	int			flags)
{
	ASSERT((flags & ~SYNC_WAIT) == 0);

	return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags,
				     XFS_ICI_NO_TAG, 0);
}

STATIC int
xfs_commit_dummy_trans(
	struct xfs_mount	*mp,
	uint			flags)
{
	struct xfs_inode	*ip = mp->m_rootip;
	struct xfs_trans	*tp;
	int			error;
	int			log_flags = XFS_LOG_FORCE;

	if (flags & SYNC_WAIT)
		log_flags |= XFS_LOG_SYNC;

	/*
	 * Put a dummy transaction in the log to tell recovery
	 * that all others are OK.
	 */
	tp = xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
	error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);

	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_ihold(tp, ip);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_trans_commit(tp, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	/* the log force ensures this transaction is pushed to disk */
	xfs_log_force(mp, 0, log_flags);
	return error;
}

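/*
 * Illustrative sketch, not part of the original file: the caller-side
 * pattern (used for real in xfs_sync_fsdata() below) - only put the
 * dummy transaction in the log when the log code reports that it needs
 * covering.  The helper name is an assumption.
 */
STATIC void
example_cover_log(
	struct xfs_mount	*mp)
{
	if (xfs_log_need_covered(mp))
		xfs_commit_dummy_trans(mp, SYNC_WAIT);
}
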
STATIC int
xfs_sync_fsdata(
	struct xfs_mount	*mp,
	int			flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf_log_item	*bip;
	int			error = 0;

	/*
	 * If this is xfssyncd() then only sync the superblock if we can
	 * lock it without sleeping and it is not pinned.
	 */
	if (flags & SYNC_TRYLOCK) {
		ASSERT(!(flags & SYNC_WAIT));

		bp = xfs_getsb(mp, XFS_BUF_TRYLOCK);
		if (!bp)
			goto out;

		bip = XFS_BUF_FSPRIVATE(bp, struct xfs_buf_log_item *);
		if (!bip || !xfs_buf_item_dirty(bip) || XFS_BUF_ISPINNED(bp))
			goto out_brelse;
	} else {
		bp = xfs_getsb(mp, 0);

		/*
		 * If the buffer is pinned then push on the log so we won't
		 * get stuck waiting in the write for someone, maybe
		 * ourselves, to flush the log.
		 *
		 * Even though we just pushed the log above, we did not have
		 * the superblock buffer locked at that point so it can
		 * become pinned in between there and here.
		 */
		if (XFS_BUF_ISPINNED(bp))
			xfs_log_force(mp, 0, XFS_LOG_FORCE);
	}

	if (flags & SYNC_WAIT)
		XFS_BUF_UNASYNC(bp);
	else
		XFS_BUF_ASYNC(bp);

	error = xfs_bwrite(mp, bp);
	if (error)
		return error;

	/*
	 * If this is a data integrity sync make sure all pending buffers
	 * are flushed out for the log coverage check below.
	 */
	if (flags & SYNC_WAIT)
		xfs_flush_buftarg(mp->m_ddev_targp, 1);

	if (xfs_log_need_covered(mp))
		error = xfs_commit_dummy_trans(mp, flags);

	return error;

 out_brelse:
	xfs_buf_relse(bp);
 out:
	return error;
}

/*
 * When remounting a filesystem read-only or freezing the filesystem, we have
 * two phases to execute. This first phase is syncing the data before we
 * quiesce the filesystem, and the second is flushing all the inodes out after
 * we've waited for all the transactions created by the first phase to
 * complete. The second phase ensures that the inodes are written to their
 * location on disk rather than just existing in transactions in the log. This
 * means after a quiesce there is no log replay required to write the inodes to
 * disk (this is the main difference between a sync and a quiesce).
 */

/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int			error;

	/* push non-blocking */
	xfs_sync_data(mp, 0);
	xfs_qm_sync(mp, SYNC_TRYLOCK);

	/* push and block till complete */
	xfs_sync_data(mp, SYNC_WAIT);
	xfs_qm_sync(mp, SYNC_WAIT);

	/* drop inode references pinned by filestreams */
	xfs_filestream_flush(mp);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp, SYNC_WAIT);

	/* flush data-only devices */
	if (mp->m_rtdev_targp)
		XFS_bflush(mp->m_rtdev_targp);

	return error;
}

STATIC void
xfs_quiesce_fs(
	struct xfs_mount	*mp)
{
	int	count = 0, pincount;

	xfs_flush_buftarg(mp->m_ddev_targp, 0);
	xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC);

	/*
	 * This loop must run at least twice. The first instance of the loop
	 * will flush most metadata, but that will generate more metadata
	 * (typically directory updates), which then must be flushed and
	 * logged before we can write the unmount record.
	 */
	do {
		xfs_sync_attr(mp, SYNC_WAIT);
		pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
		if (!pincount) {
			delay(50);
			count++;
		}
	} while (count < 2 && pincount);
}

/*
 * Second stage of a quiesce. The data is already synced, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* flush inodes and push all remaining buffers out to disk */
	xfs_quiesce_fs(mp);

	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp, 1);
	if (error)
		xfs_fs_cmn_err(CE_WARN, mp,
				"xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	xfs_log_unmount_write(mp);
	xfs_unmountfs_writesb(mp);
}

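/*
 * Illustrative sketch, not part of the original file: the order in which
 * a freeze or read-only remount path would issue the two quiesce phases
 * described above.  The helper name is an assumption for illustration.
 */
STATIC void
example_freeze(
	struct xfs_mount	*mp)
{
	/* phase 1: flush data while writers are already blocked */
	xfs_quiesce_data(mp);

	/* phase 2: drain transactions, flush metadata, write unmount record */
	xfs_quiesce_attr(mp);
}
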
/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
	struct xfs_mount *mp,
	void		*data,
	void		(*syncer)(struct xfs_mount *, void *),
	struct completion *completion)
{
	struct xfs_sync_work *work;

	work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
	INIT_LIST_HEAD(&work->w_list);
	work->w_syncer = syncer;
	work->w_data = data;
	work->w_mount = mp;
	work->w_completion = completion;
	spin_lock(&mp->m_sync_lock);
	list_add_tail(&work->w_list, &mp->m_sync_list);
	spin_unlock(&mp->m_sync_lock);
	wake_up_process(mp->m_sync_task);
}

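/*
 * Illustrative sketch, not part of the original file: queueing a syncer
 * and blocking until xfssyncd has run it, the same pattern
 * xfs_flush_inodes() below uses for real.  The helper name is an
 * assumption.
 */
STATIC void
example_queue_and_wait(
	struct xfs_mount	*mp,
	void			*arg,
	void			(*syncer)(struct xfs_mount *, void *))
{
	DECLARE_COMPLETION_ONSTACK(completion);

	xfs_syncd_queue_work(mp, arg, syncer, &completion);
	wait_for_completion(&completion);	/* xfssyncd completes it */
}
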
/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations. At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inodes_work(
	struct xfs_mount *mp,
	void		*arg)
{
	struct inode	*inode = arg;

	xfs_sync_data(mp, SYNC_TRYLOCK);
	xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
	iput(inode);
}

void
xfs_flush_inodes(
	xfs_inode_t	*ip)
{
	struct inode	*inode = VFS_I(ip);
	DECLARE_COMPLETION_ONSTACK(completion);

	igrab(inode);
	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work, &completion);
	wait_for_completion(&completion);
	xfs_log_force(ip->i_mount, (xfs_lsn_t)0, XFS_LOG_FORCE|XFS_LOG_SYNC);
}

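/*
 * Illustrative sketch, not part of the original file: the ENOSPC retry
 * pattern the comment above describes.  A write path whose allocation
 * fails for lack of space flushes delalloc data once and retries; the
 * helper and its try_alloc callback are assumptions for illustration.
 */
STATIC int
example_alloc_retry(
	struct xfs_inode	*ip,
	int			(*try_alloc)(struct xfs_inode *))
{
	int			error;

	error = try_alloc(ip);
	if (error == ENOSPC) {
		xfs_flush_inodes(ip);	/* may free up reserved space */
		error = try_alloc(ip);	/* one retry only */
	}
	return error;
}
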
/*
 * Every sync period we need to unpin all items, reclaim inodes, sync
 * quota and write out the superblock. We might need to cover the log
 * to indicate it is idle.
 */
STATIC void
xfs_sync_worker(
	struct xfs_mount *mp,
	void		*unused)
{
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
		xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
		/* dgc: errors ignored here */
		error = xfs_qm_sync(mp, SYNC_TRYLOCK);
		error = xfs_sync_fsdata(mp, SYNC_TRYLOCK);
	}
	mp->m_sync_seq++;
	wake_up(&mp->m_wait_single_sync_task);
}

STATIC int
xfssyncd(
	void			*arg)
{
	struct xfs_mount	*mp = arg;
	long			timeleft;
	xfs_sync_work_t		*work, *n;
	LIST_HEAD		(tmp);

	set_freezable();
	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
	for (;;) {
		timeleft = schedule_timeout_interruptible(timeleft);
		/* swsusp */
		try_to_freeze();
		if (kthread_should_stop() && list_empty(&mp->m_sync_list))
			break;

		spin_lock(&mp->m_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&mp->m_sync_list)) {
			if (!timeleft)
				timeleft = xfs_syncd_centisecs *
							msecs_to_jiffies(10);
			INIT_LIST_HEAD(&mp->m_sync_work.w_list);
			list_add_tail(&mp->m_sync_work.w_list,
					&mp->m_sync_list);
		}
		list_for_each_entry_safe(work, n, &mp->m_sync_list, w_list)
			list_move(&work->w_list, &tmp);
		spin_unlock(&mp->m_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(mp, work->w_data);
			list_del(&work->w_list);
			if (work == &mp->m_sync_work)
				continue;
			if (work->w_completion)
				complete(work->w_completion);
			kmem_free(work);
		}
	}

	return 0;
}

int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	mp->m_sync_work.w_syncer = xfs_sync_worker;
	mp->m_sync_work.w_mount = mp;
	mp->m_sync_work.w_completion = NULL;
	mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd");
	if (IS_ERR(mp->m_sync_task))
		return -PTR_ERR(mp->m_sync_task);
	return 0;
}

void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	kthread_stop(mp->m_sync_task);
}

void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_perag_t	*pag = xfs_get_perag(mp, ip->i_ino);

	read_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_set_reclaim_tag(pag, ip);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	read_unlock(&pag->pag_ici_lock);
	xfs_put_perag(mp, pag);
}

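/*
 * Illustrative sketch, not part of the original file: once inodes are
 * tagged as above, a walker can find just the reclaimable ones with a
 * tagged gang lookup instead of scanning every cached inode - the same
 * primitive xfs_inode_ag_lookup() uses when a tag is supplied.  The
 * helper name is an assumption.
 */
STATIC int
example_first_reclaimable(
	struct xfs_perag	*pag,
	struct xfs_inode	**ipp)
{
	/* returns the number of tagged inodes found, here 0 or 1 */
	return radix_tree_gang_lookup_tag(&pag->pag_ici_root,
					  (void **)ipp, 0, 1,
					  XFS_ICI_RECLAIM_TAG);
}
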
void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
}

STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode. Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 */
	spin_lock(&ip->i_flags_lock);
	ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	if (__xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* ignore as it is already under reclaim */
		spin_unlock(&ip->i_flags_lock);
		write_unlock(&pag->pag_ici_lock);
		return 0;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	write_unlock(&pag->pag_ici_lock);

	/*
	 * If the inode is still dirty, then flush it out. If the inode
	 * is not in the AIL, then it will be OK to flush it delwri as
	 * long as xfs_iflush() does not keep any references to the inode.
	 * We leave that decision up to xfs_iflush() since it has the
	 * knowledge of whether it's OK to simply do a delwri flush of
	 * the inode or whether we need to wait until the inode is
	 * pulled from the AIL.
	 * We get the flush lock regardless, though, just to make sure
	 * we don't free it while it is being flushed.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_iflock(ip);

	/*
	 * In the case of a forced shutdown we rely on xfs_iflush() to
	 * wait for the inode to be unpinned before returning an error.
	 */
	if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) {
		/* synchronize with xfs_iflush_done */
		xfs_iflock(ip);
		xfs_ifunlock(ip);
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_ireclaim(ip);
	return 0;
}

int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	return xfs_inode_ag_iterator(mp, xfs_reclaim_inode, mode,
					XFS_ICI_RECLAIM_TAG, 1);
}

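/*
 * Illustrative sketch, not part of the original file: a periodic,
 * non-blocking reclaim pass as xfs_sync_worker() above issues it.
 * Only inodes tagged XFS_ICI_RECLAIM_TAG are visited, so the sweep
 * cost scales with the number of reclaimable inodes rather than the
 * whole inode cache.  The helper name is an assumption.
 */
STATIC void
example_reclaim_pass(
	struct xfs_mount	*mp)
{
	xfs_reclaim_inodes(mp, XFS_IFLUSH_DELWRI_ELSE_ASYNC);
}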