/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"
#define inode_to_bdi(inode)	((inode)->i_mapping->backing_dev_info)
/**
 * writeback_acquire - attempt to get exclusive writeback access to a device
 * @bdi: the device's backing_dev_info structure
 *
 * It is a waste of resources to have more than one pdflush thread blocked on
 * a single request queue.  Exclusion at the request_queue level is obtained
 * via a flag in the request_queue's backing_dev_info.state.
 *
 * Non-request_queue-backed address_spaces will share default_backing_dev_info,
 * unless they implement their own, which is somewhat inefficient, as this
 * may prevent concurrent writeback against multiple devices.
 */
static int writeback_acquire(struct backing_dev_info *bdi)
{
	return !test_and_set_bit(BDI_pdflush, &bdi->state);
}
/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback in progress against a backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_pdflush, &bdi->state);
}
/**
 * writeback_release - relinquish exclusive writeback access against a device.
 * @bdi: the device's backing_dev_info structure
 */
static void writeback_release(struct backing_dev_info *bdi)
{
	BUG_ON(!writeback_in_progress(bdi));
	clear_bit(BDI_pdflush, &bdi->state);
}
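
/*
 * Example (illustrative sketch, not called from this file): a pdflush-style
 * worker pairs the two primitives above so that only one thread flushes a
 * given queue at a time.  The function name example_flush_bdi is
 * hypothetical.
 *
 *	static void example_flush_bdi(struct backing_dev_info *bdi,
 *				      struct writeback_control *wbc)
 *	{
 *		if (!writeback_acquire(bdi))
 *			return;
 *		generic_sync_bdi_inodes(bdi, wbc, NULL);
 *		writeback_release(bdi);
 *	}
 */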
static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}
/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list,
					&inode_to_bdi(inode)->b_dirty);
		}
	}
out:
	spin_unlock(&inode_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);
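
/*
 * Example (illustrative): filesystems normally reach this function through
 * the mark_inode_dirty*() wrappers in linux/fs.h rather than calling it
 * directly, e.g. after a timestamp update:
 *
 *	inode->i_mtime = current_fs_time(inode->i_sb);
 *	mark_inode_dirty_sync(inode);
 *
 * where mark_inode_dirty_sync(inode) expands to
 * __mark_inode_dirty(inode, I_DIRTY_SYNC).
 */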
static int write_inode(struct inode *inode, int sync)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, sync);
	return 0;
}
/*
 * Redirty an inode: set its when-it-was-dirtied timestamp and move it to the
 * furthest end of its bdi's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	if (!list_empty(&bdi->b_dirty)) {
		struct inode *tail;

		tail = list_entry(bdi->b_dirty.next, struct inode, i_list);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &bdi->b_dirty);
}
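
/*
 * Worked example: b_dirty is kept newest-first, so bdi->b_dirty.next is the
 * most recently dirtied inode.  If that head inode was stamped at jiffies
 * 200 and we redirty an inode stamped 150, time_before(150, 200) is true,
 * so the inode is restamped to the current jiffies before moving to the
 * head, keeping the list time-ordered.  If it was stamped 250 it is already
 * the newest: it was redirtied during writeout and its stamp is left alone.
 */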
/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	list_move(&inode->i_list, &inode_to_bdi(inode)->b_more_io);
}
static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}
static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole pdflush writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}
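
/*
 * Worked example of the wraparound case: on 32-bit with HZ=1000, jiffies
 * wraps roughly every 2^32 / 1000 seconds, i.e. about 49.7 days.  An inode
 * that keeps getting redirtied can retain a dirtied_when from before the
 * wrap; time_after() then reports it as "dirtied in the future" forever.
 * The extra time_before_eq(dirtied_when, jiffies) test above rejects such
 * stuck timestamps so writeback cannot be stalled permanently.
 */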
/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	while (!list_empty(delaying_queue)) {
		struct inode *inode = list_entry(delaying_queue->prev,
						 struct inode, i_list);
		if (older_than_this &&
		    inode_dirtied_after(inode, *older_than_this))
			break;
		list_move(&inode->i_list, dispatch_queue);
	}
}
/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct backing_dev_info *bdi,
		     unsigned long *older_than_this)
{
	list_splice_init(&bdi->b_more_io, bdi->b_io.prev);
	move_expired_inodes(&bdi->b_dirty, &bdi->b_io, older_than_this);
}
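
/*
 * Example (illustrative): a kupdate-style caller computes the expiry cutoff
 * before calling in, e.g. as done in mm/page-writeback.c:
 *
 *	unsigned long oldest_jif = jiffies -
 *			msecs_to_jiffies(dirty_expire_interval * 10);
 *	wbc.older_than_this = &oldest_jif;
 *
 * so queue_io() moves only inodes dirtied before that cutoff onto b_io.
 */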
static int sb_on_inode_list(struct super_block *sb, struct list_head *list)
{
	struct inode *inode;
	int ret = 0;

	spin_lock(&inode_lock);
	list_for_each_entry(inode, list, i_list) {
		if (inode->i_sb == sb) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&inode_lock);
	return ret;
}
int sb_has_dirty_inodes(struct super_block *sb)
{
	struct backing_dev_info *bdi;
	int ret = 0;

	/*
	 * This is REALLY expensive right now, but it'll go away
	 * when the bdi writeback is introduced
	 */
	mutex_lock(&bdi_lock);
	list_for_each_entry(bdi, &bdi_list, bdi_list) {
		if (sb_on_inode_list(sb, &bdi->b_dirty) ||
		    sb_on_inode_list(sb, &bdi->b_io) ||
		    sb_on_inode_list(sb, &bdi->b_more_io)) {
			ret = 1;
			break;
		}
	}
	mutex_unlock(&bdi_lock);

	return ret;
}
EXPORT_SYMBOL(sb_has_dirty_inodes);
/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	do {
		spin_unlock(&inode_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode_lock);
	} while (inode->i_state & I_SYNC);
}
/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has a ref on the inode (either via __iget or via syscall against
 * an fd) or the inode has I_WILL_FREE set (via generic_forget_inode).
 *
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	int wait = wbc->sync_mode == WB_SYNC_ALL;
	unsigned dirty;
	int ret;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		/*
		 * If this inode is locked for writeback and we are not doing
		 * writeback-for-data-integrity, move it to b_more_io so that
		 * writeback can proceed with the other inodes on b_io.
		 *
		 * We'll have another go at writing back this inode when we
		 * completed a full scan of b_io.
		 */
		if (!wait) {
			requeue_io(inode);
			return 0;
		}

		/*
		 * It's a data-integrity sync.  We must wait.
		 */
		inode_wait_for_writeback(inode);
	}

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY;

	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wait);
		if (ret == 0)
			ret = err;
	}

	if (wait) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
		if (!(inode->i_state & I_DIRTY) &&
		    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bails out without doing anything.  Redirty
			 * the inode; Move it from b_io onto b_more_io/b_dirty.
			 *
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of b_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had a good
			 * reason for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to b_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				if (wbc->nr_to_write <= 0) {
					/* slice used up: queue for next turn */
					requeue_io(inode);
				} else {
					/* somehow blocked: retry later */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Someone redirtied the inode while we were writing
			 * back the pages.
			 */
			redirty_tail(inode);
		} else if (atomic_read(&inode->i_count)) {
			/* The inode is clean, in use */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/* The inode is clean, unused */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}
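
/*
 * Example: the minimal calling pattern, as used by write_inode_now() and
 * sync_inode() below:
 *
 *	spin_lock(&inode_lock);
 *	ret = writeback_single_inode(inode, &wbc);
 *	spin_unlock(&inode_lock);
 */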
static void generic_sync_bdi_inodes(struct backing_dev_info *bdi,
				    struct writeback_control *wbc,
				    struct super_block *sb)
{
	const int is_blkdev_sb = sb_is_blkdev_sb(sb);
	const unsigned long start = jiffies;	/* livelock avoidance */

	spin_lock(&inode_lock);

	if (!wbc->for_kupdate || list_empty(&bdi->b_io))
		queue_io(bdi, wbc->older_than_this);

	while (!list_empty(&bdi->b_io)) {
		struct inode *inode = list_entry(bdi->b_io.prev,
						struct inode, i_list);
		long pages_skipped;

		/* super block given and doesn't match, skip this inode */
		if (sb && sb != inode->i_sb) {
			redirty_tail(inode);
			continue;
		}

		if (!bdi_cap_writeback_dirty(bdi)) {
			redirty_tail(inode);
			if (is_blkdev_sb) {
				/*
				 * Dirty memory-backed blockdev: the ramdisk
				 * driver does this.  Skip just this inode.
				 */
				continue;
			}
			/*
			 * Dirty memory-backed inode against a filesystem other
			 * than the kernel-internal bdev filesystem.  Skip the
			 * entire superblock.
			 */
			break;
		}

		if (inode->i_state & (I_NEW | I_WILL_FREE)) {
			requeue_io(inode);
			continue;
		}

		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
			if (!is_blkdev_sb)
				break;		/* Skip a congested fs */
			requeue_io(inode);
			continue;		/* Skip a congested blockdev */
		}

		if (wbc->bdi && bdi != wbc->bdi) {
			if (!is_blkdev_sb)
				break;		/* fs has the wrong queue */
			requeue_io(inode);
			continue;		/* blockdev has wrong queue */
		}

		/*
		 * Was this inode dirtied after sync_sb_inodes was called?
		 * This keeps sync from extra jobs and livelock.
		 */
		if (inode_dirtied_after(inode, start))
			break;

		/* Is another pdflush already flushing this queue? */
		if (current_is_pdflush() && !writeback_acquire(bdi))
			break;

		BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		writeback_single_inode(inode, wbc);
		if (current_is_pdflush())
			writeback_release(bdi);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * Writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			break;
		}
		if (!list_empty(&bdi->b_more_io))
			wbc->more_io = 1;
	}

	spin_unlock(&inode_lock);
	/* Leave any unwritten inodes on b_io */
}
/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If we're a pdflush thread, then implement pdflush collision avoidance
 * against the entire list.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * FIXME: this linear search could get expensive with many filesystems.  But
 * how to fix?  We need to go from an address_space to all inodes which share
 * a queue with that address_space.  (Easy: have a global "dirty superblocks"
 * list.)
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void generic_sync_sb_inodes(struct super_block *sb,
				   struct writeback_control *wbc)
{
	struct backing_dev_info *bdi;

	if (!wbc->bdi) {
		mutex_lock(&bdi_lock);
		list_for_each_entry(bdi, &bdi_list, bdi_list)
			generic_sync_bdi_inodes(bdi, wbc, sb);
		mutex_unlock(&bdi_lock);
	} else
		generic_sync_bdi_inodes(wbc->bdi, wbc, sb);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		struct inode *inode, *old_inode = NULL;

		spin_lock(&inode_lock);

		/*
		 * Data integrity sync. Must wait for all pages under writeback,
		 * because there may have been pages dirtied before our sync
		 * call, but which had writeout started before we write it out.
		 * In which case, the inode may not be on the dirty list, but
		 * we still have to wait for that writeout.
		 */
		list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
			struct address_space *mapping;

			if (inode->i_state &
					(I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
				continue;
			mapping = inode->i_mapping;
			if (mapping->nrpages == 0)
				continue;
			__iget(inode);
			spin_unlock(&inode_lock);
			/*
			 * We hold a reference to 'inode' so it couldn't have
			 * been removed from s_inodes list while we dropped the
			 * inode_lock.  We cannot iput the inode now as we can
			 * be holding the last reference and we cannot iput it
			 * under inode_lock.  So we keep the reference and iput
			 * it later.
			 */
			iput(old_inode);
			old_inode = inode;

			filemap_fdatawait(mapping);
			cond_resched();

			spin_lock(&inode_lock);
		}
		spin_unlock(&inode_lock);
		iput(old_inode);
	}
}
/*
 * Start writeback of dirty pagecache data against all unlocked inodes.
 *
 * Note:
 * We don't need to grab a reference to the superblock here.  If it has a
 * non-empty ->b_dirty list it hasn't been killed yet, and kill_super() won't
 * proceed past sync_inodes_sb() until the ->b_dirty/b_io/b_more_io lists are
 * all empty.  Since writeback_single_inode() regains inode_lock before it
 * finally moves the inode off the superblock lists, we are OK.
 *
 * If `older_than_this' is non-zero then only flush inodes which have a
 * flushtime older than *older_than_this.
 *
 * If `bdi' is non-zero then we will scan the first inode against each
 * superblock until we find the matching ones.  One group will be the dirty
 * inodes against a filesystem.  Then when we hit the dummy blockdev superblock,
 * sync_sb_inodes will seek out the blockdev which matches `bdi'.  Maybe not
 * super-efficient but we're about to do a ton of I/O...
 */
void
writeback_inodes(struct writeback_control *wbc)
{
	struct super_block *sb;

	might_sleep();
	spin_lock(&sb_lock);
restart:
	list_for_each_entry_reverse(sb, &super_blocks, s_list) {
		if (sb_has_dirty_inodes(sb)) {
			/* we're making our own get_super here */
			sb->s_count++;
			spin_unlock(&sb_lock);
			/*
			 * If we can't get the readlock, there's no sense in
			 * waiting around, most of the time the FS is going to
			 * be unmounted by the time it is released.
			 */
			if (down_read_trylock(&sb->s_umount)) {
				if (sb->s_root)
					generic_sync_sb_inodes(sb, wbc);
				up_read(&sb->s_umount);
			}
			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto restart;
		}
		if (wbc->nr_to_write <= 0)
			break;
	}
	spin_unlock(&sb_lock);
}
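
/*
 * Example (illustrative sketch): a background-writeout pass builds a
 * writeback_control along these lines and calls writeback_inodes() in a
 * loop until enough pages have been written (cf. background_writeout() in
 * mm/page-writeback.c; MAX_WRITEBACK_PAGES is defined there, not here):
 *
 *	struct writeback_control wbc = {
 *		.bdi		= NULL,
 *		.sync_mode	= WB_SYNC_NONE,
 *		.older_than_this = NULL,
 *		.nonblocking	= 1,
 *		.range_cyclic	= 1,
 *	};
 *	wbc.nr_to_write = MAX_WRITEBACK_PAGES;
 *	writeback_inodes(&wbc);
 */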
/**
 * writeback_inodes_sb	-	writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO. The number of pages submitted is
 * returned.
 */
long writeback_inodes_sb(struct super_block *sb)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
	long nr_to_write;

	nr_to_write = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused);

	wbc.nr_to_write = nr_to_write;
	generic_sync_sb_inodes(sb, &wbc);
	return nr_to_write - wbc.nr_to_write;
}
EXPORT_SYMBOL(writeback_inodes_sb);
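
/*
 * Example (illustrative): a non-waiting sync pass simply kicks off writeback
 * for a superblock it already holds and discards the page count (cf. the
 * non-wait branch of sync_filesystem() in fs/sync.c):
 *
 *	writeback_inodes_sb(sb);
 */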
/**
 * sync_inodes_sb	-	sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block. The number of pages synced is returned.
 */
long sync_inodes_sb(struct super_block *sb)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};
	long nr_to_write = LONG_MAX; /* doesn't actually matter */

	wbc.nr_to_write = nr_to_write;
	generic_sync_sb_inodes(sb, &wbc);
	return nr_to_write - wbc.nr_to_write;
}
EXPORT_SYMBOL(sync_inodes_sb);
/**
 * write_inode_now	-	write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);
/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);
/**
 * generic_osync_inode - flush all dirty data for a given inode to disk
 * @inode: inode to write
 * @mapping: the address_space that should be flushed
 * @what:  what to write and wait upon
 *
 * This can be called by file_write functions for files which have the
 * O_SYNC flag set, to flush dirty writes to disk.
 *
 * @what is a bitmask, specifying which part of the inode's data should be
 * written and waited upon.
 *
 * OSYNC_DATA:     i_mapping's dirty data
 * OSYNC_METADATA: the buffers at i_mapping->private_list
 * OSYNC_INODE:    the inode itself
 */
int generic_osync_inode(struct inode *inode, struct address_space *mapping, int what)
{
	int err = 0;
	int need_write_inode_now = 0;
	int err2;

	if (what & OSYNC_DATA)
		err = filemap_fdatawrite(mapping);
	if (what & (OSYNC_METADATA|OSYNC_DATA)) {
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
	}
	if (what & OSYNC_DATA) {
		err2 = filemap_fdatawait(mapping);
		if (!err)
			err = err2;
	}

	spin_lock(&inode_lock);
	if ((inode->i_state & I_DIRTY) &&
	    ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC)))
		need_write_inode_now = 1;
	spin_unlock(&inode_lock);

	if (need_write_inode_now) {
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
	} else
		inode_sync_wait(inode);

	return err;
}
EXPORT_SYMBOL(generic_osync_inode);
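
/*
 * Example (illustrative): an O_SYNC write path of this era flushes both the
 * data and its associated metadata after copying the user's data (cf. the
 * generic_file_*_write() paths in mm/filemap.c):
 *
 *	if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode)))
 *		err = generic_osync_inode(inode, mapping,
 *				OSYNC_METADATA|OSYNC_DATA);
 */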