*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
+#include "internal.h"
-extern struct super_block *blockdev_superblock;
+
+/**
+ * writeback_acquire - attempt to get exclusive writeback access to a device
+ * @bdi: the device's backing_dev_info structure
+ *
+ * It is a waste of resources to have more than one pdflush thread blocked on
+ * a single request queue. Exclusion at the request_queue level is obtained
+ * via a flag in the request_queue's backing_dev_info.state.
+ *
+ * Non-request_queue-backed address_spaces will share default_backing_dev_info,
+ * unless they implement their own, which is somewhat inefficient, as this
+ * may prevent concurrent writeback against multiple devices.
+ */
+static int writeback_acquire(struct backing_dev_info *bdi)
+{
+ return !test_and_set_bit(BDI_pdflush, &bdi->state);
+}
+
+/**
+ * writeback_in_progress - determine whether there is writeback in progress
+ * @bdi: the device's backing_dev_info structure.
+ *
+ * Determine whether there is writeback in progress against a backing device.
+ */
+int writeback_in_progress(struct backing_dev_info *bdi)
+{
+ return test_bit(BDI_pdflush, &bdi->state);
+}
+
+/**
+ * writeback_release - relinquish exclusive writeback access to a device
+ * @bdi: the device's backing_dev_info structure
+ */
+static void writeback_release(struct backing_dev_info *bdi)
+{
+ BUG_ON(!writeback_in_progress(bdi));
+ clear_bit(BDI_pdflush, &bdi->state);
+}
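
A condensed sketch of how these helpers are used: later in this file, the
per-inode loop in sync_sb_inodes() try-acquires the bdi before writing an
inode and releases it afterwards, so at most one pdflush thread blocks on
any request queue (illustrative excerpt of that loop, surrounding locking
omitted):

	/* inside sync_sb_inodes(), under inode_lock */
	if (current_is_pdflush() && !writeback_acquire(bdi))
		break;	/* another pdflush thread is working this queue */

	__iget(inode);
	__writeback_single_inode(inode, wbc);
	if (current_is_pdflush())
		writeback_release(bdi);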
/**
* __mark_inode_dirty - internal function
if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev"))
printk(KERN_DEBUG
"%s(%d): dirtied inode %lu (%s) on %s\n",
- current->comm, current->pid, inode->i_ino,
+ current->comm, task_pid_nr(current), inode->i_ino,
name, inode->i_sb->s_id);
}
inode->i_state |= flags;
/*
- * If the inode is locked, just update its dirty state.
+ * If the inode is being synced, just update its dirty state.
* The side that clears I_SYNC will place the inode on the
* appropriate superblock list, based upon its state.
*/
- if (inode->i_state & I_LOCK)
+ if (inode->i_state & I_SYNC)
goto out;
/*
goto out;
/*
- * If the inode was already on s_dirty or s_io, don't
+ * If the inode was already on s_dirty/s_io/s_more_io, don't
* reposition it (that would break s_dirty time-ordering).
*/
if (!was_dirty) {
}
/*
+ * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
+ * head (most-recently-dirtied end) of its superblock's dirty-inode list.
+ *
+ * Before stamping the inode's ->dirtied_when, we check to see whether it is
+ * already the most-recently-dirtied inode on the s_dirty list. If that is
+ * the case then the inode must have been redirtied while it was being written
+ * out and we don't reset its dirtied_when.
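+ *
+ * For example: if the newest entry on s_dirty carries dirtied_when == T
+ * and this inode carries T+1, the ordering already holds and the stamp
+ * is kept; if it carries an older stamp such as T-5, it is restamped
+ * with the current jiffies so that s_dirty stays sorted newest-first.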
+ */
+static void redirty_tail(struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+
+ if (!list_empty(&sb->s_dirty)) {
+ struct inode *tail_inode;
+
+ tail_inode = list_entry(sb->s_dirty.next, struct inode, i_list);
+ if (!time_after_eq(inode->dirtied_when,
+ tail_inode->dirtied_when))
+ inode->dirtied_when = jiffies;
+ }
+ list_move(&inode->i_list, &sb->s_dirty);
+}
+
+/*
+ * Requeue an inode for re-scanning after the sb->s_io list has been exhausted.
+ */
+static void requeue_io(struct inode *inode)
+{
+ list_move(&inode->i_list, &inode->i_sb->s_more_io);
+}
+
+static void inode_sync_complete(struct inode *inode)
+{
+ /*
+ * Prevent speculative execution through spin_unlock(&inode_lock);
+ */
+ smp_mb();
+ wake_up_bit(&inode->i_state, __I_SYNC);
+}
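
The smp_mb() pairs with the sleeping side: waiters test I_SYNC and sleep on
the same bit waitqueue, so the clearing of I_SYNC must be visible before
wake_up_bit() inspects that waitqueue. For reference, the waiter is the new
inode_sync_wait() helper, added to include/linux/fs.h by a companion patch;
a rough sketch of it:

	static inline void inode_sync_wait(struct inode *inode)
	{
		might_sleep();
		/* sleep until inode_sync_complete() clears __I_SYNC */
		wait_on_bit(&inode->i_state, __I_SYNC, inode_wait,
				TASK_UNINTERRUPTIBLE);
	}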
+
+/*
+ * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
+ */
+static void move_expired_inodes(struct list_head *delaying_queue,
+ struct list_head *dispatch_queue,
+ unsigned long *older_than_this)
+{
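+ /*
+ * s_dirty is kept in reverse time order: newly dirtied inodes go to
+ * the head of the list, so the eldest inode sits at the tail.
+ */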
+ while (!list_empty(delaying_queue)) {
+ struct inode *inode = list_entry(delaying_queue->prev,
+ struct inode, i_list);
+ if (older_than_this &&
+ time_after(inode->dirtied_when, *older_than_this))
+ break;
+ list_move(&inode->i_list, dispatch_queue);
+ }
+}
+
+/*
+ * Queue all expired dirty inodes for io, eldest first.
+ */
+static void queue_io(struct super_block *sb,
+ unsigned long *older_than_this)
+{
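+ /*
+ * Deferred inodes from s_more_io are spliced onto the tail of s_io
+ * first, then the inodes that have expired on s_dirty are queued.
+ */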
+ list_splice_init(&sb->s_more_io, sb->s_io.prev);
+ move_expired_inodes(&sb->s_dirty, &sb->s_io, older_than_this);
+}
+
+int sb_has_dirty_inodes(struct super_block *sb)
+{
+ return !list_empty(&sb->s_dirty) ||
+ !list_empty(&sb->s_io) ||
+ !list_empty(&sb->s_more_io);
+}
+EXPORT_SYMBOL(sb_has_dirty_inodes);
+
+/*
* Write a single inode's dirty pages and inode data out to disk.
* If `wait' is set, wait on the writeout.
*
{
unsigned dirty;
struct address_space *mapping = inode->i_mapping;
- struct super_block *sb = inode->i_sb;
int wait = wbc->sync_mode == WB_SYNC_ALL;
int ret;
- BUG_ON(inode->i_state & I_LOCK);
+ BUG_ON(inode->i_state & I_SYNC);
- /* Set I_LOCK, reset I_DIRTY */
+ /* Set I_SYNC, reset I_DIRTY */
dirty = inode->i_state & I_DIRTY;
- inode->i_state |= I_LOCK;
+ inode->i_state |= I_SYNC;
inode->i_state &= ~I_DIRTY;
spin_unlock(&inode_lock);
}
spin_lock(&inode_lock);
- inode->i_state &= ~I_LOCK;
+ inode->i_state &= ~I_SYNC;
if (!(inode->i_state & I_FREEING)) {
if (!(inode->i_state & I_DIRTY) &&
mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
/*
* We didn't write back all the pages. nfs_writepages()
* sometimes bails out without doing anything. Redirty
- * the inode. It is still on sb->s_io.
+ * the inode; move it from s_io onto s_more_io/s_dirty.
+ */
+ /*
+ * akpm: if the caller was the kupdate function we put
+ * this inode at the head of s_dirty so it gets first
+ * consideration. Otherwise, move it to the tail, for
+ * the reasons described there. I'm not really sure
+ * how much sense this makes. Presumably I had good
+ * reasons for doing it this way, and I'd rather not
+ * muck with it at present.
*/
if (wbc->for_kupdate) {
/*
- * For the kupdate function we leave the inode
- * at the head of sb_dirty so it will get more
- * writeout as soon as the queue becomes
- * uncongested.
+ * For the kupdate function we move the inode
+ * to s_more_io so it will get more writeout as
+ * soon as the queue becomes uncongested.
*/
inode->i_state |= I_DIRTY_PAGES;
- list_move_tail(&inode->i_list, &sb->s_dirty);
+ if (wbc->nr_to_write <= 0) {
+ /*
+ * slice used up: queue for next turn
+ */
+ requeue_io(inode);
+ } else {
+ /*
+ * somehow blocked: retry later
+ */
+ redirty_tail(inode);
+ }
} else {
/*
* Otherwise fully redirty the inode so that
* all the other files.
*/
inode->i_state |= I_DIRTY_PAGES;
- inode->dirtied_when = jiffies;
- list_move(&inode->i_list, &sb->s_dirty);
+ redirty_tail(inode);
}
} else if (inode->i_state & I_DIRTY) {
/*
* Someone redirtied the inode while we were writing back
* the pages.
*/
- list_move(&inode->i_list, &sb->s_dirty);
+ redirty_tail(inode);
} else if (atomic_read(&inode->i_count)) {
/*
* The inode is clean, inuse
list_move(&inode->i_list, &inode_unused);
}
}
- wake_up_inode(inode);
+ inode_sync_complete(inode);
return ret;
}
else
WARN_ON(inode->i_state & I_WILL_FREE);
- if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_LOCK)) {
- list_move(&inode->i_list, &inode->i_sb->s_dirty);
+ if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_SYNC)) {
+ /*
+ * We're skipping this inode because it is being synced, and we're
+ * not doing writeback-for-data-integrity. Move it to s_more_io so
+ * that writeback can proceed with the other inodes on s_io.
+ * We'll have another go at writing back this inode once we have
+ * completed a full scan of s_io.
+ */
+ requeue_io(inode);
return 0;
}
/*
* It's a data-integrity sync. We must wait.
*/
- if (inode->i_state & I_LOCK) {
- DEFINE_WAIT_BIT(wq, &inode->i_state, __I_LOCK);
+ if (inode->i_state & I_SYNC) {
+ DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
- wqh = bit_waitqueue(&inode->i_state, __I_LOCK);
+ wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
do {
spin_unlock(&inode_lock);
__wait_on_bit(wqh, &wq, inode_wait,
TASK_UNINTERRUPTIBLE);
spin_lock(&inode_lock);
- } while (inode->i_state & I_LOCK);
+ } while (inode->i_state & I_SYNC);
}
return __sync_single_inode(inode, wbc);
}
* The inodes to be written are parked on sb->s_io. They are moved back onto
* sb->s_dirty as they are selected for writing. This way, none can be missed
* on the writer throttling path, and we get decent balancing between many
- * throttled threads: we don't want them all piling up on __wait_on_inode.
+ * throttled threads: we don't want them all piling up on inode_sync_wait.
*/
static void
sync_sb_inodes(struct super_block *sb, struct writeback_control *wbc)
const unsigned long start = jiffies; /* livelock avoidance */
if (!wbc->for_kupdate || list_empty(&sb->s_io))
- list_splice_init(&sb->s_dirty, &sb->s_io);
+ queue_io(sb, wbc->older_than_this);
while (!list_empty(&sb->s_io)) {
struct inode *inode = list_entry(sb->s_io.prev,
long pages_skipped;
if (!bdi_cap_writeback_dirty(bdi)) {
- list_move(&inode->i_list, &sb->s_dirty);
- if (sb == blockdev_superblock) {
+ redirty_tail(inode);
+ if (sb_is_blkdev_sb(sb)) {
/*
* Dirty memory-backed blockdev: the ramdisk
* driver does this. Skip just this inode
if (wbc->nonblocking && bdi_write_congested(bdi)) {
wbc->encountered_congestion = 1;
- if (sb != blockdev_superblock)
+ if (!sb_is_blkdev_sb(sb))
break; /* Skip a congested fs */
- list_move(&inode->i_list, &sb->s_dirty);
+ requeue_io(inode);
continue; /* Skip a congested blockdev */
}
if (wbc->bdi && bdi != wbc->bdi) {
- if (sb != blockdev_superblock)
+ if (!sb_is_blkdev_sb(sb))
break; /* fs has the wrong queue */
- list_move(&inode->i_list, &sb->s_dirty);
+ requeue_io(inode);
continue; /* blockdev has wrong queue */
}
if (time_after(inode->dirtied_when, start))
break;
- /* Was this inode dirtied too recently? */
- if (wbc->older_than_this && time_after(inode->dirtied_when,
- *wbc->older_than_this))
- break;
-
/* Is another pdflush already flushing this queue? */
if (current_is_pdflush() && !writeback_acquire(bdi))
break;
* writeback is not making progress due to locked
* buffers. Skip this inode for now.
*/
- list_move(&inode->i_list, &sb->s_dirty);
+ redirty_tail(inode);
}
spin_unlock(&inode_lock);
- cond_resched();
iput(inode);
+ cond_resched();
spin_lock(&inode_lock);
- if (wbc->nr_to_write <= 0)
+ if (wbc->nr_to_write <= 0) {
+ wbc->more_io = 1;
break;
+ }
+ if (!list_empty(&sb->s_more_io))
+ wbc->more_io = 1;
}
return; /* Leave any unwritten inodes on s_io */
}
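
The new wbc->more_io flag is consumed outside this file, by the pdflush
handlers in mm/page-writeback.c (updated in a companion patch). A
hypothetical consumer loop, with MAX_WRITEBACK_PAGES and the retry policy
assumed purely for illustration:

	for (;;) {
		wbc.more_io = 0;
		wbc.encountered_congestion = 0;
		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
		writeback_inodes(&wbc);
		if (wbc.nr_to_write > 0 && !wbc.more_io)
			break;	/* all clean, nothing parked on s_more_io */
		if (wbc.encountered_congestion)
			congestion_wait(WRITE, HZ/10);
	}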
* Note:
* We don't need to grab a reference to superblock here. If it has non-empty
* ->s_dirty it hasn't been killed yet and kill_super() won't proceed
- * past sync_inodes_sb() until both the ->s_dirty and ->s_io lists are
+ * past sync_inodes_sb() until the ->s_dirty/s_io/s_more_io lists are all
* empty. Since __sync_single_inode() regains inode_lock before it finally moves
* the inode off the superblock lists, we are OK.
*
might_sleep();
spin_lock(&sb_lock);
restart:
- sb = sb_entry(super_blocks.prev);
- for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) {
- if (!list_empty(&sb->s_dirty) || !list_empty(&sb->s_io)) {
+ list_for_each_entry_reverse(sb, &super_blocks, s_list) {
+ if (sb_has_dirty_inodes(sb)) {
/* we're making our own get_super here */
sb->s_count++;
spin_unlock(&sb_lock);
{
struct writeback_control wbc = {
.sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_HOLD,
+ .range_start = 0,
+ .range_end = LLONG_MAX,
};
- unsigned long nr_dirty = read_page_state(nr_dirty);
- unsigned long nr_unstable = read_page_state(nr_unstable);
+ unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
+ unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
wbc.nr_to_write = nr_dirty + nr_unstable +
(inodes_stat.nr_inodes - inodes_stat.nr_unused) +
{
struct super_block *sb;
spin_lock(&sb_lock);
- sb = sb_entry(super_blocks.prev);
- for (; sb != sb_entry(&super_blocks); sb = sb_entry(sb->s_list.prev)) {
+ list_for_each_entry_reverse(sb, &super_blocks, s_list)
sb->s_syncing = val;
- }
spin_unlock(&sb_lock);
}
int ret;
struct writeback_control wbc = {
.nr_to_write = LONG_MAX,
- .sync_mode = WB_SYNC_ALL,
+ .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
+ .range_start = 0,
+ .range_end = LLONG_MAX,
};
if (!mapping_cap_writeback_dirty(inode->i_mapping))
- return 0;
+ wbc.nr_to_write = 0; /* metadata-only: don't write data pages */
might_sleep();
spin_lock(&inode_lock);
ret = __writeback_single_inode(inode, &wbc);
spin_unlock(&inode_lock);
if (sync)
- wait_on_inode(inode);
+ inode_sync_wait(inode);
return ret;
}
EXPORT_SYMBOL(write_inode_now);
* O_SYNC flag set, to flush dirty writes to disk.
*
* @what is a bitmask, specifying which part of the inode's data should be
- * written and waited upon:
+ * written and waited upon.
*
* OSYNC_DATA: i_mapping's dirty data
* OSYNC_METADATA: the buffers at i_mapping->private_list
int need_write_inode_now = 0;
int err2;
- current->flags |= PF_SYNCWRITE;
if (what & OSYNC_DATA)
err = filemap_fdatawrite(mapping);
if (what & (OSYNC_METADATA|OSYNC_DATA)) {
if (!err)
err = err2;
}
- current->flags &= ~PF_SYNCWRITE;
spin_lock(&inode_lock);
if ((inode->i_state & I_DIRTY) &&
err = err2;
}
else
- wait_on_inode(inode);
+ inode_sync_wait(inode);
return err;
}
-
EXPORT_SYMBOL(generic_osync_inode);
-
-/**
- * writeback_acquire: attempt to get exclusive writeback access to a device
- * @bdi: the device's backing_dev_info structure
- *
- * It is a waste of resources to have more than one pdflush thread blocked on
- * a single request queue. Exclusion at the request_queue level is obtained
- * via a flag in the request_queue's backing_dev_info.state.
- *
- * Non-request_queue-backed address_spaces will share default_backing_dev_info,
- * unless they implement their own. Which is somewhat inefficient, as this
- * may prevent concurrent writeback against multiple devices.
- */
-int writeback_acquire(struct backing_dev_info *bdi)
-{
- return !test_and_set_bit(BDI_pdflush, &bdi->state);
-}
-
-/**
- * writeback_in_progress: determine whether there is writeback in progress
- * against a backing device.
- * @bdi: the device's backing_dev_info structure.
- */
-int writeback_in_progress(struct backing_dev_info *bdi)
-{
- return test_bit(BDI_pdflush, &bdi->state);
-}
-
-/**
- * writeback_release: relinquish exclusive writeback access against a device.
- * @bdi: the device's backing_dev_info structure
- */
-void writeback_release(struct backing_dev_info *bdi)
-{
- BUG_ON(!writeback_in_progress(bdi));
- clear_bit(BDI_pdflush, &bdi->state);
-}