Now that bdi_writeback_all() no longer handles integrity writeback,
it doesn't have to block anymore. This means that we can switch
bdi_list reader side protection to RCU.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
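For context, this is roughly how bdi_writeback_all() reads once the first
hunk below is applied. The walk only queues work and never sleeps, which is
exactly what makes an RCU read-side critical section legal here:

    static void bdi_writeback_all(struct writeback_control *wbc)
    {
            struct backing_dev_info *bdi;

            WARN_ON(wbc->sync_mode == WB_SYNC_ALL);

            rcu_read_lock();
            list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
                    if (!bdi_has_dirty_io(bdi))
                            continue;
                    bdi_alloc_queue_work(bdi, wbc);
            }
            rcu_read_unlock();
    }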
WARN_ON(wbc->sync_mode == WB_SYNC_ALL);

- spin_lock(&bdi_lock);
+ rcu_read_lock();
- list_for_each_entry(bdi, &bdi_list, bdi_list) {
+ list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
if (!bdi_has_dirty_io(bdi))
continue;
bdi_alloc_queue_work(bdi, wbc);
}
- spin_unlock(&bdi_lock);
+ rcu_read_unlock();
struct backing_dev_info {
struct list_head bdi_list;
+ struct rcu_head rcu_head;
unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
unsigned long state; /* Always use atomic bitops on this */
unsigned int capabilities; /* Device capabilities */
EXPORT_SYMBOL_GPL(default_backing_dev_info);
static struct class *bdi_class;
+
+/*
+ * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as
+ * reader side protection for bdi_pending_list. bdi_list has RCU reader side
+ * locking.
+ */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
LIST_HEAD(bdi_pending_list);
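Note the asymmetry the new comment spells out: bdi_list readers go lockless
under RCU, while bdi_pending_list readers must still take bdi_lock. As a
hedged sketch (the helper name is hypothetical, not part of this patch):

    /* bdi_pending_list has no RCU protection: readers need bdi_lock too */
    static bool bdi_pending_list_empty(void)
    {
            bool empty;

            spin_lock_bh(&bdi_lock);
            empty = list_empty(&bdi_pending_list);
            spin_unlock_bh(&bdi_lock);

            return empty;
    }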
/*
* Add us to the active bdi_list
*/
- spin_lock(&bdi_lock);
- list_add(&bdi->bdi_list, &bdi_list);
- spin_unlock(&bdi_lock);
+ spin_lock_bh(&bdi_lock);
+ list_add_rcu(&bdi->bdi_list, &bdi_list);
+ spin_unlock_bh(&bdi_lock);
if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
wb_do_writeback(me, 0);
- spin_lock(&bdi_lock);
+ spin_lock_bh(&bdi_lock);
/*
* Check if any existing bdi's have dirty data without
if (list_empty(&bdi_pending_list)) {
unsigned long wait;
- spin_unlock(&bdi_lock);
+ spin_unlock_bh(&bdi_lock);
wait = msecs_to_jiffies(dirty_writeback_interval * 10);
schedule_timeout(wait);
try_to_freeze();
bdi = list_entry(bdi_pending_list.next, struct backing_dev_info,
bdi_list);
list_del_init(&bdi->bdi_list);
- spin_unlock(&bdi_lock);
+ spin_unlock_bh(&bdi_lock);
wb = &bdi->wb;
wb->task = kthread_run(bdi_start_fn, wb, "flush-%s",
dev_name(bdi->dev));
* a chance to flush other bdi's to free
* memory.
*/
- spin_lock(&bdi_lock);
+ spin_lock_bh(&bdi_lock);
list_add_tail(&bdi->bdi_list, &bdi_pending_list);
- spin_unlock(&bdi_lock);
+ spin_unlock_bh(&bdi_lock);
+static void bdi_add_to_pending(struct rcu_head *head)
+{
+ struct backing_dev_info *bdi;
+
+ bdi = container_of(head, struct backing_dev_info, rcu_head);
+ INIT_LIST_HEAD(&bdi->bdi_list);
+
+ spin_lock(&bdi_lock);
+ list_add_tail(&bdi->bdi_list, &bdi_pending_list);
+ spin_unlock(&bdi_lock);
+
+ /*
+ * We are now on the pending list, wake up bdi_forker_task()
+ * to finish the job and add us back to the active bdi_list
+ */
+ wake_up_process(default_backing_dev_info.wb.task);
+}
+
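This callback is also why the patch converts bdi_lock users to the _bh
lock variants: call_rcu() callbacks run from softirq context, so
bdi_add_to_pending() can take plain spin_lock() there (bottom halves are
already off), but a process-context holder must disable bottom halves or
the softirq could interrupt it on the same CPU and spin on bdi_lock
forever. A sketch of the required process-context pattern (illustrative,
not a hunk from this patch):

    static void bdi_list_add_sketch(struct backing_dev_info *bdi)
    {
            /*
             * _bh variant: an RCU callback (softirq) also takes
             * bdi_lock, so block bottom halves while holding it.
             */
            spin_lock_bh(&bdi_lock);
            list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
            spin_unlock_bh(&bdi_lock);
    }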
/*
* Add the default flusher task that gets created for any bdi
* that has dirty data pending writeout
* waiting for previous additions to finish.
*/
if (!test_and_set_bit(BDI_pending, &bdi->state)) {
- list_move_tail(&bdi->bdi_list, &bdi_pending_list);
+ list_del_rcu(&bdi->bdi_list);
/*
- * We are now on the pending list, wake up bdi_forker_task()
- * to finish the job and add us back to the active bdi_list
+ * We must wait for the current RCU period to end before
+ * moving to the pending list. So schedule that operation
+ * from an RCU callback.
*/
- wake_up_process(default_backing_dev_info.wb.task);
+ call_rcu(&bdi->rcu_head, bdi_add_to_pending);
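The replacement of list_move_tail() with list_del_rcu() plus call_rcu() is
the subtle core of this hunk: list_del_rcu() deliberately leaves the
removed entry's forward pointer intact so that concurrent RCU walkers of
bdi_list can finish their traversal, and re-linking the same list_head
into bdi_pending_list right away would redirect those walkers into the
wrong list. In sketch form (illustrative only):

    /* Unsafe -- what the patch avoids: */
    list_del_rcu(&bdi->bdi_list);
    list_add_tail(&bdi->bdi_list, &bdi_pending_list); /* BUG: readers may
                                                         still follow the
                                                         old ->next */

    /* Safe -- reuse the list_head only after a grace period: */
    list_del_rcu(&bdi->bdi_list);
    call_rcu(&bdi->rcu_head, bdi_add_to_pending);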
+/*
+ * Remove bdi from bdi_list, and ensure that it is no longer visible
+ */
+static void bdi_remove_from_list(struct backing_dev_info *bdi)
+{
+ spin_lock_bh(&bdi_lock);
+ list_del_rcu(&bdi->bdi_list);
+ spin_unlock_bh(&bdi_lock);
+
+ synchronize_rcu();
+}
+
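bdi_remove_from_list() uses synchronize_rcu() rather than call_rcu()
because its callers are about to tear the bdi down and need a synchronous
guarantee: once synchronize_rcu() returns, no CPU can still be traversing
the entry, so the structure may safely be reused or freed. A hypothetical
caller, for illustration:

    static void bdi_teardown_sketch(struct backing_dev_info *bdi)
    {
            /* Blocks until every RCU reader that could see bdi is done */
            bdi_remove_from_list(bdi);

            /*
             * No reader can reach bdi via bdi_list now; killing the
             * flusher threads and freeing the bdi is safe from here.
             */
    }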
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
const char *fmt, ...)
{
- spin_lock(&bdi_lock);
- list_add_tail(&bdi->bdi_list, &bdi_list);
- spin_unlock(&bdi_lock);
+ spin_lock_bh(&bdi_lock);
+ list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
+ spin_unlock_bh(&bdi_lock);
wb->task = NULL;
ret = -ENOMEM;
- spin_lock(&bdi_lock);
- list_del(&bdi->bdi_list);
- spin_unlock(&bdi_lock);
+ bdi_remove_from_list(bdi);
/*
* Make sure nobody finds us on the bdi_list anymore
*/
- spin_lock(&bdi_lock);
- list_del(&bdi->bdi_list);
- spin_unlock(&bdi_lock);
+ bdi_remove_from_list(bdi);
/*
* Finally, kill the kernel threads. We don't need to be RCU
bdi->max_ratio = 100;
bdi->max_prop_frac = PROP_FRAC_BASE;
spin_lock_init(&bdi->wb_lock);
+ INIT_RCU_HEAD(&bdi->rcu_head);
INIT_LIST_HEAD(&bdi->bdi_list);
INIT_LIST_HEAD(&bdi->wb_list);
INIT_LIST_HEAD(&bdi->work_list);
- spin_lock(&bdi_lock);
+ spin_lock_bh(&bdi_lock);
if (min_ratio > bdi->max_ratio) {
ret = -EINVAL;
} else {
- spin_unlock(&bdi_lock);
+ spin_unlock_bh(&bdi_lock);
if (max_ratio > 100)
return -EINVAL;
- spin_lock(&bdi_lock);
+ spin_lock_bh(&bdi_lock);
if (bdi->min_ratio > max_ratio) {
ret = -EINVAL;
} else {
bdi->max_ratio = max_ratio;
bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
}
- spin_unlock(&bdi_lock);
+ spin_unlock_bh(&bdi_lock);