#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>

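/*
 * Default unplug hook for backing devices that have no request queue of
 * their own to kick; it intentionally does nothing.
 */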
void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);

struct backing_dev_info default_backing_dev_info = {
	.ra_pages	= VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
	.state		= 0,
	.capabilities	= BDI_CAP_MAP_COPY,
	.unplug_io_fn	= default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

static struct class *bdi_class;

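/*
 * bdi_lock protects bdi_list and bdi_pending_list; the pending list
 * holds bdis that are waiting for the forker thread to create their
 * flusher task.
 */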
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
LIST_HEAD(bdi_pending_list);

static struct task_struct *sync_supers_tsk;
static struct timer_list sync_supers_timer;

static int bdi_sync_supers(void *);
static void sync_supers_timer_fn(unsigned long);
static void arm_supers_timer(void);

static void bdi_add_default_flusher_task(struct backing_dev_info *bdi);

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

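/*
 * Dump the per-bdi writeback counters and dirty thresholds, converted
 * to kB, into the per-bdi "stats" debugfs file (conventionally under
 * /sys/kernel/debug/bdi/<name>/stats).
 */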
static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;

	get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback:     %8lu kB\n"
		   "BdiReclaimable:   %8lu kB\n"
		   "BdiDirtyThresh:   %8lu kB\n"
		   "DirtyThresh:      %8lu kB\n"
		   "BackgroundThresh: %8lu kB\n",
		   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
		   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
		   K(bdi_thresh), K(dirty_thresh),
		   K(background_thresh));
#undef K

	return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
	.open		= bdi_debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
					       bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove(bdi->debug_stats);
	debugfs_remove(bdi->debug_dir);
}

#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

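/*
 * sysfs store method: parse a readahead size in kB and convert it to
 * a page count for bdi->ra_pages.
 */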
static ssize_t read_ahead_kb_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned long read_ahead_kb;
	ssize_t ret = -EINVAL;

	read_ahead_kb = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
		ret = count;
	}
	return ret;
}

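/*
 * K() converts a page count to kB; BDI_SHOW() generates a sysfs show
 * method that prints the given expression for this bdi.
 */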
#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_min_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_max_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)

static struct device_attribute bdi_dev_attrs[] = {
	__ATTR_RW(read_ahead_kb),
	__ATTR_RW(min_ratio),
	__ATTR_RW(max_ratio),
	__ATTR_NULL,
};

static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	bdi_class->dev_attrs = bdi_dev_attrs;
	bdi_debug_init();
	return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
	int err;

	sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
	BUG_ON(IS_ERR(sync_supers_tsk));

	/* setup_timer() initializes the timer, no separate init_timer() needed */
	setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
	arm_supers_timer();

	err = bdi_init(&default_backing_dev_info);
	if (!err)
		bdi_register(&default_backing_dev_info, NULL, "default");

	return err;
}
subsys_initcall(default_bdi_init);

static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
}

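/*
 * Attach the current task to @wb: publish it on the bdi's RCU-protected
 * wb_list and mark the task as a flusher/swap-writer at normal priority.
 */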
static void bdi_task_init(struct backing_dev_info *bdi,
			  struct bdi_writeback *wb)
{
	struct task_struct *tsk = current;

	spin_lock(&bdi->wb_lock);
	list_add_tail_rcu(&wb->list, &bdi->wb_list);
	spin_unlock(&bdi->wb_lock);

	tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
	set_freezable();

	/*
	 * Our parent may run at a different priority, just set us to normal.
	 */
	set_user_nice(tsk, 0);
}

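/*
 * Main function of a per-bdi flusher thread: join the active bdi_list,
 * signal anyone waiting on BDI_pending, run the writeback loop, and
 * clean up after ourselves on exit.
 */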
static int bdi_start_fn(void *ptr)
{
	struct bdi_writeback *wb = ptr;
	struct backing_dev_info *bdi = wb->bdi;
	int ret;

	/*
	 * Add us to the active bdi_list.
	 */
	spin_lock(&bdi_lock);
	list_add(&bdi->bdi_list, &bdi_list);
	spin_unlock(&bdi_lock);

	bdi_task_init(bdi, wb);

	/*
	 * Clear pending bit and wake up anybody waiting to tear us down.
	 */
	clear_bit(BDI_pending, &bdi->state);
	smp_mb__after_clear_bit();
	wake_up_bit(&bdi->state, BDI_pending);

	ret = bdi_writeback_task(wb);

	/*
	 * Remove us from the list.
	 */
	spin_lock(&bdi->wb_lock);
	list_del_rcu(&wb->list);
	spin_unlock(&bdi->wb_lock);

	/*
	 * Flush any work that raced with us exiting. No new work
	 * will be added, since this bdi isn't discoverable anymore.
	 */
	if (!list_empty(&bdi->work_list))
		wb_do_writeback(wb, 1);

	wb->task = NULL;
	return ret;
}

int bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	return wb_has_dirty_io(&bdi->wb);
}

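/*
 * Opportunistic writeback: push some dirty pages out of @bdi from the
 * calling thread. Used by the forker when it cannot spawn a flusher
 * thread, so that memory is freed for another attempt.
 */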
static void bdi_flush_io(struct backing_dev_info *bdi)
{
	struct writeback_control wbc = {
		.bdi			= bdi,
		.sync_mode		= WB_SYNC_NONE,
		.older_than_this	= NULL,
		.range_cyclic		= 1,
		.nr_to_write		= 1024,
	};

	writeback_inodes_wbc(&wbc);
}

/*
 * kupdated() used to do this. We cannot do it from the bdi_forker_task()
 * or we risk deadlocking on ->s_umount. The longer term solution would be
 * to implement sync_supers_bdi() or similar and simply do it from the
 * bdi writeback tasks individually.
 */
static int bdi_sync_supers(void *unused)
{
	set_user_nice(current, 0);

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();

		/*
		 * Do this periodically, like kupdated() did before.
		 */
		sync_supers();
	}

	return 0;
}

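/*
 * dirty_writeback_interval is in centiseconds, hence the * 10 to get
 * the next expiry in milliseconds before converting to jiffies.
 */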
static void arm_supers_timer(void)
{
	unsigned long next;

	next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
	mod_timer(&sync_supers_timer, round_jiffies_up(next));
}

static void sync_supers_timer_fn(unsigned long unused)
{
	wake_up_process(sync_supers_tsk);
	arm_supers_timer();
}

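/*
 * The forker thread runs on behalf of the default bdi. It periodically
 * scans bdi_list for devices with dirty data but no flusher thread,
 * moves them to bdi_pending_list, and spawns a thread for each pending
 * bdi via bdi_start_fn().
 */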
static int bdi_forker_task(void *ptr)
{
	struct bdi_writeback *me = ptr;

	bdi_task_init(me->bdi, me);

	for (;;) {
		struct backing_dev_info *bdi, *tmp;
		struct bdi_writeback *wb;

		/*
		 * Temporary measure, we want to make sure we don't see
		 * dirty data on the default backing_dev_info.
		 */
		if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
			wb_do_writeback(me, 0);

		spin_lock(&bdi_lock);
		/*
		 * Check if any existing bdi's have dirty data without
		 * a thread registered. If so, set that up.
		 */
		list_for_each_entry_safe(bdi, tmp, &bdi_list, bdi_list) {
			if (bdi->wb.task)
				continue;
			if (list_empty(&bdi->work_list) &&
			    !bdi_has_dirty_io(bdi))
				continue;

			bdi_add_default_flusher_task(bdi);
		}

		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&bdi_pending_list)) {
			unsigned long wait;

			spin_unlock(&bdi_lock);
			wait = msecs_to_jiffies(dirty_writeback_interval * 10);
			schedule_timeout(wait);
			try_to_freeze();
			continue;
		}

		__set_current_state(TASK_RUNNING);
		/*
		 * This is our real job - check for pending entries in
		 * bdi_pending_list, and create the tasks that got added.
		 */
		bdi = list_entry(bdi_pending_list.next, struct backing_dev_info,
				 bdi_list);
		list_del_init(&bdi->bdi_list);
		spin_unlock(&bdi_lock);

		wb = &bdi->wb;
		wb->task = kthread_run(bdi_start_fn, wb, "flush-%s",
					dev_name(bdi->dev));
		/*
		 * If task creation fails, then re-add the bdi to
		 * the pending list and force writeout of the bdi
		 * from this forker thread. That will free some memory
		 * and we can try again.
		 */
		if (IS_ERR(wb->task)) {
			wb->task = NULL;
			/*
			 * Add this 'bdi' to the back, so we get
			 * a chance to flush other bdi's to free
			 * memory.
			 */
			spin_lock(&bdi_lock);
			list_add_tail(&bdi->bdi_list, &bdi_pending_list);
			spin_unlock(&bdi_lock);

			bdi_flush_io(bdi);
		}
	}

	return 0;
}

/*
 * Add the default flusher task that gets created for any bdi
 * that has dirty data pending writeout.
 */
static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
{
	if (!bdi_cap_writeback_dirty(bdi))
		return;

	/*
	 * Check with the helper whether to proceed adding a task. It will
	 * only abort if two or more simultaneous calls to
	 * bdi_add_default_flusher_task() occurred; further additions will
	 * block waiting for previous additions to finish.
	 */
	if (!test_and_set_bit(BDI_pending, &bdi->state)) {
		list_move_tail(&bdi->bdi_list, &bdi_pending_list);

		/*
		 * We are now on the pending list, wake up bdi_forker_task()
		 * to finish the job and add us back to the active bdi_list.
		 */
		wake_up_process(default_backing_dev_info.wb.task);
	}
}

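/*
 * Register @bdi with the bdi class: create its sysfs device, link it
 * into bdi_list, and, for the default bdi only, start the forker thread
 * that spawns flusher threads on demand for everybody else.
 */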
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...)
{
	va_list args;
	int ret = 0;
	struct device *dev;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		goto exit;

	va_start(args, fmt);
	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
	va_end(args);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto exit;
	}

	spin_lock(&bdi_lock);
	list_add_tail(&bdi->bdi_list, &bdi_list);
	spin_unlock(&bdi_lock);

	bdi->dev = dev;

	/*
	 * Just start the forker thread for our default backing_dev_info,
	 * and add other bdi's to the list. They will get a thread created
	 * on-demand when they need it.
	 */
	if (bdi_cap_flush_forker(bdi)) {
		struct bdi_writeback *wb = &bdi->wb;

		wb->task = kthread_run(bdi_forker_task, wb, "bdi-%s",
					dev_name(dev));
		if (IS_ERR(wb->task)) {
			wb->task = NULL;
			ret = -ENOMEM;

			spin_lock(&bdi_lock);
			list_del(&bdi->bdi_list);
			spin_unlock(&bdi_lock);
			goto exit;
		}
	}

	bdi_debug_register(bdi, dev_name(dev));
exit:
	return ret;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);

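/*
 * A hedged usage sketch, not taken from this file: a block driver would
 * typically register its queue's bdi against the disk's device number
 * once the gendisk is set up, e.g.
 *
 *	err = bdi_register_dev(&q->backing_dev_info, disk_devt(disk));
 *
 * where 'q' and 'disk' stand for the driver's request queue and gendisk;
 * both names are illustrative only.
 */
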
/*
 * Remove bdi from the global list and shut down any threads we have running.
 */
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
	struct bdi_writeback *wb;

	if (!bdi_cap_writeback_dirty(bdi))
		return;

	/*
	 * If setup is pending, wait for that to complete first.
	 */
	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
			TASK_UNINTERRUPTIBLE);

	/*
	 * Make sure nobody finds us on the bdi_list anymore.
	 */
	spin_lock(&bdi_lock);
	list_del(&bdi->bdi_list);
	spin_unlock(&bdi_lock);

	/*
	 * Finally, kill the kernel threads. We don't need to be RCU
	 * safe anymore, since the bdi is gone from visibility.
	 */
	list_for_each_entry(wb, &bdi->wb_list, list)
		kthread_stop(wb->task);
}

void bdi_unregister(struct backing_dev_info *bdi)
{
	if (bdi->dev) {
		if (!bdi_cap_flush_forker(bdi))
			bdi_wb_shutdown(bdi);
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}
}
EXPORT_SYMBOL(bdi_unregister);

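/*
 * Initialize a bdi that is embedded in some other structure: ratios,
 * lists, per-writeback state, and the per-cpu statistics counters.
 * On failure, any counters that were set up are torn down again.
 */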
int bdi_init(struct backing_dev_info *bdi)
{
	int i, err;

	bdi->dev = NULL;

	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = PROP_FRAC_BASE;
	spin_lock_init(&bdi->wb_lock);
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->wb_list);
	INIT_LIST_HEAD(&bdi->work_list);

	bdi_wb_init(&bdi->wb, bdi);

	/*
	 * Just one thread support for now, hard code mask and count.
	 */
	bdi->wb_mask = 1;
	bdi->wb_cnt = 1;

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
		err = percpu_counter_init(&bdi->bdi_stat[i], 0);
		if (err)
			goto err;
	}

	bdi->dirty_exceeded = 0;
	err = prop_local_init_percpu(&bdi->completions);

	if (err) {
err:
		while (i--)
			percpu_counter_destroy(&bdi->bdi_stat[i]);
	}

	return err;
}
EXPORT_SYMBOL(bdi_init);

void bdi_destroy(struct backing_dev_info *bdi)
{
	int i;

	WARN_ON(bdi_has_dirty_io(bdi));

	bdi_unregister(bdi);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
		percpu_counter_destroy(&bdi->bdi_stat[i]);

	prop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);

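/*
 * Two wait queues for congestion waiters: index 0 is used for async
 * congestion, index 1 for sync congestion, matching the 'sync' argument
 * of the helpers below.
 */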
static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	clear_bit(bit, &bdi->state);
	smp_mb__after_clear_bit();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	set_bit(bit, &bdi->state);
}
EXPORT_SYMBOL(set_bdi_congested);

/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion. If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);
	return ret;
}
EXPORT_SYMBOL(congestion_wait);