writeback: Fix bdi use after free in wb_work_complete()
/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002    Andrew Morton
 *              Split out of fs/inode.c
 *              Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

#define inode_to_bdi(inode)     ((inode)->i_mapping->backing_dev_info)

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_args {
        long nr_pages;
        struct super_block *sb;
        enum writeback_sync_modes sync_mode;
        int for_kupdate;
        int range_cyclic;
};

/*
 * Work items for the bdi_writeback threads
 */
struct bdi_work {
        struct list_head list;          /* pending work list */
        struct rcu_head rcu_head;       /* for RCU free/clear of work */

        unsigned long seen;             /* threads that have seen this work */
        atomic_t pending;               /* number of threads still to do work */

        struct wb_writeback_args args;  /* writeback arguments */

        unsigned long state;            /* flag bits, see WS_* */
};

enum {
        WS_USED_B = 0,
        WS_ONSTACK_B,
};

#define WS_USED (1 << WS_USED_B)
#define WS_ONSTACK (1 << WS_ONSTACK_B)

static inline bool bdi_work_on_stack(struct bdi_work *work)
{
        return test_bit(WS_ONSTACK_B, &work->state);
}

static inline void bdi_work_init(struct bdi_work *work,
                                 struct wb_writeback_args *args)
{
        INIT_RCU_HEAD(&work->rcu_head);
        work->args = *args;
        work->state = WS_USED;
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
        return !list_empty(&bdi->work_list);
}

static void bdi_work_clear(struct bdi_work *work)
{
        clear_bit(WS_USED_B, &work->state);
        smp_mb__after_clear_bit();
        wake_up_bit(&work->state, WS_USED_B);
}
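
/*
 * Note: the clear-then-wake sequence above pairs with
 * bdi_wait_on_work_clear() below.  smp_mb__after_clear_bit() ensures that
 * the waiter observes WS_USED_B clear once it is woken; for on-stack work
 * the waiter's stack frame, and with it the work item itself, may be
 * reused immediately afterwards.
 */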

static void bdi_work_free(struct rcu_head *head)
{
        struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);

        if (!bdi_work_on_stack(work))
                kfree(work);
        else
                bdi_work_clear(work);
}

static void wb_work_complete(struct bdi_work *work)
{
        const enum writeback_sync_modes sync_mode = work->args.sync_mode;
        int onstack = bdi_work_on_stack(work);

        /*
         * For allocated work, we can clear the done/seen bit right here.
         * For on-stack work, we need to postpone both the clear and free
         * to after the RCU grace period, since the stack could be invalidated
         * as soon as bdi_work_clear() has done the wakeup.  That is also why
         * sync_mode and onstack were sampled into locals above: once the
         * waiter has been woken, this work item must not be touched again.
         */
        if (!onstack)
                bdi_work_clear(work);
        if (sync_mode == WB_SYNC_NONE || onstack)
                call_rcu(&work->rcu_head, bdi_work_free);
}

static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
{
        /*
         * The caller has retrieved the work arguments from this work,
         * drop our reference. If this is the last ref, delete and free it
         */
        if (atomic_dec_and_test(&work->pending)) {
                struct backing_dev_info *bdi = wb->bdi;

                spin_lock(&bdi->wb_lock);
                list_del_rcu(&work->list);
                spin_unlock(&bdi->wb_lock);

                wb_work_complete(work);
        }
}

static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
{
        work->seen = bdi->wb_mask;
        BUG_ON(!work->seen);
        atomic_set(&work->pending, bdi->wb_cnt);
        BUG_ON(!bdi->wb_cnt);

        /*
         * list_add_tail_rcu() contains the necessary barriers to
         * make sure the above stores are seen before the item is
         * noticed on the list
         */
        spin_lock(&bdi->wb_lock);
        list_add_tail_rcu(&work->list, &bdi->work_list);
        spin_unlock(&bdi->wb_lock);

        /*
         * If the default thread isn't there, make sure we add it. When
         * it gets created and wakes up, we'll run this work.
         */
        if (unlikely(list_empty_careful(&bdi->wb_list)))
                wake_up_process(default_backing_dev_info.wb.task);
        else {
                struct bdi_writeback *wb = &bdi->wb;

                /*
                 * End work now if this wb has no dirty IO pending. Otherwise
                 * wakeup the handling thread
                 */
                if (!wb_has_dirty_io(wb))
                        wb_clear_pending(wb, work);
                else if (wb->task)
                        wake_up_process(wb->task);
        }
}

/*
 * Used for on-stack allocated work items. The caller needs to wait until
 * the wb threads have acked the work before it's safe to continue.
 */
static void bdi_wait_on_work_clear(struct bdi_work *work)
{
        wait_on_bit(&work->state, WS_USED_B, bdi_sched_wait,
                    TASK_UNINTERRUPTIBLE);
}

static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
                                 struct wb_writeback_args *args)
{
        struct bdi_work *work;

        /*
         * This is WB_SYNC_NONE writeback, so if allocation fails just
         * wakeup the thread for old dirty data writeback
         */
        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
                bdi_work_init(work, args);
                bdi_queue_work(bdi, work);
        } else {
                struct bdi_writeback *wb = &bdi->wb;

                if (wb->task)
                        wake_up_process(wb->task);
        }
}

/**
 * bdi_sync_writeback - start and wait for writeback
 * @bdi: the backing device to write from
 * @sb: write inodes from this super_block
 *
 * Description:
 *   This does WB_SYNC_ALL data integrity writeback and waits for the
 *   IO to complete. Callers must hold the sb s_umount semaphore for
 *   reading, to avoid having the super disappear before we are done.
 */
static void bdi_sync_writeback(struct backing_dev_info *bdi,
                               struct super_block *sb)
{
        struct wb_writeback_args args = {
                .sb             = sb,
                .sync_mode      = WB_SYNC_ALL,
                .nr_pages       = LONG_MAX,
                .range_cyclic   = 0,
        };
        struct bdi_work work;

        bdi_work_init(&work, &args);
        work.state |= WS_ONSTACK;

        bdi_queue_work(bdi, &work);
        bdi_wait_on_work_clear(&work);
}
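
/*
 * Note that "work" above lives on bdi_sync_writeback()'s stack frame; the
 * moment bdi_work_clear() wakes this thread, that frame (and the work item
 * in it) may be reused.  See the comment in wb_work_complete() for the
 * ordering this imposes on the completion side.
 */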

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback.  The IO is merely
 *   scheduled here; it may not have started by the time this function
 *   returns, and we make no guarantees on completion.  The caller need
 *   not hold the sb s_umount semaphore.
 *
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
{
        struct wb_writeback_args args = {
                .sync_mode      = WB_SYNC_NONE,
                .nr_pages       = nr_pages,
                .range_cyclic   = 1,
        };

        bdi_alloc_queue_work(bdi, &args);
}
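
/*
 * Example (a hypothetical caller, not part of this file): opportunistically
 * flush up to 1024 pages backing a given inode.  The call returns without
 * waiting; completion is never guaranteed for WB_SYNC_NONE writeback.
 *
 *      static void example_kick_writeback(struct inode *inode)
 *      {
 *              bdi_start_writeback(inode_to_bdi(inode), 1024);
 *      }
 */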

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
        struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

        if (!list_empty(&wb->b_dirty)) {
                struct inode *tail;

                tail = list_entry(wb->b_dirty.next, struct inode, i_list);
                if (time_before(inode->dirtied_when, tail->dirtied_when))
                        inode->dirtied_when = jiffies;
        }
        list_move(&inode->i_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
        struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

        list_move(&inode->i_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
        /*
         * Prevent speculative execution through spin_unlock(&inode_lock);
         */
        smp_mb();
        wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
        bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
        /*
         * For inodes being constantly redirtied, dirtied_when can get stuck.
         * It _appears_ to be in the future, but is actually in distant past.
         * This test is necessary to prevent such wrapped-around relative times
         * from permanently stopping the whole pdflush writeback.
         */
        ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
        return ret;
}
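
/*
 * Worked example for the !CONFIG_64BIT case above: with HZ=1000 a 32-bit
 * jiffies counter wraps about every 49.7 days.  An inode whose dirtied_when
 * lies ~25 days in the past is more than half a wrap away from "now", so
 * time_after() alone would report it as dirtied in the future and it would
 * never be considered expired.  The extra time_before_eq() check rejects
 * such wrapped-around values.
 */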

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
                                struct list_head *dispatch_queue,
                                unsigned long *older_than_this)
{
        while (!list_empty(delaying_queue)) {
                struct inode *inode = list_entry(delaying_queue->prev,
                                                struct inode, i_list);
                if (older_than_this &&
                    inode_dirtied_after(inode, *older_than_this))
                        break;
                list_move(&inode->i_list, dispatch_queue);
        }
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
        list_splice_init(&wb->b_more_io, wb->b_io.prev);
        move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}

static int write_inode(struct inode *inode, int sync)
{
        if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
                return inode->i_sb->s_op->write_inode(inode, sync);
        return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
        DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
        wait_queue_head_t *wqh;

        wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
        do {
                spin_unlock(&inode_lock);
                __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
                spin_lock(&inode_lock);
        } while (inode->i_state & I_SYNC);
}

/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has ref on the inode (either via __iget or via syscall against an fd)
 * or the inode has I_WILL_FREE set (via generic_forget_inode)
 *
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
        struct address_space *mapping = inode->i_mapping;
        int wait = wbc->sync_mode == WB_SYNC_ALL;
        unsigned dirty;
        int ret;

        if (!atomic_read(&inode->i_count))
                WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
        else
                WARN_ON(inode->i_state & I_WILL_FREE);

        if (inode->i_state & I_SYNC) {
                /*
                 * If this inode is locked for writeback and we are not doing
                 * writeback-for-data-integrity, move it to b_more_io so that
                 * writeback can proceed with the other inodes on s_io.
                 *
                 * We'll have another go at writing back this inode when we
                 * completed a full scan of b_io.
                 */
                if (!wait) {
                        requeue_io(inode);
                        return 0;
                }

                /*
                 * It's a data-integrity sync.  We must wait.
                 */
                inode_wait_for_writeback(inode);
        }

        BUG_ON(inode->i_state & I_SYNC);

        /* Set I_SYNC, reset I_DIRTY */
        dirty = inode->i_state & I_DIRTY;
        inode->i_state |= I_SYNC;
        inode->i_state &= ~I_DIRTY;

        spin_unlock(&inode_lock);

        ret = do_writepages(mapping, wbc);

        /* Don't write the inode if only I_DIRTY_PAGES was set */
        if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
                int err = write_inode(inode, wait);
                if (ret == 0)
                        ret = err;
        }

        if (wait) {
                int err = filemap_fdatawait(mapping);
                if (ret == 0)
                        ret = err;
        }

        spin_lock(&inode_lock);
        inode->i_state &= ~I_SYNC;
        if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
                if (!(inode->i_state & I_DIRTY) &&
                    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
                        /*
                         * We didn't write back all the pages.  nfs_writepages()
                         * sometimes bails out without doing anything. Redirty
                         * the inode; Move it from b_io onto b_more_io/b_dirty.
                         */
                        /*
                         * akpm: if the caller was the kupdate function we put
                         * this inode at the head of b_dirty so it gets first
                         * consideration.  Otherwise, move it to the tail, for
                         * the reasons described there.  I'm not really sure
                         * how much sense this makes.  Presumably I had a good
                         * reason for doing it this way, and I'd rather not
                         * muck with it at present.
                         */
                        if (wbc->for_kupdate) {
                                /*
                                 * For the kupdate function we move the inode
                                 * to b_more_io so it will get more writeout as
                                 * soon as the queue becomes uncongested.
                                 */
                                inode->i_state |= I_DIRTY_PAGES;
                                if (wbc->nr_to_write <= 0) {
                                        /*
                                         * slice used up: queue for next turn
                                         */
                                        requeue_io(inode);
                                } else {
                                        /*
                                         * somehow blocked: retry later
                                         */
                                        redirty_tail(inode);
                                }
                        } else {
                                /*
                                 * Otherwise fully redirty the inode so that
                                 * other inodes on this superblock will get some
                                 * writeout.  Otherwise heavy writing to one
                                 * file would indefinitely suspend writeout of
                                 * all the other files.
                                 */
                                inode->i_state |= I_DIRTY_PAGES;
                                redirty_tail(inode);
                        }
                } else if (inode->i_state & I_DIRTY) {
                        /*
                         * Someone redirtied the inode while we were writing back
                         * the pages.
                         */
                        redirty_tail(inode);
                } else if (atomic_read(&inode->i_count)) {
                        /*
                         * The inode is clean, inuse
                         */
                        list_move(&inode->i_list, &inode_in_use);
                } else {
                        /*
                         * The inode is clean, unused
                         */
                        list_move(&inode->i_list, &inode_unused);
                }
        }
        inode_sync_complete(inode);
        return ret;
}

/*
 * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
 * before calling writeback. So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 *
 * Returns 0 if the super was successfully pinned (or pinning wasn't needed),
 * 1 if we failed.
 */
static int pin_sb_for_writeback(struct writeback_control *wbc,
                                struct inode *inode)
{
        struct super_block *sb = inode->i_sb;

        /*
         * Caller must already hold the ref for this
         */
        if (wbc->sync_mode == WB_SYNC_ALL) {
                WARN_ON(!rwsem_is_locked(&sb->s_umount));
                return 0;
        }

        spin_lock(&sb_lock);
        sb->s_count++;
        if (down_read_trylock(&sb->s_umount)) {
                if (sb->s_root) {
                        spin_unlock(&sb_lock);
                        return 0;
                }
                /*
                 * umounted, drop rwsem again and fall through to failure
                 */
                up_read(&sb->s_umount);
        }

        sb->s_count--;
        spin_unlock(&sb_lock);
        return 1;
}

static void unpin_sb_for_writeback(struct writeback_control *wbc,
                                   struct inode *inode)
{
        struct super_block *sb = inode->i_sb;

        if (wbc->sync_mode == WB_SYNC_ALL)
                return;

        up_read(&sb->s_umount);
        put_super(sb);
}
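
/*
 * The two helpers above are strictly paired for WB_SYNC_NONE writeback: a
 * successful pin takes an s_count reference (under sb_lock) plus s_umount
 * for reading, and unpin_sb_for_writeback() releases exactly those via
 * up_read() and put_super().  WB_SYNC_ALL callers already hold s_umount,
 * so both helpers do nothing for them.
 */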

static void writeback_inodes_wb(struct bdi_writeback *wb,
                                struct writeback_control *wbc)
{
        struct super_block *sb = wbc->sb;
        const int is_blkdev_sb = sb_is_blkdev_sb(sb);
        const unsigned long start = jiffies;    /* livelock avoidance */

        spin_lock(&inode_lock);

        if (!wbc->for_kupdate || list_empty(&wb->b_io))
                queue_io(wb, wbc->older_than_this);

        while (!list_empty(&wb->b_io)) {
                struct inode *inode = list_entry(wb->b_io.prev,
                                                struct inode, i_list);
                long pages_skipped;

                /*
                 * super block given and doesn't match, skip this inode
                 */
                if (sb && sb != inode->i_sb) {
                        redirty_tail(inode);
                        continue;
                }

                if (!bdi_cap_writeback_dirty(wb->bdi)) {
                        redirty_tail(inode);
                        if (is_blkdev_sb) {
                                /*
                                 * Dirty memory-backed blockdev: the ramdisk
                                 * driver does this.  Skip just this inode
                                 */
                                continue;
                        }
                        /*
                         * Dirty memory-backed inode against a filesystem other
                         * than the kernel-internal bdev filesystem.  Skip the
                         * entire superblock.
                         */
                        break;
                }

                if (inode->i_state & (I_NEW | I_WILL_FREE)) {
                        requeue_io(inode);
                        continue;
                }

                if (wbc->nonblocking && bdi_write_congested(wb->bdi)) {
                        wbc->encountered_congestion = 1;
                        if (!is_blkdev_sb)
                                break;          /* Skip a congested fs */
                        requeue_io(inode);
                        continue;               /* Skip a congested blockdev */
                }

                /*
                 * Was this inode dirtied after sync_sb_inodes was called?
                 * This keeps sync from doing extra work and avoids livelock.
                 */
                if (inode_dirtied_after(inode, start))
                        break;

                if (pin_sb_for_writeback(wbc, inode)) {
                        requeue_io(inode);
                        continue;
                }

                BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
                __iget(inode);
                pages_skipped = wbc->pages_skipped;
                writeback_single_inode(inode, wbc);
                unpin_sb_for_writeback(wbc, inode);
                if (wbc->pages_skipped != pages_skipped) {
                        /*
                         * writeback is not making progress due to locked
                         * buffers.  Skip this inode for now.
                         */
                        redirty_tail(inode);
                }
                spin_unlock(&inode_lock);
                iput(inode);
                cond_resched();
                spin_lock(&inode_lock);
                if (wbc->nr_to_write <= 0) {
                        wbc->more_io = 1;
                        break;
                }
                if (!list_empty(&wb->b_more_io))
                        wbc->more_io = 1;
        }

        spin_unlock(&inode_lock);
        /* Leave any unwritten inodes on b_io */
}

void writeback_inodes_wbc(struct writeback_control *wbc)
{
        struct backing_dev_info *bdi = wbc->bdi;

        writeback_inodes_wb(&bdi->wb, wbc);
}

/*
 * The maximum number of pages to writeout in a single bdi flush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty state each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES     1024
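
/*
 * With the common 4KB page size this caps each writeback chunk at roughly
 * 4MB of data before the wb_writeback() loop below re-examines nr_to_write
 * and the dirty thresholds.
 */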

static inline bool over_bground_thresh(void)
{
        unsigned long background_thresh, dirty_thresh;

        get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

        return (global_page_state(NR_FILE_DIRTY) +
                global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than one dirty_writeback_interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
                         struct wb_writeback_args *args)
{
        struct writeback_control wbc = {
                .bdi                    = wb->bdi,
                .sb                     = args->sb,
                .sync_mode              = args->sync_mode,
                .older_than_this        = NULL,
                .for_kupdate            = args->for_kupdate,
                .range_cyclic           = args->range_cyclic,
        };
        unsigned long oldest_jif;
        long wrote = 0;

        if (wbc.for_kupdate) {
                wbc.older_than_this = &oldest_jif;
                oldest_jif = jiffies -
                                msecs_to_jiffies(dirty_expire_interval * 10);
        }
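        /*
         * Note: dirty_expire_interval above is in centiseconds, hence the
         * "* 10" conversion to milliseconds; with the (assumed) default of
         * 3000 centiseconds, anything dirtied more than 30 seconds ago is
         * treated as old for kupdate-style writeback.
         */
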
        if (!wbc.range_cyclic) {
                wbc.range_start = 0;
                wbc.range_end = LLONG_MAX;
        }

        for (;;) {
                /*
                 * Don't flush anything for non-integrity writeback where
                 * no nr_pages was given
                 */
                if (!args->for_kupdate && args->nr_pages <= 0 &&
                     args->sync_mode == WB_SYNC_NONE)
                        break;

                /*
                 * If no specific pages were given and this is just a
                 * periodic background writeout and we are below the
                 * background dirty threshold, don't do anything
                 */
                if (args->for_kupdate && args->nr_pages <= 0 &&
                    !over_bground_thresh())
                        break;

                wbc.more_io = 0;
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                wbc.pages_skipped = 0;
                writeback_inodes_wb(wb, &wbc);
                args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
                wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;

                /*
                 * If we ran out of stuff to write, bail unless more_io got set
                 */
                if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
                        if (wbc.more_io && !wbc.for_kupdate)
                                continue;
                        break;
                }
        }

        return wrote;
}

/*
 * Return the next bdi_work struct that hasn't been processed by this
 * wb thread yet. ->seen is initially set for each thread that exists
 * for this device, when a thread first notices a piece of work it
 * clears its bit. Depending on writeback type, the thread will notify
 * completion on either receiving the work (WB_SYNC_NONE) or after
 * it is done (WB_SYNC_ALL).
 */
static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
                                           struct bdi_writeback *wb)
{
        struct bdi_work *work, *ret = NULL;

        rcu_read_lock();

        list_for_each_entry_rcu(work, &bdi->work_list, list) {
                if (!test_bit(wb->nr, &work->seen))
                        continue;
                clear_bit(wb->nr, &work->seen);

                ret = work;
                break;
        }

        rcu_read_unlock();
        return ret;
}
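
/*
 * A sketch of the handshake above: bdi_queue_work() seeds work->seen with
 * bdi->wb_mask (one bit per writeback thread) and sets work->pending to
 * bdi->wb_cnt.  Each thread that picks the work up clears its own bit, so
 * every registered thread sees a given work item at most once, and the
 * item is only unlinked and completed after all of them have dropped
 * their reference in wb_clear_pending().
 */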

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
        unsigned long expired;
        long nr_pages;

        expired = wb->last_old_flush +
                        msecs_to_jiffies(dirty_writeback_interval * 10);
        if (time_before(jiffies, expired))
                return 0;

        wb->last_old_flush = jiffies;
        nr_pages = global_page_state(NR_FILE_DIRTY) +
                        global_page_state(NR_UNSTABLE_NFS) +
                        (inodes_stat.nr_inodes - inodes_stat.nr_unused);

        if (nr_pages) {
                struct wb_writeback_args args = {
                        .nr_pages       = nr_pages,
                        .sync_mode      = WB_SYNC_NONE,
                        .for_kupdate    = 1,
                        .range_cyclic   = 1,
                };

                return wb_writeback(wb, &args);
        }

        return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
        struct backing_dev_info *bdi = wb->bdi;
        struct bdi_work *work;
        long wrote = 0;

        while ((work = get_next_work_item(bdi, wb)) != NULL) {
                struct wb_writeback_args args = work->args;

                /*
                 * Override sync mode, in case we must wait for completion
                 */
                if (force_wait)
                        work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;

                /*
                 * If this isn't a data integrity operation, just notify
                 * that we have seen this work and we are now starting it.
                 */
                if (args.sync_mode == WB_SYNC_NONE)
                        wb_clear_pending(wb, work);

                wrote += wb_writeback(wb, &args);

                /*
                 * This is a data integrity writeback, so only do the
                 * notification when we have completed the work.
                 */
                if (args.sync_mode == WB_SYNC_ALL)
                        wb_clear_pending(wb, work);
        }

        /*
         * Check for periodic writeback, kupdated() style
         */
        wrote += wb_check_old_data_flush(wb);

        return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_task(struct bdi_writeback *wb)
{
        unsigned long last_active = jiffies;
        unsigned long wait_jiffies = -1UL;
        long pages_written;

        while (!kthread_should_stop()) {
                pages_written = wb_do_writeback(wb, 0);

                if (pages_written)
                        last_active = jiffies;
                else if (wait_jiffies != -1UL) {
                        unsigned long max_idle;

                        /*
                         * Longest period of inactivity that we tolerate. If we
                         * see dirty data again later, the task will get
                         * recreated automatically.
                         */
                        max_idle = max(5UL * 60 * HZ, wait_jiffies);
                        if (time_after(jiffies, max_idle + last_active))
                                break;
                }

                wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
                schedule_timeout_interruptible(wait_jiffies);
                try_to_freeze();
        }

        return 0;
}

/*
 * Schedule writeback for all backing devices. This does WB_SYNC_NONE
 * writeback, for integrity writeback see bdi_sync_writeback().
 */
static void bdi_writeback_all(struct super_block *sb, long nr_pages)
{
        struct wb_writeback_args args = {
                .sb             = sb,
                .nr_pages       = nr_pages,
                .sync_mode      = WB_SYNC_NONE,
        };
        struct backing_dev_info *bdi;

        rcu_read_lock();

        list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
                if (!bdi_has_dirty_io(bdi))
                        continue;

                bdi_alloc_queue_work(bdi, &args);
        }

        rcu_read_unlock();
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
        if (nr_pages == 0)
                nr_pages = global_page_state(NR_FILE_DIRTY) +
                                global_page_state(NR_UNSTABLE_NFS);
        bdi_writeback_all(NULL, nr_pages);
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
        if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
                struct dentry *dentry;
                const char *name = "?";

                dentry = d_find_alias(inode);
                if (dentry) {
                        spin_lock(&dentry->d_lock);
                        name = (const char *) dentry->d_name.name;
                }
                printk(KERN_DEBUG
                       "%s(%d): dirtied inode %lu (%s) on %s\n",
                       current->comm, task_pid_nr(current), inode->i_ino,
                       name, inode->i_sb->s_id);
                if (dentry) {
                        spin_unlock(&dentry->d_lock);
                        dput(dentry);
                }
        }
}

/**
 *      __mark_inode_dirty -    internal function
 *      @inode: inode to mark
 *      @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *      Mark an inode as dirty. Callers should use mark_inode_dirty or
 *      mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
        struct super_block *sb = inode->i_sb;

        /*
         * Don't do this for I_DIRTY_PAGES - that doesn't actually
         * dirty the inode itself
         */
        if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
                if (sb->s_op->dirty_inode)
                        sb->s_op->dirty_inode(inode);
        }

        /*
         * make sure that changes are seen by all cpus before we test i_state
         * -- mikulas
         */
        smp_mb();

        /* avoid the locking if we can */
        if ((inode->i_state & flags) == flags)
                return;

        if (unlikely(block_dump))
                block_dump___mark_inode_dirty(inode);

        spin_lock(&inode_lock);
        if ((inode->i_state & flags) != flags) {
                const int was_dirty = inode->i_state & I_DIRTY;

                inode->i_state |= flags;

                /*
                 * If the inode is being synced, just update its dirty state.
                 * The unlocker will place the inode on the appropriate
                 * superblock list, based upon its state.
                 */
                if (inode->i_state & I_SYNC)
                        goto out;

                /*
                 * Only add valid (hashed) inodes to the superblock's
                 * dirty list.  Add blockdev inodes as well.
                 */
                if (!S_ISBLK(inode->i_mode)) {
                        if (hlist_unhashed(&inode->i_hash))
                                goto out;
                }
                if (inode->i_state & (I_FREEING|I_CLEAR))
                        goto out;

                /*
                 * If the inode was already on b_dirty/b_io/b_more_io, don't
                 * reposition it (that would break b_dirty time-ordering).
                 */
                if (!was_dirty) {
                        struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
                        struct backing_dev_info *bdi = wb->bdi;

                        if (bdi_cap_writeback_dirty(bdi) &&
                            !test_bit(BDI_registered, &bdi->state)) {
                                WARN_ON(1);
                                printk(KERN_ERR "bdi-%s not registered\n",
                                                                bdi->name);
                        }

                        inode->dirtied_when = jiffies;
                        list_move(&inode->i_list, &wb->b_dirty);
                }
        }
out:
        spin_unlock(&inode_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);
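
/*
 * Example (sketch): filesystems normally reach this function through the
 * mark_inode_dirty() wrapper from <linux/fs.h>, which expands to
 * __mark_inode_dirty(inode, I_DIRTY), e.g. after a timestamp update:
 *
 *      inode->i_mtime = CURRENT_TIME;
 *      mark_inode_dirty(inode);
 */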

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If we're a pdflush thread, then implement pdflush collision avoidance
 * against the entire list.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct super_block *sb)
{
        struct inode *inode, *old_inode = NULL;

        /*
         * We need to be protected against the filesystem going from
         * r/o to r/w or vice versa.
         */
        WARN_ON(!rwsem_is_locked(&sb->s_umount));

        spin_lock(&inode_lock);

        /*
         * Data integrity sync. Must wait for all pages under writeback,
         * because there may have been pages dirtied before our sync
         * call, but which had writeout started before we write it out.
         * In which case, the inode may not be on the dirty list, but
         * we still have to wait for that writeout.
         */
        list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                struct address_space *mapping;

                if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
                        continue;
                mapping = inode->i_mapping;
                if (mapping->nrpages == 0)
                        continue;
                __iget(inode);
                spin_unlock(&inode_lock);
                /*
                 * We hold a reference to 'inode' so it couldn't have
                 * been removed from s_inodes list while we dropped the
                 * inode_lock.  We cannot iput the inode now as we can
                 * be holding the last reference and we cannot iput it
                 * under inode_lock. So we keep the reference and iput
                 * it later.
                 */
                iput(old_inode);
                old_inode = inode;

                filemap_fdatawait(mapping);

                cond_resched();

                spin_lock(&inode_lock);
        }
        spin_unlock(&inode_lock);
        iput(old_inode);
}

/**
 * writeback_inodes_sb  -       writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb)
{
        unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
        unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
        long nr_to_write;

        nr_to_write = nr_dirty + nr_unstable +
                        (inodes_stat.nr_inodes - inodes_stat.nr_unused);

        bdi_writeback_all(sb, nr_to_write);
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * sync_inodes_sb       -       sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
        bdi_sync_writeback(sb->s_bdi, sb);
        wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now      -       write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
        int ret;
        struct writeback_control wbc = {
                .nr_to_write = LONG_MAX,
                .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
                .range_start = 0,
                .range_end = LLONG_MAX,
        };

        if (!mapping_cap_writeback_dirty(inode->i_mapping))
                wbc.nr_to_write = 0;

        might_sleep();
        spin_lock(&inode_lock);
        ret = writeback_single_inode(inode, &wbc);
        spin_unlock(&inode_lock);
        if (sync)
                inode_sync_wait(inode);
        return ret;
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
        int ret;

        spin_lock(&inode_lock);
        ret = writeback_single_inode(inode, wbc);
        spin_unlock(&inode_lock);
        return ret;
}
EXPORT_SYMBOL(sync_inode);