writeback: inline allocation failure handling in bdi_alloc_queue_work()
/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002    Andrew Morton
 *              Split out of fs/inode.c
 *              Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

#define inode_to_bdi(inode)     ((inode)->i_mapping->backing_dev_info)

/*
 * We don't actually have pdflush, but this one is exported through /proc...
 */
int nr_pdflush_threads;

/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_args {
        long nr_pages;
        struct super_block *sb;
        enum writeback_sync_modes sync_mode;
        int for_kupdate;
        int range_cyclic;
};

/*
 * Work items for the bdi_writeback threads
 */
struct bdi_work {
        struct list_head list;
        struct rcu_head rcu_head;

        unsigned long seen;
        atomic_t pending;

        struct wb_writeback_args args;

        unsigned long state;
};

enum {
        WS_USED_B = 0,
        WS_ONSTACK_B,
};

#define WS_USED (1 << WS_USED_B)
#define WS_ONSTACK (1 << WS_ONSTACK_B)

static inline bool bdi_work_on_stack(struct bdi_work *work)
{
        return test_bit(WS_ONSTACK_B, &work->state);
}

static inline void bdi_work_init(struct bdi_work *work,
                                 struct writeback_control *wbc)
{
        INIT_RCU_HEAD(&work->rcu_head);
        work->args.sb = wbc->sb;
        work->args.nr_pages = wbc->nr_to_write;
        work->args.sync_mode = wbc->sync_mode;
        work->args.range_cyclic = wbc->range_cyclic;
        work->args.for_kupdate = 0;
        work->state = WS_USED;
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
        return !list_empty(&bdi->work_list);
}

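/*
 * Mark a work item as no longer in use and wake up anyone waiting on the
 * WS_USED bit (see bdi_wait_on_work_clear()).
 */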
static void bdi_work_clear(struct bdi_work *work)
{
        clear_bit(WS_USED_B, &work->state);
        smp_mb__after_clear_bit();
        wake_up_bit(&work->state, WS_USED_B);
}

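/*
 * RCU callback: dynamically allocated work items are simply freed, while
 * on-stack items are handed back to their owner via bdi_work_clear().
 */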
static void bdi_work_free(struct rcu_head *head)
{
        struct bdi_work *work = container_of(head, struct bdi_work, rcu_head);

        if (!bdi_work_on_stack(work))
                kfree(work);
        else
                bdi_work_clear(work);
}

static void wb_work_complete(struct bdi_work *work)
{
        const enum writeback_sync_modes sync_mode = work->args.sync_mode;

        /*
         * For allocated work, we can clear the done/seen bit right here.
         * For on-stack work, we need to postpone both the clear and free
         * to after the RCU grace period, since the stack could be invalidated
         * as soon as bdi_work_clear() has done the wakeup.
         */
        if (!bdi_work_on_stack(work))
                bdi_work_clear(work);
        if (sync_mode == WB_SYNC_NONE || bdi_work_on_stack(work))
                call_rcu(&work->rcu_head, bdi_work_free);
}

static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
{
        /*
         * The caller has retrieved the work arguments from this work, so
         * drop our reference. If this is the last ref, delete and free it.
         */
        if (atomic_dec_and_test(&work->pending)) {
                struct backing_dev_info *bdi = wb->bdi;

                spin_lock(&bdi->wb_lock);
                list_del_rcu(&work->list);
                spin_unlock(&bdi->wb_lock);

                wb_work_complete(work);
        }
}

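/*
 * Add a work item to the bdi's work list and wake up a writeback thread
 * to process it.
 */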
static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
{
        work->seen = bdi->wb_mask;
        BUG_ON(!work->seen);
        atomic_set(&work->pending, bdi->wb_cnt);
        BUG_ON(!bdi->wb_cnt);

        /*
         * Make sure stores are seen before it appears on the list
         */
        smp_mb();

        spin_lock(&bdi->wb_lock);
        list_add_tail_rcu(&work->list, &bdi->work_list);
        spin_unlock(&bdi->wb_lock);

        /*
         * If the default thread isn't there, make sure we add it. When
         * it gets created and wakes up, we'll run this work.
         */
        if (unlikely(list_empty_careful(&bdi->wb_list)))
                wake_up_process(default_backing_dev_info.wb.task);
        else {
                struct bdi_writeback *wb = &bdi->wb;

                /*
                 * End work now if this wb has no dirty IO pending. Otherwise
                 * wake up the handling thread.
                 */
                if (!wb_has_dirty_io(wb))
                        wb_clear_pending(wb, work);
                else if (wb->task)
                        wake_up_process(wb->task);
        }
}

/*
 * Used for on-stack allocated work items. The caller needs to wait until
 * the wb threads have acked the work before it's safe to continue.
 */
static void bdi_wait_on_work_clear(struct bdi_work *work)
{
        wait_on_bit(&work->state, WS_USED_B, bdi_sched_wait,
                    TASK_UNINTERRUPTIBLE);
}

static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
                                 struct writeback_control *wbc)
{
        struct bdi_work *work;

        /*
         * This is WB_SYNC_NONE writeback, so if allocation fails just
         * wake up the thread for old dirty data writeback.
         */
        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (work) {
                bdi_work_init(work, wbc);
                bdi_queue_work(bdi, work);
        } else {
                struct bdi_writeback *wb = &bdi->wb;

                if (wb->task)
                        wake_up_process(wb->task);
        }
}

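/*
 * Start writeback against the bdi named in @wbc. WB_SYNC_NONE requests are
 * queued asynchronously; WB_SYNC_ALL requests use an on-stack work item and
 * wait for it to be released before returning.
 */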
void bdi_start_writeback(struct writeback_control *wbc)
{
        /*
         * WB_SYNC_NONE is opportunistic writeback. If this allocation fails,
         * bdi_queue_work() will wake up the thread and flush old data. This
         * should ensure some amount of progress in freeing memory.
         */
        if (wbc->sync_mode != WB_SYNC_ALL)
                bdi_alloc_queue_work(wbc->bdi, wbc);
        else {
                struct bdi_work work;

                bdi_work_init(&work, wbc);
                work.state |= WS_ONSTACK;

                bdi_queue_work(wbc->bdi, &work);
                bdi_wait_on_work_clear(&work);
        }
}

/*
 * Redirty an inode: set its when-it-was-dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
        struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

        if (!list_empty(&wb->b_dirty)) {
                struct inode *tail;

                tail = list_entry(wb->b_dirty.next, struct inode, i_list);
                if (time_before(inode->dirtied_when, tail->dirtied_when))
                        inode->dirtied_when = jiffies;
        }
        list_move(&inode->i_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
        struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;

        list_move(&inode->i_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
        /*
         * Prevent speculative execution through spin_unlock(&inode_lock);
         */
        smp_mb();
        wake_up_bit(&inode->i_state, __I_SYNC);
}

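/*
 * Return true if @inode was dirtied after time @t, taking care not to be
 * fooled by jiffies wraparound on 32-bit systems (see below).
 */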
static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
        bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
        /*
         * For inodes being constantly redirtied, dirtied_when can get stuck.
         * It _appears_ to be in the future, but is actually in distant past.
         * This test is necessary to prevent such wrapped-around relative times
         * from permanently stopping the whole pdflush writeback.
         */
        ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
        return ret;
}

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
                                struct list_head *dispatch_queue,
                                unsigned long *older_than_this)
{
        while (!list_empty(delaying_queue)) {
                struct inode *inode = list_entry(delaying_queue->prev,
                                                struct inode, i_list);
                if (older_than_this &&
                    inode_dirtied_after(inode, *older_than_this))
                        break;
                list_move(&inode->i_list, dispatch_queue);
        }
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
{
        list_splice_init(&wb->b_more_io, wb->b_io.prev);
        move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
}

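/*
 * Write the inode itself out via the superblock's ->write_inode method,
 * if the filesystem provides one and the inode is not bad.
 */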
static int write_inode(struct inode *inode, int sync)
{
        if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
                return inode->i_sb->s_op->write_inode(inode, sync);
        return 0;
}

/*
 * Wait for writeback on an inode to complete.
 */
static void inode_wait_for_writeback(struct inode *inode)
{
        DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
        wait_queue_head_t *wqh;

        wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
        do {
                spin_unlock(&inode_lock);
                __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
                spin_lock(&inode_lock);
        } while (inode->i_state & I_SYNC);
}

/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has a ref on the inode (either via __iget or via syscall against an
 * fd) or the inode has I_WILL_FREE set (via generic_forget_inode).
 *
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.
 */
static int
writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
        struct address_space *mapping = inode->i_mapping;
        int wait = wbc->sync_mode == WB_SYNC_ALL;
        unsigned dirty;
        int ret;

        if (!atomic_read(&inode->i_count))
                WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
        else
                WARN_ON(inode->i_state & I_WILL_FREE);

        if (inode->i_state & I_SYNC) {
                /*
                 * If this inode is locked for writeback and we are not doing
                 * writeback-for-data-integrity, move it to b_more_io so that
                 * writeback can proceed with the other inodes on s_io.
                 *
                 * We'll have another go at writing back this inode when we
                 * completed a full scan of b_io.
                 */
                if (!wait) {
                        requeue_io(inode);
                        return 0;
                }

                /*
                 * It's a data-integrity sync.  We must wait.
                 */
                inode_wait_for_writeback(inode);
        }

        BUG_ON(inode->i_state & I_SYNC);

        /* Set I_SYNC, reset I_DIRTY */
        dirty = inode->i_state & I_DIRTY;
        inode->i_state |= I_SYNC;
        inode->i_state &= ~I_DIRTY;

        spin_unlock(&inode_lock);

        ret = do_writepages(mapping, wbc);

        /* Don't write the inode if only I_DIRTY_PAGES was set */
        if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
                int err = write_inode(inode, wait);
                if (ret == 0)
                        ret = err;
        }

        if (wait) {
                int err = filemap_fdatawait(mapping);
                if (ret == 0)
                        ret = err;
        }

        spin_lock(&inode_lock);
        inode->i_state &= ~I_SYNC;
        if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
                if (!(inode->i_state & I_DIRTY) &&
                    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
                        /*
                         * We didn't write back all the pages.  nfs_writepages()
                         * sometimes bails out without doing anything. Redirty
                         * the inode; move it from b_io onto b_more_io/b_dirty.
                         */
                        /*
                         * akpm: if the caller was the kupdate function we put
                         * this inode at the head of b_dirty so it gets first
                         * consideration.  Otherwise, move it to the tail, for
                         * the reasons described there.  I'm not really sure
                         * how much sense this makes.  Presumably I had good
                         * reasons for doing it this way, and I'd rather not
                         * muck with it at present.
                         */
                        if (wbc->for_kupdate) {
                                /*
                                 * For the kupdate function we move the inode
                                 * to b_more_io so it will get more writeout as
                                 * soon as the queue becomes uncongested.
                                 */
                                inode->i_state |= I_DIRTY_PAGES;
                                if (wbc->nr_to_write <= 0) {
                                        /*
                                         * slice used up: queue for next turn
                                         */
                                        requeue_io(inode);
                                } else {
                                        /*
                                         * somehow blocked: retry later
                                         */
                                        redirty_tail(inode);
                                }
                        } else {
                                /*
                                 * Otherwise fully redirty the inode so that
                                 * other inodes on this superblock will get some
                                 * writeout.  Without this, heavy writing to one
                                 * file would indefinitely suspend writeout of
                                 * all the other files.
                                 */
                                inode->i_state |= I_DIRTY_PAGES;
                                redirty_tail(inode);
                        }
                } else if (inode->i_state & I_DIRTY) {
                        /*
                         * Someone redirtied the inode while we were writing
                         * back the pages.
                         */
                        redirty_tail(inode);
                } else if (atomic_read(&inode->i_count)) {
                        /*
                         * The inode is clean, in use
                         */
                        list_move(&inode->i_list, &inode_in_use);
                } else {
                        /*
                         * The inode is clean, unused
                         */
                        list_move(&inode->i_list, &inode_unused);
                }
        }
        inode_sync_complete(inode);
        return ret;
}

/*
 * For WB_SYNC_NONE writeback, the caller does not have the sb pinned
 * before calling writeback. So make sure that we do pin it, so it doesn't
 * go away while we are writing inodes from it.
 *
 * Returns 0 if the super was successfully pinned (or pinning wasn't needed),
 * 1 if we failed.
 */
static int pin_sb_for_writeback(struct writeback_control *wbc,
                                struct inode *inode)
{
        struct super_block *sb = inode->i_sb;

        /*
         * Caller must already hold the ref for this
         */
        if (wbc->sync_mode == WB_SYNC_ALL) {
                WARN_ON(!rwsem_is_locked(&sb->s_umount));
                return 0;
        }

        spin_lock(&sb_lock);
        sb->s_count++;
        if (down_read_trylock(&sb->s_umount)) {
                if (sb->s_root) {
                        spin_unlock(&sb_lock);
                        return 0;
                }
                /*
                 * umounted, drop rwsem again and fall through to failure
                 */
                up_read(&sb->s_umount);
        }

        sb->s_count--;
        spin_unlock(&sb_lock);
        return 1;
}

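/*
 * Drop the superblock reference taken by pin_sb_for_writeback(). For
 * WB_SYNC_ALL writeback the caller holds the reference, so do nothing.
 */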
static void unpin_sb_for_writeback(struct writeback_control *wbc,
                                   struct inode *inode)
{
        struct super_block *sb = inode->i_sb;

        if (wbc->sync_mode == WB_SYNC_ALL)
                return;

        up_read(&sb->s_umount);
        put_super(sb);
}

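/*
 * Write back dirty inodes queued on this bdi_writeback, subject to the
 * constraints in @wbc (target superblock, congestion, nr_to_write).
 */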
static void writeback_inodes_wb(struct bdi_writeback *wb,
                                struct writeback_control *wbc)
{
        struct super_block *sb = wbc->sb;
        const int is_blkdev_sb = sb_is_blkdev_sb(sb);
        const unsigned long start = jiffies;    /* livelock avoidance */

        spin_lock(&inode_lock);

        if (!wbc->for_kupdate || list_empty(&wb->b_io))
                queue_io(wb, wbc->older_than_this);

        while (!list_empty(&wb->b_io)) {
                struct inode *inode = list_entry(wb->b_io.prev,
                                                struct inode, i_list);
                long pages_skipped;

                /*
                 * super block given and doesn't match, skip this inode
                 */
                if (sb && sb != inode->i_sb) {
                        redirty_tail(inode);
                        continue;
                }

                if (!bdi_cap_writeback_dirty(wb->bdi)) {
                        redirty_tail(inode);
                        if (is_blkdev_sb) {
                                /*
                                 * Dirty memory-backed blockdev: the ramdisk
                                 * driver does this.  Skip just this inode
                                 */
                                continue;
                        }
                        /*
                         * Dirty memory-backed inode against a filesystem other
                         * than the kernel-internal bdev filesystem.  Skip the
                         * entire superblock.
                         */
                        break;
                }

                if (inode->i_state & (I_NEW | I_WILL_FREE)) {
                        requeue_io(inode);
                        continue;
                }

                if (wbc->nonblocking && bdi_write_congested(wb->bdi)) {
                        wbc->encountered_congestion = 1;
                        if (!is_blkdev_sb)
                                break;          /* Skip a congested fs */
                        requeue_io(inode);
                        continue;               /* Skip a congested blockdev */
                }

                /*
                 * Was this inode dirtied after sync_sb_inodes was called?
                 * This keeps sync from doing extra work and prevents livelock.
                 */
                if (inode_dirtied_after(inode, start))
                        break;

                if (pin_sb_for_writeback(wbc, inode)) {
                        requeue_io(inode);
                        continue;
                }

                BUG_ON(inode->i_state & (I_FREEING | I_CLEAR));
                __iget(inode);
                pages_skipped = wbc->pages_skipped;
                writeback_single_inode(inode, wbc);
                unpin_sb_for_writeback(wbc, inode);
                if (wbc->pages_skipped != pages_skipped) {
                        /*
                         * writeback is not making progress due to locked
                         * buffers.  Skip this inode for now.
                         */
                        redirty_tail(inode);
                }
                spin_unlock(&inode_lock);
                iput(inode);
                cond_resched();
                spin_lock(&inode_lock);
                if (wbc->nr_to_write <= 0) {
                        wbc->more_io = 1;
                        break;
                }
                if (!list_empty(&wb->b_more_io))
                        wbc->more_io = 1;
        }

        spin_unlock(&inode_lock);
        /* Leave any unwritten inodes on b_io */
}

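/*
 * Convenience wrapper: write back inodes for the bdi named in @wbc.
 */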
void writeback_inodes_wbc(struct writeback_control *wbc)
{
        struct backing_dev_info *bdi = wbc->bdi;

        writeback_inodes_wb(&bdi->wb, wbc);
}

/*
 * The maximum number of pages to writeout in a single bdi flush/kupdate
 * operation.  We do this so we don't hold I_SYNC against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code re-evaluates
 * the dirty state each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES     1024

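/*
 * Return true if the global count of dirty (and unstable NFS) pages is at
 * or above the background writeback threshold.
 */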
static inline bool over_bground_thresh(void)
{
        unsigned long background_thresh, dirty_thresh;

        get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);

        return (global_page_state(NR_FILE_DIRTY) +
                global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval, then leave a one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
                         struct wb_writeback_args *args)
{
        struct writeback_control wbc = {
                .bdi                    = wb->bdi,
                .sb                     = args->sb,
                .sync_mode              = args->sync_mode,
                .older_than_this        = NULL,
                .for_kupdate            = args->for_kupdate,
                .range_cyclic           = args->range_cyclic,
        };
        unsigned long oldest_jif;
        long wrote = 0;

        if (wbc.for_kupdate) {
                wbc.older_than_this = &oldest_jif;
                oldest_jif = jiffies -
                                msecs_to_jiffies(dirty_expire_interval * 10);
        }
        if (!wbc.range_cyclic) {
                wbc.range_start = 0;
                wbc.range_end = LLONG_MAX;
        }

        for (;;) {
                /*
                 * Don't flush anything for non-integrity writeback where
                 * no nr_pages was given.
                 */
                if (!args->for_kupdate && args->nr_pages <= 0 &&
                     args->sync_mode == WB_SYNC_NONE)
                        break;

                /*
                 * If no specific pages were given and this is just a
                 * periodic background writeout and we are below the
                 * background dirty threshold, don't do anything.
                 */
                if (args->for_kupdate && args->nr_pages <= 0 &&
                    !over_bground_thresh())
                        break;

                wbc.more_io = 0;
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                wbc.pages_skipped = 0;
                writeback_inodes_wb(wb, &wbc);
                args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
                wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;

                /*
                 * If we ran out of stuff to write, bail unless more_io got set.
                 */
                if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
                        if (wbc.more_io && !wbc.for_kupdate)
                                continue;
                        break;
                }
        }

        return wrote;
}

/*
 * Return the next bdi_work struct that hasn't been processed by this
 * wb thread yet.
 */
static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
                                           struct bdi_writeback *wb)
{
        struct bdi_work *work, *ret = NULL;

        rcu_read_lock();

        list_for_each_entry_rcu(work, &bdi->work_list, list) {
                if (!test_and_clear_bit(wb->nr, &work->seen))
                        continue;

                ret = work;
                break;
        }

        rcu_read_unlock();
        return ret;
}

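/*
 * kupdated-style periodic writeback: if dirty_writeback_interval has passed
 * since the last flush, write back old dirty data.
 */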
static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
        unsigned long expired;
        long nr_pages;

        expired = wb->last_old_flush +
                        msecs_to_jiffies(dirty_writeback_interval * 10);
        if (time_before(jiffies, expired))
                return 0;

        wb->last_old_flush = jiffies;
        nr_pages = global_page_state(NR_FILE_DIRTY) +
                        global_page_state(NR_UNSTABLE_NFS) +
                        (inodes_stat.nr_inodes - inodes_stat.nr_unused);

        if (nr_pages) {
                struct wb_writeback_args args = {
                        .nr_pages       = nr_pages,
                        .sync_mode      = WB_SYNC_NONE,
                        .for_kupdate    = 1,
                        .range_cyclic   = 1,
                };

                return wb_writeback(wb, &args);
        }

        return 0;
}

/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
        struct backing_dev_info *bdi = wb->bdi;
        struct bdi_work *work;
        long wrote = 0;

        while ((work = get_next_work_item(bdi, wb)) != NULL) {
                struct wb_writeback_args args = work->args;

                /*
                 * Override sync mode, in case we must wait for completion
                 */
                if (force_wait)
                        work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;

                /*
                 * If this isn't a data integrity operation, just notify
                 * that we have seen this work and we are now starting it.
                 */
                if (args.sync_mode == WB_SYNC_NONE)
                        wb_clear_pending(wb, work);

                wrote += wb_writeback(wb, &args);

                /*
                 * This is a data integrity writeback, so only do the
                 * notification when we have completed the work.
                 */
                if (args.sync_mode == WB_SYNC_ALL)
                        wb_clear_pending(wb, work);
        }

        /*
         * Check for periodic writeback, kupdated() style
         */
        wrote += wb_check_old_data_flush(wb);

        return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_task(struct bdi_writeback *wb)
{
        unsigned long last_active = jiffies;
        unsigned long wait_jiffies = -1UL;
        long pages_written;

        while (!kthread_should_stop()) {
                pages_written = wb_do_writeback(wb, 0);

                if (pages_written)
                        last_active = jiffies;
                else if (wait_jiffies != -1UL) {
                        unsigned long max_idle;

                        /*
                         * Longest period of inactivity that we tolerate. If we
                         * see dirty data again later, the task will get
                         * recreated automatically.
                         */
                        max_idle = max(5UL * 60 * HZ, wait_jiffies);
                        if (time_after(jiffies, max_idle + last_active))
                                break;
                }

                wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(wait_jiffies);
                try_to_freeze();
        }

        return 0;
}

/*
 * Schedule writeback for all backing devices. Can only be used for
 * WB_SYNC_NONE writeback; WB_SYNC_ALL should use bdi_start_writeback()
 * and pass in the superblock.
 */
static void bdi_writeback_all(struct writeback_control *wbc)
{
        struct backing_dev_info *bdi;

        WARN_ON(wbc->sync_mode == WB_SYNC_ALL);

        rcu_read_lock();

        list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
                if (!bdi_has_dirty_io(bdi))
                        continue;

                bdi_alloc_queue_work(bdi, wbc);
        }

        rcu_read_unlock();
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages)
{
        struct writeback_control wbc = {
                .sync_mode      = WB_SYNC_NONE,
                .older_than_this = NULL,
                .range_cyclic   = 1,
        };

        if (nr_pages == 0)
                nr_pages = global_page_state(NR_FILE_DIRTY) +
                                global_page_state(NR_UNSTABLE_NFS);
        wbc.nr_to_write = nr_pages;
        bdi_writeback_all(&wbc);
}

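/*
 * Log which task dirtied which inode; called when block_dump is enabled.
 */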
static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
        if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
                struct dentry *dentry;
                const char *name = "?";

                dentry = d_find_alias(inode);
                if (dentry) {
                        spin_lock(&dentry->d_lock);
                        name = (const char *) dentry->d_name.name;
                }
                printk(KERN_DEBUG
                       "%s(%d): dirtied inode %lu (%s) on %s\n",
                       current->comm, task_pid_nr(current), inode->i_ino,
                       name, inode->i_sb->s_id);
                if (dentry) {
                        spin_unlock(&dentry->d_lock);
                        dput(dentry);
                }
        }
}

/**
 *      __mark_inode_dirty -    internal function
 *      @inode: inode to mark
 *      @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *      Mark an inode as dirty. Callers should use mark_inode_dirty or
 *      mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
        struct super_block *sb = inode->i_sb;

        /*
         * Don't do this for I_DIRTY_PAGES - that doesn't actually
         * dirty the inode itself
         */
        if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
                if (sb->s_op->dirty_inode)
                        sb->s_op->dirty_inode(inode);
        }

        /*
         * make sure that changes are seen by all cpus before we test i_state
         * -- mikulas
         */
        smp_mb();

        /* avoid the locking if we can */
        if ((inode->i_state & flags) == flags)
                return;

        if (unlikely(block_dump))
                block_dump___mark_inode_dirty(inode);

        spin_lock(&inode_lock);
        if ((inode->i_state & flags) != flags) {
                const int was_dirty = inode->i_state & I_DIRTY;

                inode->i_state |= flags;

                /*
                 * If the inode is being synced, just update its dirty state.
                 * The unlocker will place the inode on the appropriate
                 * superblock list, based upon its state.
                 */
                if (inode->i_state & I_SYNC)
                        goto out;

                /*
                 * Only add valid (hashed) inodes to the superblock's
                 * dirty list.  Add blockdev inodes as well.
                 */
                if (!S_ISBLK(inode->i_mode)) {
                        if (hlist_unhashed(&inode->i_hash))
                                goto out;
                }
                if (inode->i_state & (I_FREEING|I_CLEAR))
                        goto out;

                /*
                 * If the inode was already on b_dirty/b_io/b_more_io, don't
                 * reposition it (that would break b_dirty time-ordering).
                 */
                if (!was_dirty) {
                        struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
                        struct backing_dev_info *bdi = wb->bdi;

                        if (bdi_cap_writeback_dirty(bdi) &&
                            !test_bit(BDI_registered, &bdi->state)) {
                                WARN_ON(1);
                                printk(KERN_ERR "bdi-%s not registered\n",
                                                                bdi->name);
                        }

                        inode->dirtied_when = jiffies;
                        list_move(&inode->i_list, &wb->b_dirty);
                }
        }
out:
        spin_unlock(&inode_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If we're a pdflush thread, then implement pdflush collision avoidance
 * against the entire list.
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * The inodes to be written are parked on bdi->b_io.  They are moved back onto
 * bdi->b_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
static void wait_sb_inodes(struct writeback_control *wbc)
{
        struct inode *inode, *old_inode = NULL;

        /*
         * We need to be protected against the filesystem going from
         * r/o to r/w or vice versa.
         */
        WARN_ON(!rwsem_is_locked(&wbc->sb->s_umount));

        spin_lock(&inode_lock);

        /*
         * Data integrity sync. Must wait for all pages under writeback,
         * because there may have been pages dirtied before our sync
         * call, but which had writeout started before we write it out.
         * In which case, the inode may not be on the dirty list, but
         * we still have to wait for that writeout.
         */
        list_for_each_entry(inode, &wbc->sb->s_inodes, i_sb_list) {
                struct address_space *mapping;

                if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
                        continue;
                mapping = inode->i_mapping;
                if (mapping->nrpages == 0)
                        continue;
                __iget(inode);
                spin_unlock(&inode_lock);
                /*
                 * We hold a reference to 'inode' so it couldn't have
                 * been removed from s_inodes list while we dropped the
                 * inode_lock.  We cannot iput the inode now as we can
                 * be holding the last reference and we cannot iput it
                 * under inode_lock. So we keep the reference and iput
                 * it later.
                 */
                iput(old_inode);
                old_inode = inode;

                filemap_fdatawait(mapping);

                cond_resched();

                spin_lock(&inode_lock);
        }
        spin_unlock(&inode_lock);
        iput(old_inode);
}

/**
 * writeback_inodes_sb  -       writeback dirty inodes from given super_block
 * @sb: the superblock
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO. The number of pages submitted is
 * returned.
 */
long writeback_inodes_sb(struct super_block *sb)
{
        struct writeback_control wbc = {
                .sb             = sb,
                .sync_mode      = WB_SYNC_NONE,
                .range_start    = 0,
                .range_end      = LLONG_MAX,
        };
        unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
        unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
        long nr_to_write;

        nr_to_write = nr_dirty + nr_unstable +
                        (inodes_stat.nr_inodes - inodes_stat.nr_unused);

        wbc.nr_to_write = nr_to_write;
        bdi_writeback_all(&wbc);
        return nr_to_write - wbc.nr_to_write;
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * sync_inodes_sb       -       sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block. The number of pages synced is returned.
 */
long sync_inodes_sb(struct super_block *sb)
{
        struct writeback_control wbc = {
                .sb             = sb,
                .bdi            = sb->s_bdi,
                .sync_mode      = WB_SYNC_ALL,
                .range_start    = 0,
                .range_end      = LLONG_MAX,
        };
        long nr_to_write = LONG_MAX; /* doesn't actually matter */

        wbc.nr_to_write = nr_to_write;
        bdi_start_writeback(&wbc);
        wait_sb_inodes(&wbc);
        return nr_to_write - wbc.nr_to_write;
}
EXPORT_SYMBOL(sync_inodes_sb);

/**
 * write_inode_now      -       write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
        int ret;
        struct writeback_control wbc = {
                .nr_to_write = LONG_MAX,
                .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
                .range_start = 0,
                .range_end = LLONG_MAX,
        };

        if (!mapping_cap_writeback_dirty(inode->i_mapping))
                wbc.nr_to_write = 0;

        might_sleep();
        spin_lock(&inode_lock);
        ret = writeback_single_inode(inode, &wbc);
        spin_unlock(&inode_lock);
        if (sync)
                inode_sync_wait(inode);
        return ret;
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
        int ret;

        spin_lock(&inode_lock);
        ret = writeback_single_inode(inode, wbc);
        spin_unlock(&inode_lock);
        return ret;
}
EXPORT_SYMBOL(sync_inode);