mm: speed up writeback ramp-up on clean systems
1 /*
2  * mm/page-writeback.c
3  *
4  * Copyright (C) 2002, Linus Torvalds.
5  * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
6  *
7  * Contains functions related to writing back dirty pages at the
8  * address_space level.
9  *
10  * 10Apr2002    akpm@zip.com.au
11  *              Initial version
12  */
13
14 #include <linux/kernel.h>
15 #include <linux/module.h>
16 #include <linux/spinlock.h>
17 #include <linux/fs.h>
18 #include <linux/mm.h>
19 #include <linux/swap.h>
20 #include <linux/slab.h>
21 #include <linux/pagemap.h>
22 #include <linux/writeback.h>
23 #include <linux/init.h>
24 #include <linux/backing-dev.h>
25 #include <linux/task_io_accounting_ops.h>
26 #include <linux/blkdev.h>
27 #include <linux/mpage.h>
28 #include <linux/rmap.h>
29 #include <linux/percpu.h>
30 #include <linux/notifier.h>
31 #include <linux/smp.h>
32 #include <linux/sysctl.h>
33 #include <linux/cpu.h>
34 #include <linux/syscalls.h>
35 #include <linux/buffer_head.h>
36 #include <linux/pagevec.h>
37
38 /*
39  * The maximum number of pages to write out in a single bdflush/kupdate
40  * operation.  We do this so we don't hold I_SYNC against an inode for
41  * enormous amounts of time, which would block a userspace task which has
42  * been forced to throttle against that inode.  Also, the code reevaluates
43  * the dirty limits each time it has written this many pages.
44  */
45 #define MAX_WRITEBACK_PAGES     1024
46
47 /*
48  * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
49  * will look to see if it needs to force writeback or throttling.
50  */
51 static long ratelimit_pages = 32;
52
53 /*
54  * When balance_dirty_pages decides that the caller needs to perform some
55  * non-background writeback, this is how many pages it will attempt to write.
56  * It should be somewhat larger than ratelimit_pages to ensure that reasonably
57  * large amounts of I/O are submitted.
58  */
59 static inline long sync_writeback_pages(void)
60 {
61         return ratelimit_pages + ratelimit_pages / 2;
62 }
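
/*
 * Illustrative sketch, not kernel code: with the boot-time default of
 * ratelimit_pages = 32 above, a throttled task would try to write
 * 32 + 32/2 = 48 pages per balance_dirty_pages() pass.  Once
 * writeback_set_ratelimit() has run (see further down), ratelimit_pages is
 * typically capped at 1024 on 4 KiB-page systems, giving 1536-page (6 MB)
 * chunks.  The helper below merely restates the arithmetic on a
 * caller-supplied value.
 */
static inline long example_sync_writeback_pages(long ratelimit)
{
        return ratelimit + ratelimit / 2;
}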
63
64 /* The following parameters are exported via /proc/sys/vm */
65
66 /*
67  * Start background writeback (via pdflush) at this percentage
68  */
69 int dirty_background_ratio = 5;
70
71 /*
72  * The generator of dirty data starts writeback at this percentage
73  */
74 int vm_dirty_ratio = 10;
75
76 /*
77  * The interval between `kupdate'-style writebacks, in jiffies
78  */
79 int dirty_writeback_interval = 5 * HZ;
80
81 /*
82  * The longest number of jiffies for which data is allowed to remain dirty
83  */
84 int dirty_expire_interval = 30 * HZ;
85
86 /*
87  * Flag that makes the machine dump writes/reads and block dirtyings.
88  */
89 int block_dump;
90
91 /*
92  * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
93  * a full sync is triggered after this time elapses without any disk activity.
94  */
95 int laptop_mode;
96
97 EXPORT_SYMBOL(laptop_mode);
98
99 /* End of sysctl-exported parameters */
100
101
102 static void background_writeout(unsigned long _min_pages);
103
104 /*
105  * Scale the writeback cache size proportional to the relative writeout speeds.
106  *
107  * We do this by keeping a floating proportion between BDIs, based on page
108  * writeback completions [end_page_writeback()]. Those devices that write out
109  * pages fastest will get the larger share, while the slower will get a smaller
110  * share.
111  *
112  * We use page writeout completions because we are interested in getting rid of
113  * dirty pages. Having them written out is the primary goal.
114  *
115  * We introduce a concept of time, a period over which we measure these events,
116  * because demand can/will vary over time. The length of this period itself is
117  * measured in page writeback completions.
118  *
119  */
120 static struct prop_descriptor vm_completions;
121 static struct prop_descriptor vm_dirties;
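
/*
 * Illustrative sketch, not kernel code: the effect of the floating
 * proportion is that a device which contributed bdi_completions out of the
 * total_completions counted in the current period receives roughly that
 * fraction of the global dirty limit.  The real code obtains the fraction
 * from the prop_* primitives rather than from raw counters, so the helper
 * and its argument names below are illustrative only.
 */
static inline unsigned long example_proportional_share(unsigned long limit,
                unsigned long bdi_completions, unsigned long total_completions)
{
        if (!total_completions)         /* nothing written back yet this period */
                return 0;
        return limit * bdi_completions / total_completions;
}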
122
123 static unsigned long determine_dirtyable_memory(void);
124
125 /*
126  * couple the period to the dirty_ratio:
127  *
128  *   period/2 ~ roundup_pow_of_two(dirty limit)
129  */
130 static int calc_period_shift(void)
131 {
132         unsigned long dirty_total;
133
134         dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) / 100;
135         return 2 + ilog2(dirty_total - 1);
136 }
137
138 /*
139  * update the period when the dirty ratio changes.
140  */
141 int dirty_ratio_handler(struct ctl_table *table, int write,
142                 struct file *filp, void __user *buffer, size_t *lenp,
143                 loff_t *ppos)
144 {
145         int old_ratio = vm_dirty_ratio;
146         int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
147         if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
148                 int shift = calc_period_shift();
149                 prop_change_shift(&vm_completions, shift);
150                 prop_change_shift(&vm_dirties, shift);
151         }
152         return ret;
153 }
154
155 /*
156  * Increment the BDI's writeout completion count and the global writeout
157  * completion count. Called from test_clear_page_writeback().
158  */
159 static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
160 {
161         __prop_inc_percpu(&vm_completions, &bdi->completions);
162 }
163
164 static inline void task_dirty_inc(struct task_struct *tsk)
165 {
166         prop_inc_single(&vm_dirties, &tsk->dirties);
167 }
168
169 /*
170  * Obtain an accurate fraction of the BDI's portion.
171  */
172 static void bdi_writeout_fraction(struct backing_dev_info *bdi,
173                 long *numerator, long *denominator)
174 {
175         if (bdi_cap_writeback_dirty(bdi)) {
176                 prop_fraction_percpu(&vm_completions, &bdi->completions,
177                                 numerator, denominator);
178         } else {
179                 *numerator = 0;
180                 *denominator = 1;
181         }
182 }
183
184 /*
185  * Clip the earned share of dirty pages to that which is actually available.
186  * This avoids exceeding the total dirty_limit when the floating averages
187  * fluctuate too quickly.
188  */
189 static void
190 clip_bdi_dirty_limit(struct backing_dev_info *bdi, long dirty, long *pbdi_dirty)
191 {
192         long avail_dirty;
193
194         avail_dirty = dirty -
195                 (global_page_state(NR_FILE_DIRTY) +
196                  global_page_state(NR_WRITEBACK) +
197                  global_page_state(NR_UNSTABLE_NFS));
198
199         if (avail_dirty < 0)
200                 avail_dirty = 0;
201
202         avail_dirty += bdi_stat(bdi, BDI_RECLAIMABLE) +
203                 bdi_stat(bdi, BDI_WRITEBACK);
204
205         *pbdi_dirty = min(*pbdi_dirty, avail_dirty);
206 }
207
208 static inline void task_dirties_fraction(struct task_struct *tsk,
209                 long *numerator, long *denominator)
210 {
211         prop_fraction_single(&vm_dirties, &tsk->dirties,
212                                 numerator, denominator);
213 }
214
215 /*
216  * scale the dirty limit
217  *
218  * task specific dirty limit:
219  *
220  *   dirty -= (dirty/8) * p_{t}
221  */
222 void task_dirty_limit(struct task_struct *tsk, long *pdirty)
223 {
224         long numerator, denominator;
225         long dirty = *pdirty;
226         u64 inv = dirty >> 3;
227
228         task_dirties_fraction(tsk, &numerator, &denominator);
229         inv *= numerator;
230         do_div(inv, denominator);
231
232         dirty -= inv;
233         if (dirty < *pdirty/2)
234                 dirty = *pdirty/2;
235
236         *pdirty = dirty;
237 }
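
/*
 * Illustrative sketch, not kernel code: with the 1/8 scaling above, a task
 * responsible for all of the recent dirtying (p_t = 1) has its limit cut to
 * 7/8 (87.5%) of the common value, while a task that dirtied nothing keeps
 * the full limit.  Because the reduction can never exceed dirty/8, the
 * clamp to *pdirty/2 is only a safety net.
 */
static inline long example_task_dirty_limit(long dirty, long numerator,
                long denominator)
{
        long inv = (dirty >> 3) * numerator / denominator;

        if (dirty - inv < dirty / 2)
                return dirty / 2;
        return dirty - inv;
}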
238
239 /*
240  * Work out the current dirty-memory clamping and background writeout
241  * thresholds.
242  *
243  * The main aim here is to lower them aggressively if there is a lot of mapped
244  * memory around, to avoid stressing page reclaim with lots of unreclaimable
245  * pages.  It is better to clamp down on writers than to start swapping and
246  * performing lots of scanning.
247  *
248  * We only allow 1/2 of the currently-unmapped memory to be dirtied.
249  *
250  * We don't permit the clamping level to fall below 5% - that is getting rather
251  * excessive.
252  *
253  * We make sure that the background writeout level is below the adjusted
254  * clamping level.
255  */
256
257 static unsigned long highmem_dirtyable_memory(unsigned long total)
258 {
259 #ifdef CONFIG_HIGHMEM
260         int node;
261         unsigned long x = 0;
262
263         for_each_node_state(node, N_HIGH_MEMORY) {
264                 struct zone *z =
265                         &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
266
267                 x += zone_page_state(z, NR_FREE_PAGES)
268                         + zone_page_state(z, NR_INACTIVE)
269                         + zone_page_state(z, NR_ACTIVE);
270         }
271         /*
272          * Make sure that the number of highmem pages is never larger
273          * than the total amount of dirtyable memory. This can only
274          * happen in very strange VM situations, but we want to make
275          * sure it never does.
276          */
277         return min(x, total);
278 #else
279         return 0;
280 #endif
281 }
282
283 static unsigned long determine_dirtyable_memory(void)
284 {
285         unsigned long x;
286
287         x = global_page_state(NR_FREE_PAGES)
288                 + global_page_state(NR_INACTIVE)
289                 + global_page_state(NR_ACTIVE);
290         x -= highmem_dirtyable_memory(x);
291         return x + 1;   /* Ensure that we never return 0 */
292 }
293
294 static void
295 get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
296                  struct backing_dev_info *bdi)
297 {
298         int background_ratio;           /* Percentages */
299         int dirty_ratio;
300         int unmapped_ratio;
301         long background;
302         long dirty;
303         unsigned long available_memory = determine_dirtyable_memory();
304         struct task_struct *tsk;
305
306         unmapped_ratio = 100 - ((global_page_state(NR_FILE_MAPPED) +
307                                 global_page_state(NR_ANON_PAGES)) * 100) /
308                                         available_memory;
309
310         dirty_ratio = vm_dirty_ratio;
311         if (dirty_ratio > unmapped_ratio / 2)
312                 dirty_ratio = unmapped_ratio / 2;
313
314         if (dirty_ratio < 5)
315                 dirty_ratio = 5;
316
317         background_ratio = dirty_background_ratio;
318         if (background_ratio >= dirty_ratio)
319                 background_ratio = dirty_ratio / 2;
320
321         background = (background_ratio * available_memory) / 100;
322         dirty = (dirty_ratio * available_memory) / 100;
323         tsk = current;
324         if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
325                 background += background / 4;
326                 dirty += dirty / 4;
327         }
328         *pbackground = background;
329         *pdirty = dirty;
330
331         if (bdi) {
332                 u64 bdi_dirty = dirty;
333                 long numerator, denominator;
334
335                 /*
336                  * Calculate this BDI's share of the dirty ratio.
337                  */
338                 bdi_writeout_fraction(bdi, &numerator, &denominator);
339
340                 bdi_dirty *= numerator;
341                 do_div(bdi_dirty, denominator);
342
343                 *pbdi_dirty = bdi_dirty;
344                 clip_bdi_dirty_limit(bdi, dirty, pbdi_dirty);
345                 task_dirty_limit(current, pbdi_dirty);
346         }
347 }
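
/*
 * Illustrative worked example, not kernel code: assume 1,000,000 dirtyable
 * pages, 40% of memory mapped or anonymous, and the defaults
 * vm_dirty_ratio = 10 and dirty_background_ratio = 5.  Then
 *
 *   unmapped_ratio = 100 - 40                  = 60
 *   dirty_ratio    = min(10, 60/2), floor of 5 = 10
 *   dirty          = 10% of 1,000,000          = 100,000 pages
 *   background     =  5% of 1,000,000          =  50,000 pages
 *
 * A bdi that earned a quarter of the recent writeout completions would get
 * a bdi_thresh of about 25,000 pages, before the clipping and per-task
 * adjustments applied above.
 */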
348
349 /*
350  * balance_dirty_pages() must be called by processes which are generating dirty
351  * data.  It looks at the number of dirty pages in the machine and will force
352  * the caller to perform writeback if the system is over `vm_dirty_ratio'.
353  * If we're over `background_thresh' then pdflush is woken to perform some
354  * writeout.
355  */
356 static void balance_dirty_pages(struct address_space *mapping)
357 {
358         long nr_reclaimable, bdi_nr_reclaimable;
359         long nr_writeback, bdi_nr_writeback;
360         long background_thresh;
361         long dirty_thresh;
362         long bdi_thresh;
363         unsigned long pages_written = 0;
364         unsigned long write_chunk = sync_writeback_pages();
365
366         struct backing_dev_info *bdi = mapping->backing_dev_info;
367
368         for (;;) {
369                 struct writeback_control wbc = {
370                         .bdi            = bdi,
371                         .sync_mode      = WB_SYNC_NONE,
372                         .older_than_this = NULL,
373                         .nr_to_write    = write_chunk,
374                         .range_cyclic   = 1,
375                 };
376
377                 get_dirty_limits(&background_thresh, &dirty_thresh,
378                                 &bdi_thresh, bdi);
379
380                 nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
381                                         global_page_state(NR_UNSTABLE_NFS);
382                 nr_writeback = global_page_state(NR_WRITEBACK);
383
384                 bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
385                 bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
386
387                 if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
388                         break;
389
390                 /*
391                  * Throttle it only when the background writeback cannot
392                  * catch up. This avoids (excessively) small writeouts
393                  * when the bdi limits are ramping up.
394                  */
395                 if (nr_reclaimable + nr_writeback <
396                                 (background_thresh + dirty_thresh) / 2)
397                         break;
398
399                 if (!bdi->dirty_exceeded)
400                         bdi->dirty_exceeded = 1;
401
402                 /* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
403                  * Unstable writes are a feature of certain networked
404                  * filesystems (e.g. NFS) in which data may have been
405                  * written to the server's write cache, but has not yet
406                  * been flushed to permanent storage.
407                  */
408                 if (bdi_nr_reclaimable) {
409                         writeback_inodes(&wbc);
410                         pages_written += write_chunk - wbc.nr_to_write;
411                         get_dirty_limits(&background_thresh, &dirty_thresh,
412                                        &bdi_thresh, bdi);
413                 }
414
415                 /*
416                  * In order to avoid the stacked BDI deadlock we need
417                  * to ensure we accurately count the 'dirty' pages when
418                  * the threshold is low.
419                  *
420                  * Otherwise it would be possible to get thresh+n pages
421                  * reported dirty, even though there are thresh-m pages
422                  * actually dirty; with m+n sitting in the percpu
423                  * deltas.
424                  */
425                 if (bdi_thresh < 2*bdi_stat_error(bdi)) {
426                         bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
427                         bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
428                 } else if (bdi_nr_reclaimable) {
429                         bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
430                         bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
431                 }
432
433                 if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
434                         break;
435                 if (pages_written >= write_chunk)
436                         break;          /* We've done our duty */
437
438                 congestion_wait(WRITE, HZ/10);
439         }
440
441         if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
442                         bdi->dirty_exceeded)
443                 bdi->dirty_exceeded = 0;
444
445         if (writeback_in_progress(bdi))
446                 return;         /* pdflush is already working this queue */
447
448         /*
449          * In laptop mode, we wait until hitting the higher threshold before
450          * starting background writeout, and then write out all the way down
451          * to the lower threshold.  So slow writers cause minimal disk activity.
452          *
453          * In normal mode, we start background writeout at the lower
454          * background_thresh, to keep the amount of dirty memory low.
455          */
456         if ((laptop_mode && pages_written) ||
457                         (!laptop_mode && (global_page_state(NR_FILE_DIRTY)
458                                           + global_page_state(NR_UNSTABLE_NFS)
459                                           > background_thresh)))
460                 pdflush_operation(background_writeout, 0);
461 }
462
463 void set_page_dirty_balance(struct page *page, int page_mkwrite)
464 {
465         if (set_page_dirty(page) || page_mkwrite) {
466                 struct address_space *mapping = page_mapping(page);
467
468                 if (mapping)
469                         balance_dirty_pages_ratelimited(mapping);
470         }
471 }
472
473 /**
474  * balance_dirty_pages_ratelimited_nr - balance dirty memory state
475  * @mapping: address_space which was dirtied
476  * @nr_pages_dirtied: number of pages which the caller has just dirtied
477  *
478  * Processes which are dirtying memory should call in here once for each page
479  * which was newly dirtied.  The function will periodically check the system's
480  * dirty state and will initiate writeback if needed.
481  *
482  * On really big machines, checking the global dirty state is expensive, so we
483  * try to avoid doing it too often (ratelimiting).  But once we're over the
484  * dirty memory limit we decrease the ratelimiting by a lot, to prevent
485  * individual processes from overshooting the limit by (ratelimit_pages) each.
486  */
487 void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
488                                         unsigned long nr_pages_dirtied)
489 {
490         static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
491         unsigned long ratelimit;
492         unsigned long *p;
493
494         ratelimit = ratelimit_pages;
495         if (mapping->backing_dev_info->dirty_exceeded)
496                 ratelimit = 8;
497
498         /*
499          * Check the rate limiting. Also, we do not want to throttle real-time
500          * tasks in balance_dirty_pages(). Period.
501          */
502         preempt_disable();
503         p =  &__get_cpu_var(ratelimits);
504         *p += nr_pages_dirtied;
505         if (unlikely(*p >= ratelimit)) {
506                 *p = 0;
507                 preempt_enable();
508                 balance_dirty_pages(mapping);
509                 return;
510         }
511         preempt_enable();
512 }
513 EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
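
/*
 * Illustrative worked example, not kernel code: with ratelimit_pages at its
 * common post-boot value of 1024, a task dirtying pages one at a time
 * enters balance_dirty_pages() once per 1024 pages dirtied on that CPU.
 * Once its backing device has exceeded its dirty threshold (dirty_exceeded
 * set), the check runs every 8 pages instead, so an offending writer is
 * throttled much more promptly.
 */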
514
515 void throttle_vm_writeout(gfp_t gfp_mask)
516 {
517         long background_thresh;
518         long dirty_thresh;
519
520         for ( ; ; ) {
521                 get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
522
523                 /*
524                  * Boost the allowable dirty threshold a bit for page
525                  * allocators so they don't get DoS'ed by heavy writers
526                  */
527                 dirty_thresh += dirty_thresh / 10;      /* wheeee... */
528
529                 if (global_page_state(NR_UNSTABLE_NFS) +
530                         global_page_state(NR_WRITEBACK) <= dirty_thresh)
531                                 break;
532                 congestion_wait(WRITE, HZ/10);
533
534                 /*
535                  * The caller might hold locks which can prevent IO completion
536                  * or progress in the filesystem.  So we cannot just sit here
537                  * waiting for IO to complete.
538                  */
539                 if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
540                         break;
541         }
542 }
543
544 /*
545  * Write back at least _min_pages, and keep writing until the amount of dirty
546  * memory is less than the background threshold, or until we're all clean.
547  */
548 static void background_writeout(unsigned long _min_pages)
549 {
550         long min_pages = _min_pages;
551         struct writeback_control wbc = {
552                 .bdi            = NULL,
553                 .sync_mode      = WB_SYNC_NONE,
554                 .older_than_this = NULL,
555                 .nr_to_write    = 0,
556                 .nonblocking    = 1,
557                 .range_cyclic   = 1,
558         };
559
560         for ( ; ; ) {
561                 long background_thresh;
562                 long dirty_thresh;
563
564                 get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
565                 if (global_page_state(NR_FILE_DIRTY) +
566                         global_page_state(NR_UNSTABLE_NFS) < background_thresh
567                                 && min_pages <= 0)
568                         break;
569                 wbc.more_io = 0;
570                 wbc.encountered_congestion = 0;
571                 wbc.nr_to_write = MAX_WRITEBACK_PAGES;
572                 wbc.pages_skipped = 0;
573                 writeback_inodes(&wbc);
574                 min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
575                 if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
576                         /* Wrote less than expected */
577                         if (wbc.encountered_congestion || wbc.more_io)
578                                 congestion_wait(WRITE, HZ/10);
579                         else
580                                 break;
581                 }
582         }
583 }
584
585 /*
586  * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
587  * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
588  * -1 if all pdflush threads were busy.
589  */
590 int wakeup_pdflush(long nr_pages)
591 {
592         if (nr_pages == 0)
593                 nr_pages = global_page_state(NR_FILE_DIRTY) +
594                                 global_page_state(NR_UNSTABLE_NFS);
595         return pdflush_operation(background_writeout, nr_pages);
596 }
597
598 static void wb_timer_fn(unsigned long unused);
599 static void laptop_timer_fn(unsigned long unused);
600
601 static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
602 static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);
603
604 /*
605  * Periodic writeback of "old" data.
606  *
607  * Define "old": the first time one of an inode's pages is dirtied, we mark the
608  * dirtying-time in the inode's address_space.  So this periodic writeback code
609  * just walks the superblock inode list, writing back any inodes which are
610  * older than a specific point in time.
611  *
612  * Try to run once per dirty_writeback_interval.  But if a writeback event
613  * takes longer than a dirty_writeback_interval, then leave a
614  * one-second gap.
615  *
616  * older_than_this takes precedence over nr_to_write.  So we'll only write back
617  * all dirty pages if they are all attached to "old" mappings.
618  */
619 static void wb_kupdate(unsigned long arg)
620 {
621         unsigned long oldest_jif;
622         unsigned long start_jif;
623         unsigned long next_jif;
624         long nr_to_write;
625         struct writeback_control wbc = {
626                 .bdi            = NULL,
627                 .sync_mode      = WB_SYNC_NONE,
628                 .older_than_this = &oldest_jif,
629                 .nr_to_write    = 0,
630                 .nonblocking    = 1,
631                 .for_kupdate    = 1,
632                 .range_cyclic   = 1,
633         };
634
635         sync_supers();
636
637         oldest_jif = jiffies - dirty_expire_interval;
638         start_jif = jiffies;
639         next_jif = start_jif + dirty_writeback_interval;
640         nr_to_write = global_page_state(NR_FILE_DIRTY) +
641                         global_page_state(NR_UNSTABLE_NFS) +
642                         (inodes_stat.nr_inodes - inodes_stat.nr_unused);
643         while (nr_to_write > 0) {
644                 wbc.more_io = 0;
645                 wbc.encountered_congestion = 0;
646                 wbc.nr_to_write = MAX_WRITEBACK_PAGES;
647                 writeback_inodes(&wbc);
648                 if (wbc.nr_to_write > 0) {
649                         if (wbc.encountered_congestion || wbc.more_io)
650                                 congestion_wait(WRITE, HZ/10);
651                         else
652                                 break;  /* All the old data is written */
653                 }
654                 nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
655         }
656         if (time_before(next_jif, jiffies + HZ))
657                 next_jif = jiffies + HZ;
658         if (dirty_writeback_interval)
659                 mod_timer(&wb_timer, next_jif);
660 }
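
/*
 * Illustrative worked example, not kernel code: with the defaults above,
 * each kupdate pass writes back inodes dirtied more than 30 seconds ago
 * (oldest_jif = jiffies - 30*HZ) and re-arms the timer for 5 seconds after
 * the pass started, or for 1 second after it finished if the pass itself
 * ran longer than the 5-second interval.
 */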
661
662 /*
663  * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
664  */
665 int dirty_writeback_centisecs_handler(ctl_table *table, int write,
666         struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
667 {
668         proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
669         if (dirty_writeback_interval)
670                 mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
671         else
672                 del_timer(&wb_timer);
673         return 0;
674 }
675
676 static void wb_timer_fn(unsigned long unused)
677 {
678         if (pdflush_operation(wb_kupdate, 0) < 0)
679                 mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
680 }
681
682 static void laptop_flush(unsigned long unused)
683 {
684         sys_sync();
685 }
686
687 static void laptop_timer_fn(unsigned long unused)
688 {
689         pdflush_operation(laptop_flush, 0);
690 }
691
692 /*
693  * We've spun up the disk and we're in laptop mode: schedule writeback
694  * of all dirty data a few seconds from now.  If the flush is already scheduled
695  * then push it back - the user is still using the disk.
696  */
697 void laptop_io_completion(void)
698 {
699         mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
700 }
701
702 /*
703  * We're in laptop mode and we've just synced. The sync's writes will have
704  * caused another writeback to be scheduled by laptop_io_completion.
705  * Nothing needs to be written back anymore, so we unschedule the writeback.
706  */
707 void laptop_sync_completion(void)
708 {
709         del_timer(&laptop_mode_wb_timer);
710 }
711
712 /*
713  * If ratelimit_pages is too high then we can get into dirty-data overload
714  * if a large number of processes all perform writes at the same time.
715  * If it is too low then SMP machines will sample the (expensive)
716  * global dirty state too often.
717  *
718  * Here we set ratelimit_pages to a level which ensures that when all CPUs are
719  * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
720  * thresholds before writeback cuts in.
721  *
722  * But the limit should not be set too high, because it also controls the
723  * amount of memory which the balance_dirty_pages() caller has to write back.
724  * If this is too large then the caller will block on the IO queue all the
725  * time.  So limit it to four megabytes - the balance_dirty_pages() caller
726  * will write six megabyte chunks, max.
727  */
728
729 void writeback_set_ratelimit(void)
730 {
731         ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
732         if (ratelimit_pages < 16)
733                 ratelimit_pages = 16;
734         if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
735                 ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
736 }
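
/*
 * Illustrative sketch, not kernel code: on a machine with 1,048,576 pages
 * of 4 KiB (4 GiB of RAM) and 4 online CPUs, vm_total_pages / (4 * 32)
 * gives 8192 pages, which the 4 MB cap then reduces to 1024 pages; callers
 * of balance_dirty_pages() consequently write sync_writeback_pages() = 1536
 * pages (6 MB) at a time.  The helper restates the clamping with the page
 * size passed in explicitly.
 */
static inline long example_set_ratelimit(unsigned long total_pages,
                unsigned int online_cpus, unsigned long page_size)
{
        long pages = total_pages / (online_cpus * 32);

        if (pages < 16)
                pages = 16;
        if (pages * page_size > 4096 * 1024)
                pages = (4096 * 1024) / page_size;
        return pages;
}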
737
738 static int __cpuinit
739 ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
740 {
741         writeback_set_ratelimit();
742         return NOTIFY_DONE;
743 }
744
745 static struct notifier_block __cpuinitdata ratelimit_nb = {
746         .notifier_call  = ratelimit_handler,
747         .next           = NULL,
748 };
749
750 /*
751  * Called early on to tune the page writeback dirty limits.
752  *
753  * We used to scale dirty pages according to how total memory
754  * related to pages that could be allocated for buffers (by
755  * comparing nr_free_buffer_pages() to vm_total_pages).
756  *
757  * However, that was when we used "dirty_ratio" to scale with
758  * all memory, and we don't do that any more. "dirty_ratio"
759  * is now applied to total non-HIGHPAGE memory (by subtracting
760  * totalhigh_pages from vm_total_pages), and as such we can't
761  * get into the old insane situation any more where we had
762  * large amounts of dirty pages compared to a small amount of
763  * non-HIGHMEM memory.
764  *
765  * But we might still want to scale the dirty_ratio by how
766  * much memory the box has...
767  */
768 void __init page_writeback_init(void)
769 {
770         int shift;
771
772         mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
773         writeback_set_ratelimit();
774         register_cpu_notifier(&ratelimit_nb);
775
776         shift = calc_period_shift();
777         prop_descriptor_init(&vm_completions, shift);
778         prop_descriptor_init(&vm_dirties, shift);
779 }
780
781 /**
782  * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
783  * @mapping: address space structure to write
784  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
785  * @writepage: function called for each page
786  * @data: data passed to writepage function
787  *
788  * If a page is already under I/O, write_cache_pages() skips it, even
789  * if it's dirty.  This is desirable behaviour for memory-cleaning writeback,
790  * but it is INCORRECT for data-integrity system calls such as fsync().  fsync()
791  * and msync() need to guarantee that all the data which was dirty at the time
792  * the call was made gets new I/O started against it.  If wbc->sync_mode is
793  * WB_SYNC_ALL then we were called for data integrity and we must wait for
794  * existing IO to complete.
795  */
796 int write_cache_pages(struct address_space *mapping,
797                       struct writeback_control *wbc, writepage_t writepage,
798                       void *data)
799 {
800         struct backing_dev_info *bdi = mapping->backing_dev_info;
801         int ret = 0;
802         int done = 0;
803         struct pagevec pvec;
804         int nr_pages;
805         pgoff_t index;
806         pgoff_t end;            /* Inclusive */
807         int scanned = 0;
808         int range_whole = 0;
809
810         if (wbc->nonblocking && bdi_write_congested(bdi)) {
811                 wbc->encountered_congestion = 1;
812                 return 0;
813         }
814
815         pagevec_init(&pvec, 0);
816         if (wbc->range_cyclic) {
817                 index = mapping->writeback_index; /* Start from prev offset */
818                 end = -1;
819         } else {
820                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
821                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
822                 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
823                         range_whole = 1;
824                 scanned = 1;
825         }
826 retry:
827         while (!done && (index <= end) &&
828                (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
829                                               PAGECACHE_TAG_DIRTY,
830                                               min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
831                 unsigned i;
832
833                 scanned = 1;
834                 for (i = 0; i < nr_pages; i++) {
835                         struct page *page = pvec.pages[i];
836
837                         /*
838                          * At this point we hold neither mapping->tree_lock nor
839                          * lock on the page itself: the page may be truncated or
840                          * invalidated (changing page->mapping to NULL), or even
841                          * swizzled back from swapper_space to tmpfs file
842                          * mapping
843                          */
844                         lock_page(page);
845
846                         if (unlikely(page->mapping != mapping)) {
847                                 unlock_page(page);
848                                 continue;
849                         }
850
851                         if (!wbc->range_cyclic && page->index > end) {
852                                 done = 1;
853                                 unlock_page(page);
854                                 continue;
855                         }
856
857                         if (wbc->sync_mode != WB_SYNC_NONE)
858                                 wait_on_page_writeback(page);
859
860                         if (PageWriteback(page) ||
861                             !clear_page_dirty_for_io(page)) {
862                                 unlock_page(page);
863                                 continue;
864                         }
865
866                         ret = (*writepage)(page, wbc, data);
867
868                         if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
869                                 unlock_page(page);
870                                 ret = 0;
871                         }
872                         if (ret || (--(wbc->nr_to_write) <= 0))
873                                 done = 1;
874                         if (wbc->nonblocking && bdi_write_congested(bdi)) {
875                                 wbc->encountered_congestion = 1;
876                                 done = 1;
877                         }
878                 }
879                 pagevec_release(&pvec);
880                 cond_resched();
881         }
882         if (!scanned && !done) {
883                 /*
884                  * We hit the last page and there is more work to be done: wrap
885                  * back to the start of the file
886                  */
887                 scanned = 1;
888                 index = 0;
889                 goto retry;
890         }
891         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
892                 mapping->writeback_index = index;
893         return ret;
894 }
895 EXPORT_SYMBOL(write_cache_pages);
896
897 /*
898  * Function used by generic_writepages to call the real writepage
899  * function and set the mapping flags on error
900  */
901 static int __writepage(struct page *page, struct writeback_control *wbc,
902                        void *data)
903 {
904         struct address_space *mapping = data;
905         int ret = mapping->a_ops->writepage(page, wbc);
906         mapping_set_error(mapping, ret);
907         return ret;
908 }
909
910 /**
911  * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
912  * @mapping: address space structure to write
913  * @wbc: subtract the number of written pages from *@wbc->nr_to_write
914  *
915  * This is a library function, which implements the writepages()
916  * address_space_operation.
917  */
918 int generic_writepages(struct address_space *mapping,
919                        struct writeback_control *wbc)
920 {
921         /* deal with chardevs and other special files */
922         if (!mapping->a_ops->writepage)
923                 return 0;
924
925         return write_cache_pages(mapping, wbc, __writepage, mapping);
926 }
927
928 EXPORT_SYMBOL(generic_writepages);
929
930 int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
931 {
932         int ret;
933
934         if (wbc->nr_to_write <= 0)
935                 return 0;
936         wbc->for_writepages = 1;
937         if (mapping->a_ops->writepages)
938                 ret = mapping->a_ops->writepages(mapping, wbc);
939         else
940                 ret = generic_writepages(mapping, wbc);
941         wbc->for_writepages = 0;
942         return ret;
943 }
944
945 /**
946  * write_one_page - write out a single page and optionally wait on I/O
947  * @page: the page to write
948  * @wait: if true, wait on writeout
949  *
950  * The page must be locked by the caller and will be unlocked upon return.
951  *
952  * write_one_page() returns a negative error code if I/O failed.
953  */
954 int write_one_page(struct page *page, int wait)
955 {
956         struct address_space *mapping = page->mapping;
957         int ret = 0;
958         struct writeback_control wbc = {
959                 .sync_mode = WB_SYNC_ALL,
960                 .nr_to_write = 1,
961         };
962
963         BUG_ON(!PageLocked(page));
964
965         if (wait)
966                 wait_on_page_writeback(page);
967
968         if (clear_page_dirty_for_io(page)) {
969                 page_cache_get(page);
970                 ret = mapping->a_ops->writepage(page, &wbc);
971                 if (ret == 0 && wait) {
972                         wait_on_page_writeback(page);
973                         if (PageError(page))
974                                 ret = -EIO;
975                 }
976                 page_cache_release(page);
977         } else {
978                 unlock_page(page);
979         }
980         return ret;
981 }
982 EXPORT_SYMBOL(write_one_page);
983
984 /*
985  * For address_spaces which do not use buffers or write back.
986  */
987 int __set_page_dirty_no_writeback(struct page *page)
988 {
989         if (!PageDirty(page))
990                 SetPageDirty(page);
991         return 0;
992 }
993
994 /*
995  * For address_spaces which do not use buffers.  Just tag the page as dirty in
996  * its radix tree.
997  *
998  * This is also used when a single buffer is being dirtied: we want to set the
999  * page dirty in that case, but not all the buffers.  This is a "bottom-up"
1000  * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
1001  *
1002  * Most callers have locked the page, which pins the address_space in memory.
1003  * But zap_pte_range() does not lock the page, however in that case the
1004  * mapping is pinned by the vma's ->vm_file reference.
1005  *
1006  * We take care to handle the case where the page was truncated from the
1007  * mapping by re-checking page_mapping() inside tree_lock.
1008  */
1009 int __set_page_dirty_nobuffers(struct page *page)
1010 {
1011         if (!TestSetPageDirty(page)) {
1012                 struct address_space *mapping = page_mapping(page);
1013                 struct address_space *mapping2;
1014
1015                 if (!mapping)
1016                         return 1;
1017
1018                 write_lock_irq(&mapping->tree_lock);
1019                 mapping2 = page_mapping(page);
1020                 if (mapping2) { /* Race with truncate? */
1021                         BUG_ON(mapping2 != mapping);
1022                         WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
1023                         if (mapping_cap_account_dirty(mapping)) {
1024                                 __inc_zone_page_state(page, NR_FILE_DIRTY);
1025                                 __inc_bdi_stat(mapping->backing_dev_info,
1026                                                 BDI_RECLAIMABLE);
1027                                 task_io_account_write(PAGE_CACHE_SIZE);
1028                         }
1029                         radix_tree_tag_set(&mapping->page_tree,
1030                                 page_index(page), PAGECACHE_TAG_DIRTY);
1031                 }
1032                 write_unlock_irq(&mapping->tree_lock);
1033                 if (mapping->host) {
1034                         /* !PageAnon && !swapper_space */
1035                         __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1036                 }
1037                 return 1;
1038         }
1039         return 0;
1040 }
1041 EXPORT_SYMBOL(__set_page_dirty_nobuffers);
1042
1043 /*
1044  * When a writepage implementation decides that it doesn't want to write this
1045  * page for some reason, it should redirty the locked page via
1046  * redirty_page_for_writepage() and it should then unlock the page and return 0
1047  */
1048 int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
1049 {
1050         wbc->pages_skipped++;
1051         return __set_page_dirty_nobuffers(page);
1052 }
1053 EXPORT_SYMBOL(redirty_page_for_writepage);
1054
1055 /*
1056  * If the mapping doesn't provide a set_page_dirty a_op, then
1057  * just fall through and assume that it wants buffer_heads.
1058  */
1059 static int __set_page_dirty(struct page *page)
1060 {
1061         struct address_space *mapping = page_mapping(page);
1062
1063         if (likely(mapping)) {
1064                 int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
1065 #ifdef CONFIG_BLOCK
1066                 if (!spd)
1067                         spd = __set_page_dirty_buffers;
1068 #endif
1069                 return (*spd)(page);
1070         }
1071         if (!PageDirty(page)) {
1072                 if (!TestSetPageDirty(page))
1073                         return 1;
1074         }
1075         return 0;
1076 }
1077
1078 int fastcall set_page_dirty(struct page *page)
1079 {
1080         int ret = __set_page_dirty(page);
1081         if (ret)
1082                 task_dirty_inc(current);
1083         return ret;
1084 }
1085 EXPORT_SYMBOL(set_page_dirty);
1086
1087 /*
1088  * set_page_dirty() is racy if the caller has no reference against
1089  * page->mapping->host, and if the page is unlocked.  This is because another
1090  * CPU could truncate the page off the mapping and then free the mapping.
1091  *
1092  * Usually, the page _is_ locked, or the caller is a user-space process which
1093  * holds a reference on the inode by having an open file.
1094  *
1095  * In other cases, the page should be locked before running set_page_dirty().
1096  */
1097 int set_page_dirty_lock(struct page *page)
1098 {
1099         int ret;
1100
1101         lock_page_nosync(page);
1102         ret = set_page_dirty(page);
1103         unlock_page(page);
1104         return ret;
1105 }
1106 EXPORT_SYMBOL(set_page_dirty_lock);
1107
1108 /*
1109  * Clear a page's dirty flag, while caring for dirty memory accounting.
1110  * Returns true if the page was previously dirty.
1111  *
1112  * This is for preparing to put the page under writeout.  We leave the page
1113  * tagged as dirty in the radix tree so that a concurrent write-for-sync
1114  * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
1115  * implementation will run either set_page_writeback() or set_page_dirty(),
1116  * at which stage we bring the page's dirty flag and radix-tree dirty tag
1117  * back into sync.
1118  *
1119  * This incoherency between the page's dirty flag and radix-tree tag is
1120  * unfortunate, but it only exists while the page is locked.
1121  */
1122 int clear_page_dirty_for_io(struct page *page)
1123 {
1124         struct address_space *mapping = page_mapping(page);
1125
1126         BUG_ON(!PageLocked(page));
1127
1128         ClearPageReclaim(page);
1129         if (mapping && mapping_cap_account_dirty(mapping)) {
1130                 /*
1131                  * Yes, Virginia, this is indeed insane.
1132                  *
1133                  * We use this sequence to make sure that
1134                  *  (a) we account for dirty stats properly
1135                  *  (b) we tell the low-level filesystem to
1136                  *      mark the whole page dirty if it was
1137                  *      dirty in a pagetable. Only to then
1138                  *  (c) clean the page again and return 1 to
1139                  *      cause the writeback.
1140                  *
1141                  * This way we avoid all nasty races with the
1142                  * dirty bit in multiple places and clearing
1143                  * them concurrently from different threads.
1144                  *
1145                  * Note! Normally the "set_page_dirty(page)"
1146                  * has no effect on the actual dirty bit - since
1147                  * that will already usually be set. But we
1148                  * need the side effects, and it can help us
1149                  * avoid races.
1150                  *
1151                  * We basically use the page "master dirty bit"
1152                  * as a serialization point for all the different
1153                  * threads doing their things.
1154                  */
1155                 if (page_mkclean(page))
1156                         set_page_dirty(page);
1157                 /*
1158                  * We carefully synchronise fault handlers against
1159                  * installing a dirty pte and marking the page dirty
1160                  * at this point. We do this by having them hold the
1161                  * page lock at some point after installing their
1162                  * pte, but before marking the page dirty.
1163                  * Pages are always locked coming in here, so we get
1164                  * the desired exclusion. See mm/memory.c:do_wp_page()
1165                  * for more comments.
1166                  */
1167                 if (TestClearPageDirty(page)) {
1168                         dec_zone_page_state(page, NR_FILE_DIRTY);
1169                         dec_bdi_stat(mapping->backing_dev_info,
1170                                         BDI_RECLAIMABLE);
1171                         return 1;
1172                 }
1173                 return 0;
1174         }
1175         return TestClearPageDirty(page);
1176 }
1177 EXPORT_SYMBOL(clear_page_dirty_for_io);
1178
1179 int test_clear_page_writeback(struct page *page)
1180 {
1181         struct address_space *mapping = page_mapping(page);
1182         int ret;
1183
1184         if (mapping) {
1185                 struct backing_dev_info *bdi = mapping->backing_dev_info;
1186                 unsigned long flags;
1187
1188                 write_lock_irqsave(&mapping->tree_lock, flags);
1189                 ret = TestClearPageWriteback(page);
1190                 if (ret) {
1191                         radix_tree_tag_clear(&mapping->page_tree,
1192                                                 page_index(page),
1193                                                 PAGECACHE_TAG_WRITEBACK);
1194                         if (bdi_cap_writeback_dirty(bdi)) {
1195                                 __dec_bdi_stat(bdi, BDI_WRITEBACK);
1196                                 __bdi_writeout_inc(bdi);
1197                         }
1198                 }
1199                 write_unlock_irqrestore(&mapping->tree_lock, flags);
1200         } else {
1201                 ret = TestClearPageWriteback(page);
1202         }
1203         if (ret)
1204                 dec_zone_page_state(page, NR_WRITEBACK);
1205         return ret;
1206 }
1207
1208 int test_set_page_writeback(struct page *page)
1209 {
1210         struct address_space *mapping = page_mapping(page);
1211         int ret;
1212
1213         if (mapping) {
1214                 struct backing_dev_info *bdi = mapping->backing_dev_info;
1215                 unsigned long flags;
1216
1217                 write_lock_irqsave(&mapping->tree_lock, flags);
1218                 ret = TestSetPageWriteback(page);
1219                 if (!ret) {
1220                         radix_tree_tag_set(&mapping->page_tree,
1221                                                 page_index(page),
1222                                                 PAGECACHE_TAG_WRITEBACK);
1223                         if (bdi_cap_writeback_dirty(bdi))
1224                                 __inc_bdi_stat(bdi, BDI_WRITEBACK);
1225                 }
1226                 if (!PageDirty(page))
1227                         radix_tree_tag_clear(&mapping->page_tree,
1228                                                 page_index(page),
1229                                                 PAGECACHE_TAG_DIRTY);
1230                 write_unlock_irqrestore(&mapping->tree_lock, flags);
1231         } else {
1232                 ret = TestSetPageWriteback(page);
1233         }
1234         if (!ret)
1235                 inc_zone_page_state(page, NR_WRITEBACK);
1236         return ret;
1237
1238 }
1239 EXPORT_SYMBOL(test_set_page_writeback);
1240
1241 /*
1242  * Return true if any of the pages in the mapping are marked with the
1243  * passed tag.
1244  */
1245 int mapping_tagged(struct address_space *mapping, int tag)
1246 {
1247         int ret;
1248         rcu_read_lock();
1249         ret = radix_tree_tagged(&mapping->page_tree, tag);
1250         rcu_read_unlock();
1251         return ret;
1252 }
1253 EXPORT_SYMBOL(mapping_tagged);