[PATCH] zoned vm counters: conversion of nr_writeback to per zone counter
/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002    akpm@zip.com.au
 *              Initial version
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>

/*
 * The maximum number of pages to write out in a single bdflush/kupdate
 * operation.  We do this so we don't hold I_LOCK against an inode for
 * enormous amounts of time, which would block a userspace task which has
 * been forced to throttle against that inode.  Also, the code reevaluates
 * the dirty state each time it has written this many pages.
 */
#define MAX_WRITEBACK_PAGES     1024

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

static long total_pages;        /* The total number of pages in the machine. */
static int dirty_exceeded __cacheline_aligned_in_smp;   /* Dirty mem may be over limit */

/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than ratelimit_pages to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(void)
{
        return ratelimit_pages + ratelimit_pages / 2;
}
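
/*
 * For illustration: with the initial ratelimit_pages of 32,
 * sync_writeback_pages() returns 32 + 32/2 = 48 pages.  Once
 * set_ratelimit() below has scaled ratelimit_pages to the machine,
 * the write chunk grows proportionally.
 */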

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via pdflush) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 40;

/*
 * The interval between `kupdate'-style writebacks, in jiffies
 */
int dirty_writeback_interval = 5 * HZ;

/*
 * The longest number of jiffies for which data is allowed to remain dirty
 */
int dirty_expire_interval = 30 * HZ;

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */


static void background_writeout(unsigned long _min_pages);

struct writeback_state
{
        unsigned long nr_dirty;
        unsigned long nr_unstable;
        unsigned long nr_mapped;
        unsigned long nr_writeback;
};

static void get_writeback_state(struct writeback_state *wbs)
{
        wbs->nr_dirty = global_page_state(NR_FILE_DIRTY);
        wbs->nr_unstable = read_page_state(nr_unstable);
        wbs->nr_mapped = global_page_state(NR_FILE_MAPPED) +
                                global_page_state(NR_ANON_PAGES);
        wbs->nr_writeback = global_page_state(NR_WRITEBACK);
}
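
/*
 * For reference (a sketch, not part of this file): global_page_state()
 * in include/linux/vmstat.h sums the per-zone counters that this patch
 * series folds into the global vm_stat[] array, roughly:
 *
 *      static inline unsigned long global_page_state(enum zone_stat_item item)
 *      {
 *              long x = atomic_long_read(&vm_stat[item]);
 *
 *              if (x < 0)
 *                      x = 0;
 *              return x;
 *      }
 *
 * The clamp to zero covers transient negative sums while per-cpu deltas
 * are still in flight.
 */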

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around, to avoid stressing page reclaim with lots of unreclaimable
 * pages.  It is better to clamp down on writers than to start swapping and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */
static void
get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
                struct address_space *mapping)
{
        int background_ratio;           /* Percentages */
        int dirty_ratio;
        int unmapped_ratio;
        long background;
        long dirty;
        unsigned long available_memory = total_pages;
        struct task_struct *tsk;

        get_writeback_state(wbs);

#ifdef CONFIG_HIGHMEM
        /*
         * If this mapping can only allocate from low memory,
         * we exclude high memory from our count.
         */
        if (mapping && !(mapping_gfp_mask(mapping) & __GFP_HIGHMEM))
                available_memory -= totalhigh_pages;
#endif

        unmapped_ratio = 100 - (wbs->nr_mapped * 100) / total_pages;

        dirty_ratio = vm_dirty_ratio;
        if (dirty_ratio > unmapped_ratio / 2)
                dirty_ratio = unmapped_ratio / 2;

        if (dirty_ratio < 5)
                dirty_ratio = 5;

        background_ratio = dirty_background_ratio;
        if (background_ratio >= dirty_ratio)
                background_ratio = dirty_ratio / 2;

        background = (background_ratio * available_memory) / 100;
        dirty = (dirty_ratio * available_memory) / 100;
        tsk = current;
        if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
                background += background / 4;
                dirty += dirty / 4;
        }
        *pbackground = background;
        *pdirty = dirty;
}
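
/*
 * Worked example of the clamping above: with total_pages = 262144
 * (1GB of 4K pages) and nr_mapped = 157286 (60% mapped),
 * unmapped_ratio is 41, so vm_dirty_ratio (40) is clamped to
 * 41/2 = 20 while dirty_background_ratio (10) stays below it.
 * The limits come out at 20% and 10% of available_memory.
 */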

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data.  It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then pdflush is woken to perform some
 * writeout.
 */
static void balance_dirty_pages(struct address_space *mapping)
{
        struct writeback_state wbs;
        long nr_reclaimable;
        long background_thresh;
        long dirty_thresh;
        unsigned long pages_written = 0;
        unsigned long write_chunk = sync_writeback_pages();

        struct backing_dev_info *bdi = mapping->backing_dev_info;

        for (;;) {
                struct writeback_control wbc = {
                        .bdi            = bdi,
                        .sync_mode      = WB_SYNC_NONE,
                        .older_than_this = NULL,
                        .nr_to_write    = write_chunk,
                        .range_cyclic   = 1,
                };

                get_dirty_limits(&wbs, &background_thresh,
                                        &dirty_thresh, mapping);
                nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
                if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
                        break;

                if (!dirty_exceeded)
                        dirty_exceeded = 1;

                /* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
                 * Unstable writes are a feature of certain networked
                 * filesystems (e.g. NFS) in which data may have been
                 * written to the server's write cache, but has not yet
                 * been flushed to permanent storage.
                 */
                if (nr_reclaimable) {
                        writeback_inodes(&wbc);
                        get_dirty_limits(&wbs, &background_thresh,
                                        &dirty_thresh, mapping);
                        nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
                        if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
                                break;
                        pages_written += write_chunk - wbc.nr_to_write;
                        if (pages_written >= write_chunk)
                                break;          /* We've done our duty */
                }
                blk_congestion_wait(WRITE, HZ/10);
        }

        if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh && dirty_exceeded)
                dirty_exceeded = 0;

        if (writeback_in_progress(bdi))
                return;         /* pdflush is already working this queue */

        /*
         * In laptop mode, we wait until hitting the higher threshold before
         * starting background writeout, and then write out all the way down
         * to the lower threshold.  So slow writers cause minimal disk activity.
         *
         * In normal mode, we start background writeout at the lower
         * background_thresh, to keep the amount of dirty memory low.
         */
        if ((laptop_mode && pages_written) ||
             (!laptop_mode && (nr_reclaimable > background_thresh)))
                pdflush_operation(background_writeout, 0);
}

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied.  The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting).  But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
                                        unsigned long nr_pages_dirtied)
{
        static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
        unsigned long ratelimit;
        unsigned long *p;

        ratelimit = ratelimit_pages;
        if (dirty_exceeded)
                ratelimit = 8;

        /*
         * Check the rate limiting. Also, we do not want to throttle real-time
         * tasks in balance_dirty_pages(). Period.
         */
        preempt_disable();
        p = &__get_cpu_var(ratelimits);
        *p += nr_pages_dirtied;
        if (unlikely(*p >= ratelimit)) {
                *p = 0;
                preempt_enable();
                balance_dirty_pages(mapping);
                return;
        }
        preempt_enable();
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
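
/*
 * Most callers dirty one page at a time and use the
 * balance_dirty_pages_ratelimited() wrapper, which (in
 * include/linux/writeback.h) is essentially:
 *
 *      static inline void
 *      balance_dirty_pages_ratelimited(struct address_space *mapping)
 *      {
 *              balance_dirty_pages_ratelimited_nr(mapping, 1);
 *      }
 */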

void throttle_vm_writeout(void)
{
        struct writeback_state wbs;
        long background_thresh;
        long dirty_thresh;

        for ( ; ; ) {
                get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);

                /*
                 * Boost the allowable dirty threshold a bit for page
                 * allocators so they don't get DoS'ed by heavy writers
                 */
                dirty_thresh += dirty_thresh / 10;      /* wheeee... */

                if (wbs.nr_unstable + wbs.nr_writeback <= dirty_thresh)
                        break;
                blk_congestion_wait(WRITE, HZ/10);
        }
}
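
/*
 * Example of the boost above: with dirty_thresh at 20% of memory,
 * page allocators may push unstable + writeback pages to 22% before
 * they are made to wait, giving them headroom over ordinary writers
 * which throttle at 20%.
 */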

/*
 * Write back at least _min_pages, and keep writing until the amount of dirty
 * memory is less than the background threshold, or until we're all clean.
 */
static void background_writeout(unsigned long _min_pages)
{
        long min_pages = _min_pages;
        struct writeback_control wbc = {
                .bdi            = NULL,
                .sync_mode      = WB_SYNC_NONE,
                .older_than_this = NULL,
                .nr_to_write    = 0,
                .nonblocking    = 1,
                .range_cyclic   = 1,
        };

        for ( ; ; ) {
                struct writeback_state wbs;
                long background_thresh;
                long dirty_thresh;

                get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);
                if (wbs.nr_dirty + wbs.nr_unstable < background_thresh
                                && min_pages <= 0)
                        break;
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                wbc.pages_skipped = 0;
                writeback_inodes(&wbc);
                min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
                if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
                        /* Wrote less than expected */
                        blk_congestion_wait(WRITE, HZ/10);
                        if (!wbc.encountered_congestion)
                                break;
                }
        }
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.  Returns 0 if a pdflush thread was dispatched.  Returns
 * -1 if all pdflush threads were busy.
 */
int wakeup_pdflush(long nr_pages)
{
        if (nr_pages == 0) {
                struct writeback_state wbs;

                get_writeback_state(&wbs);
                nr_pages = wbs.nr_dirty + wbs.nr_unstable;
        }
        return pdflush_operation(background_writeout, nr_pages);
}
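
/*
 * Example caller (a sketch from mm/vmscan.c of this era): direct
 * reclaim kicks the flusher when it keeps meeting dirty pages,
 * along the lines of
 *
 *      wakeup_pdflush(laptop_mode ? 0 : total_scanned);
 *
 * i.e. "write back everything" in laptop mode and a scan-proportional
 * amount otherwise.
 */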

static void wb_timer_fn(unsigned long unused);
static void laptop_timer_fn(unsigned long unused);

static DEFINE_TIMER(wb_timer, wb_timer_fn, 0, 0);
static DEFINE_TIMER(laptop_mode_wb_timer, laptop_timer_fn, 0, 0);

/*
 * Periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space.  So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval.  But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write.  So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static void wb_kupdate(unsigned long arg)
{
        unsigned long oldest_jif;
        unsigned long start_jif;
        unsigned long next_jif;
        long nr_to_write;
        struct writeback_state wbs;
        struct writeback_control wbc = {
                .bdi            = NULL,
                .sync_mode      = WB_SYNC_NONE,
                .older_than_this = &oldest_jif,
                .nr_to_write    = 0,
                .nonblocking    = 1,
                .for_kupdate    = 1,
                .range_cyclic   = 1,
        };

        sync_supers();

        get_writeback_state(&wbs);
        oldest_jif = jiffies - dirty_expire_interval;
        start_jif = jiffies;
        next_jif = start_jif + dirty_writeback_interval;
        nr_to_write = wbs.nr_dirty + wbs.nr_unstable +
                        (inodes_stat.nr_inodes - inodes_stat.nr_unused);
        while (nr_to_write > 0) {
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                writeback_inodes(&wbc);
                if (wbc.nr_to_write > 0) {
                        if (wbc.encountered_congestion)
                                blk_congestion_wait(WRITE, HZ/10);
                        else
                                break;  /* All the old data is written */
                }
                nr_to_write -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
        }
        if (time_before(next_jif, jiffies + HZ))
                next_jif = jiffies + HZ;
        if (dirty_writeback_interval)
                mod_timer(&wb_timer, next_jif);
}
/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
                struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec_userhz_jiffies(table, write, file, buffer, length, ppos);
        if (dirty_writeback_interval) {
                mod_timer(&wb_timer,
                        jiffies + dirty_writeback_interval);
        } else {
                del_timer(&wb_timer);
        }
        return 0;
}

static void wb_timer_fn(unsigned long unused)
{
        if (pdflush_operation(wb_kupdate, 0) < 0)
                mod_timer(&wb_timer, jiffies + HZ); /* delay 1 second */
}

static void laptop_flush(unsigned long unused)
{
        sys_sync();
}

static void laptop_timer_fn(unsigned long unused)
{
        pdflush_operation(laptop_flush, 0);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now.  If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(void)
{
        mod_timer(&laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced. The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
        del_timer(&laptop_mode_wb_timer);
}

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high, because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back.
 * If this is too large then the caller will block on the IO queue all the
 * time.  So limit it to four megabytes - the balance_dirty_pages() caller
 * will write six megabyte chunks, max.
 */
static void set_ratelimit(void)
{
        ratelimit_pages = total_pages / (num_online_cpus() * 32);
        if (ratelimit_pages < 16)
                ratelimit_pages = 16;
        if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
                ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}
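
/*
 * Worked example: with 1GB of memory (total_pages = 262144 4K pages)
 * and 4 online CPUs, the first line gives 262144 / 128 = 2048 pages.
 * 2048 * 4096 bytes is 8MB, which exceeds the 4MB cap, so
 * ratelimit_pages is clamped to 1024 (4MB of 4K pages).
 */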

static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
        set_ratelimit();
        return 0;
}

static struct notifier_block __cpuinitdata ratelimit_nb = {
        .notifier_call  = ratelimit_handler,
        .next           = NULL,
};

/*
 * If the machine has a large highmem:lowmem ratio then scale back the default
 * dirty memory thresholds: allowing too much dirty highmem pins an excessive
 * number of buffer_heads.
 */
void __init page_writeback_init(void)
{
        long buffer_pages = nr_free_buffer_pages();
        long correction;

        total_pages = nr_free_pagecache_pages();

        correction = (100 * 4 * buffer_pages) / total_pages;

        if (correction < 100) {
                dirty_background_ratio *= correction;
                dirty_background_ratio /= 100;
                vm_dirty_ratio *= correction;
                vm_dirty_ratio /= 100;

                if (dirty_background_ratio <= 0)
                        dirty_background_ratio = 1;
                if (vm_dirty_ratio <= 0)
                        vm_dirty_ratio = 1;
        }
        mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
        set_ratelimit();
        register_cpu_notifier(&ratelimit_nb);
}
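
/*
 * Example of the correction above: if only an eighth of memory is
 * available for buffers (buffer_pages = total_pages / 8, as on a
 * large highmem machine), correction = 100 * 4 / 8 = 50 and both
 * ratios are halved: the defaults of 10% and 40% become 5% and 20%.
 */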

int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
        int ret;

        if (wbc->nr_to_write <= 0)
                return 0;
        wbc->for_writepages = 1;
        if (mapping->a_ops->writepages)
                ret = mapping->a_ops->writepages(mapping, wbc);
        else
                ret = generic_writepages(mapping, wbc);
        wbc->for_writepages = 0;
        return ret;
}

/**
 * write_one_page - write out a single page and optionally wait on I/O
 *
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
        struct address_space *mapping = page->mapping;
        int ret = 0;
        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_ALL,
                .nr_to_write = 1,
        };

        BUG_ON(!PageLocked(page));

        if (wait)
                wait_on_page_writeback(page);

        if (clear_page_dirty_for_io(page)) {
                page_cache_get(page);
                ret = mapping->a_ops->writepage(page, &wbc);
                if (ret == 0 && wait) {
                        wait_on_page_writeback(page);
                        if (PageError(page))
                                ret = -EIO;
                }
                page_cache_release(page);
        } else {
                unlock_page(page);
        }
        return ret;
}
EXPORT_SYMBOL(write_one_page);
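
/*
 * Typical use, sketched: a caller that needs a single locked page on
 * disk before proceeding can do
 *
 *      lock_page(page);
 *      err = write_one_page(page, 1);
 *
 * and on return the page has been unlocked, with err < 0 on I/O
 * failure.
 */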

/*
 * For address_spaces which do not use buffers.  Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers.  This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
        if (!TestSetPageDirty(page)) {
                struct address_space *mapping = page_mapping(page);
                struct address_space *mapping2;

                if (mapping) {
                        write_lock_irq(&mapping->tree_lock);
                        mapping2 = page_mapping(page);
                        if (mapping2) { /* Race with truncate? */
                                BUG_ON(mapping2 != mapping);
                                if (mapping_cap_account_dirty(mapping))
                                        __inc_zone_page_state(page,
                                                                NR_FILE_DIRTY);
                                radix_tree_tag_set(&mapping->page_tree,
                                        page_index(page), PAGECACHE_TAG_DIRTY);
                        }
                        write_unlock_irq(&mapping->tree_lock);
                        if (mapping->host) {
                                /* !PageAnon && !swapper_space */
                                __mark_inode_dirty(mapping->host,
                                                        I_DIRTY_PAGES);
                        }
                }
                return 1;
        }
        return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);
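
/*
 * Filesystems that keep no buffer_heads can simply plug this in as
 * their set_page_dirty method (hypothetical foo_aops shown; several
 * in-tree filesystems do exactly this):
 *
 *      static struct address_space_operations foo_aops = {
 *              .set_page_dirty = __set_page_dirty_nobuffers,
 *              ...
 *      };
 */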

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0.
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
        wbc->pages_skipped++;
        return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);

/*
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
int fastcall set_page_dirty(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (likely(mapping)) {
                int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
                if (spd)
                        return (*spd)(page);
                return __set_page_dirty_buffers(page);
        }
        if (!PageDirty(page)) {
                if (!TestSetPageDirty(page))
                        return 1;
        }
        return 0;
}
EXPORT_SYMBOL(set_page_dirty);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked.  This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
        int ret;

        lock_page(page);
        ret = set_page_dirty(page);
        unlock_page(page);
        return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 */
int test_clear_page_dirty(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        unsigned long flags;

        if (mapping) {
                write_lock_irqsave(&mapping->tree_lock, flags);
                if (TestClearPageDirty(page)) {
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
                        if (mapping_cap_account_dirty(mapping))
                                __dec_zone_page_state(page, NR_FILE_DIRTY);
                        write_unlock_irqrestore(&mapping->tree_lock, flags);
                        return 1;
                }
                write_unlock_irqrestore(&mapping->tree_lock, flags);
                return 0;
        }
        return TestClearPageDirty(page);
}
EXPORT_SYMBOL(test_clear_page_dirty);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout.  We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk.  The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

        if (mapping) {
                if (TestClearPageDirty(page)) {
                        if (mapping_cap_account_dirty(mapping))
                                dec_zone_page_state(page, NR_FILE_DIRTY);
                        return 1;
                }
                return 0;
        }
        return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

int test_clear_page_writeback(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int ret;

        if (mapping) {
                unsigned long flags;

                write_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestClearPageWriteback(page);
                if (ret)
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
                write_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestClearPageWriteback(page);
        }
        return ret;
}

int test_set_page_writeback(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        int ret;

        if (mapping) {
                unsigned long flags;

                write_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestSetPageWriteback(page);
                if (!ret)
                        radix_tree_tag_set(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
                if (!PageDirty(page))
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
                write_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestSetPageWriteback(page);
        }
        return ret;
}
EXPORT_SYMBOL(test_set_page_writeback);

/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
        unsigned long flags;
        int ret;

        read_lock_irqsave(&mapping->tree_lock, flags);
        ret = radix_tree_tagged(&mapping->page_tree, tag);
        read_unlock_irqrestore(&mapping->tree_lock, flags);
        return ret;
}
EXPORT_SYMBOL(mapping_tagged);