diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 886ea0d..74dc57c 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2,11 +2,12 @@
  * mm/page-writeback.c
  *
  * Copyright (C) 2002, Linus Torvalds.
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  *
  * Contains functions related to writing back dirty pages at the
  * address_space level.
  *
- * 10Apr2002   akpm@zip.com.au
+ * 10Apr2002   Andrew Morton
  *             Initial version
  */
 
@@ -36,7 +37,7 @@
 
 /*
  * The maximum number of pages to writeout in a single bdflush/kupdate
- * operation.  We do this so we don't hold I_LOCK against an inode for
+ * operation.  We do this so we don't hold I_SYNC against an inode for
  * enormous amounts of time, which would block a userspace task which has
  * been forced to throttle against that inode.  Also, the code reevaluates
  * the dirty each time it has written this many pages.
@@ -49,8 +50,6 @@
  */
 static long ratelimit_pages = 32;
 
-static int dirty_exceeded __cacheline_aligned_in_smp;  /* Dirty mem may be over limit */
-
 /*
  * When balance_dirty_pages decides that the caller needs to perform some
  * non-background writeback, this is how many pages it will attempt to write.
@@ -70,11 +69,29 @@ static inline long sync_writeback_pages(void)
 int dirty_background_ratio = 5;
 
 /*
+ * dirty_background_bytes starts at 0 (disabled) so that it is a function of
+ * dirty_background_ratio * the amount of dirtyable memory
+ */
+unsigned long dirty_background_bytes;
+
+/*
+ * free highmem will not be subtracted from the total free memory
+ * for calculating free ratios if vm_highmem_is_dirtyable is true
+ */
+int vm_highmem_is_dirtyable;
+
+/*
  * The generator of dirty data starts writeback at this percentage
  */
 int vm_dirty_ratio = 10;
 
 /*
+ * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
+ * vm_dirty_ratio * the amount of dirtyable memory
+ */
+unsigned long vm_dirty_bytes;
+
+/*
  * The interval between `kupdate'-style writebacks, in jiffies
  */
 int dirty_writeback_interval = 5 * HZ;
@@ -103,6 +120,252 @@ EXPORT_SYMBOL(laptop_mode);
 static void background_writeout(unsigned long _min_pages);
 
 /*
+ * Scale the writeback cache size proportional to the relative writeout speeds.
+ *
+ * We do this by keeping a floating proportion between BDIs, based on page
+ * writeback completions [end_page_writeback()]. Those devices that write out
+ * pages fastest will get the larger share, while the slower will get a smaller
+ * share.
+ *
+ * We use page writeout completions because we are interested in getting rid of
+ * dirty pages. Having them written out is the primary goal.
+ *
+ * We introduce a concept of time, a period over which we measure these events,
+ * because demand can/will vary over time. The length of this period itself is
+ * measured in page writeback completions.
+ *
+ */
+static struct prop_descriptor vm_completions;
+static struct prop_descriptor vm_dirties;
+
+/*
+ * couple the period to the dirty_ratio:
+ *
+ *   period/2 ~ roundup_pow_of_two(dirty limit)
+ */
+static int calc_period_shift(void)
+{
+       unsigned long dirty_total;
+
+       if (vm_dirty_bytes)
+               dirty_total = vm_dirty_bytes / PAGE_SIZE;
+       else
+               dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) /
+                               100;
+       return 2 + ilog2(dirty_total - 1);
+}
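
/*
 * A stand-alone, user-space sketch of the arithmetic in calc_period_shift()
 * above: half the resulting period tracks roundup_pow_of_two() of the dirty
 * limit.  ilog2_ul() is a simplified stand-in for the kernel's ilog2().
 */
#include <stdio.h>

static int ilog2_ul(unsigned long x)		/* floor(log2(x)), x > 0 */
{
	int r = -1;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned long dirty_total = 52429;	/* hypothetical dirty limit, in pages */
	int shift = 2 + ilog2_ul(dirty_total - 1);
	unsigned long period = 1UL << shift;

	/* period/2 ~ roundup_pow_of_two(dirty limit): 65536 here */
	printf("shift=%d period=%lu period/2=%lu\n", shift, period, period / 2);
	return 0;
}
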
+
+/*
+ * update the period when the dirty threshold changes.
+ */
+static void update_completion_period(void)
+{
+       int shift = calc_period_shift();
+       prop_change_shift(&vm_completions, shift);
+       prop_change_shift(&vm_dirties, shift);
+}
+
+int dirty_background_ratio_handler(struct ctl_table *table, int write,
+               struct file *filp, void __user *buffer, size_t *lenp,
+               loff_t *ppos)
+{
+       int ret;
+
+       ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+       if (ret == 0 && write)
+               dirty_background_bytes = 0;
+       return ret;
+}
+
+int dirty_background_bytes_handler(struct ctl_table *table, int write,
+               struct file *filp, void __user *buffer, size_t *lenp,
+               loff_t *ppos)
+{
+       int ret;
+
+       ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos);
+       if (ret == 0 && write)
+               dirty_background_ratio = 0;
+       return ret;
+}
+
+int dirty_ratio_handler(struct ctl_table *table, int write,
+               struct file *filp, void __user *buffer, size_t *lenp,
+               loff_t *ppos)
+{
+       int old_ratio = vm_dirty_ratio;
+       int ret;
+
+       ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+       if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
+               update_completion_period();
+               vm_dirty_bytes = 0;
+       }
+       return ret;
+}
+
+
+int dirty_bytes_handler(struct ctl_table *table, int write,
+               struct file *filp, void __user *buffer, size_t *lenp,
+               loff_t *ppos)
+{
+       unsigned long old_bytes = vm_dirty_bytes;
+       int ret;
+
+       ret = proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos);
+       if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
+               update_completion_period();
+               vm_dirty_ratio = 0;
+       }
+       return ret;
+}
+
+/*
+ * Increment the BDI's writeout completion count and the global writeout
+ * completion count. Called from test_clear_page_writeback().
+ */
+static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
+{
+       __prop_inc_percpu_max(&vm_completions, &bdi->completions,
+                             bdi->max_prop_frac);
+}
+
+void bdi_writeout_inc(struct backing_dev_info *bdi)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       __bdi_writeout_inc(bdi);
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(bdi_writeout_inc);
+
+void task_dirty_inc(struct task_struct *tsk)
+{
+       prop_inc_single(&vm_dirties, &tsk->dirties);
+}
+
+/*
+ * Obtain an accurate fraction of the BDI's portion.
+ */
+static void bdi_writeout_fraction(struct backing_dev_info *bdi,
+               long *numerator, long *denominator)
+{
+       if (bdi_cap_writeback_dirty(bdi)) {
+               prop_fraction_percpu(&vm_completions, &bdi->completions,
+                               numerator, denominator);
+       } else {
+               *numerator = 0;
+               *denominator = 1;
+       }
+}
+
+/*
+ * Clip the earned share of dirty pages to that which is actually available.
+ * This avoids exceeding the total dirty_limit when the floating averages
+ * fluctuate too quickly.
+ */
+static void
+clip_bdi_dirty_limit(struct backing_dev_info *bdi, long dirty, long *pbdi_dirty)
+{
+       long avail_dirty;
+
+       avail_dirty = dirty -
+               (global_page_state(NR_FILE_DIRTY) +
+                global_page_state(NR_WRITEBACK) +
+                global_page_state(NR_UNSTABLE_NFS) +
+                global_page_state(NR_WRITEBACK_TEMP));
+
+       if (avail_dirty < 0)
+               avail_dirty = 0;
+
+       avail_dirty += bdi_stat(bdi, BDI_RECLAIMABLE) +
+               bdi_stat(bdi, BDI_WRITEBACK);
+
+       *pbdi_dirty = min(*pbdi_dirty, avail_dirty);
+}
+
+static inline void task_dirties_fraction(struct task_struct *tsk,
+               long *numerator, long *denominator)
+{
+       prop_fraction_single(&vm_dirties, &tsk->dirties,
+                               numerator, denominator);
+}
+
+/*
+ * scale the dirty limit
+ *
+ * task specific dirty limit:
+ *
+ *   dirty -= (dirty/8) * p_{t}
+ */
+static void task_dirty_limit(struct task_struct *tsk, long *pdirty)
+{
+       long numerator, denominator;
+       long dirty = *pdirty;
+       u64 inv = dirty >> 3;
+
+       task_dirties_fraction(tsk, &numerator, &denominator);
+       inv *= numerator;
+       do_div(inv, denominator);
+
+       dirty -= inv;
+       if (dirty < *pdirty/2)
+               dirty = *pdirty/2;
+
+       *pdirty = dirty;
+}
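
/*
 * A minimal user-space sketch of the task_dirty_limit() scaling above: the
 * limit a heavy dirtier sees drops by up to one eighth, weighted by that
 * task's share of recent dirtying, and never below half the global value.
 * The numerator/denominator pair stands in for prop_fraction_single().
 */
#include <stdio.h>

static long scale_task_limit(long dirty, long numerator, long denominator)
{
	long limit = dirty;
	long inv = (dirty >> 3) * numerator / denominator;

	limit -= inv;			/* dirty -= (dirty/8) * p_{t} */
	if (limit < dirty / 2)		/* floor at half the global limit */
		limit = dirty / 2;
	return limit;
}

int main(void)
{
	/* a task that did 3/4 of recent dirtying loses 3/32 of the limit */
	printf("%ld\n", scale_task_limit(1024, 3, 4));	/* prints 928 */
	return 0;
}
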
+
+/*
+ *
+ */
+static DEFINE_SPINLOCK(bdi_lock);
+static unsigned int bdi_min_ratio;
+
+int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
+{
+       int ret = 0;
+       unsigned long flags;
+
+       spin_lock_irqsave(&bdi_lock, flags);
+       if (min_ratio > bdi->max_ratio) {
+               ret = -EINVAL;
+       } else {
+               min_ratio -= bdi->min_ratio;
+               if (bdi_min_ratio + min_ratio < 100) {
+                       bdi_min_ratio += min_ratio;
+                       bdi->min_ratio += min_ratio;
+               } else {
+                       ret = -EINVAL;
+               }
+       }
+       spin_unlock_irqrestore(&bdi_lock, flags);
+
+       return ret;
+}
+
+int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
+{
+       unsigned long flags;
+       int ret = 0;
+
+       if (max_ratio > 100)
+               return -EINVAL;
+
+       spin_lock_irqsave(&bdi_lock, flags);
+       if (bdi->min_ratio > max_ratio) {
+               ret = -EINVAL;
+       } else {
+               bdi->max_ratio = max_ratio;
+               bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
+       }
+       spin_unlock_irqrestore(&bdi_lock, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL(bdi_set_max_ratio);
+
+/*
  * Work out the current dirty-memory clamping and background writeout
  * thresholds.
  *
@@ -126,13 +389,11 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
        int node;
        unsigned long x = 0;
 
-       for_each_online_node(node) {
+       for_each_node_state(node, N_HIGH_MEMORY) {
                struct zone *z =
                        &NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 
-               x += zone_page_state(z, NR_FREE_PAGES)
-                       + zone_page_state(z, NR_INACTIVE)
-                       + zone_page_state(z, NR_ACTIVE);
+               x += zone_page_state(z, NR_FREE_PAGES) + zone_lru_pages(z);
        }
        /*
         * Make sure that the number of highmem pages is never larger
@@ -146,46 +407,51 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 #endif
 }
 
-static unsigned long determine_dirtyable_memory(void)
+/**
+ * determine_dirtyable_memory - amount of memory that may be used
+ *
+ * Returns the number of pages that can currently be freed and used
+ * by the kernel for direct mappings.
+ */
+unsigned long determine_dirtyable_memory(void)
 {
        unsigned long x;
 
-       x = global_page_state(NR_FREE_PAGES)
-               + global_page_state(NR_INACTIVE)
-               + global_page_state(NR_ACTIVE);
-       x -= highmem_dirtyable_memory(x);
+       x = global_page_state(NR_FREE_PAGES) + global_lru_pages();
+
+       if (!vm_highmem_is_dirtyable)
+               x -= highmem_dirtyable_memory(x);
+
        return x + 1;   /* Ensure that we never return 0 */
 }
 
-static void
-get_dirty_limits(long *pbackground, long *pdirty,
-                                       struct address_space *mapping)
+void
+get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
+                unsigned long *pbdi_dirty, struct backing_dev_info *bdi)
 {
-       int background_ratio;           /* Percentages */
-       int dirty_ratio;
-       int unmapped_ratio;
-       long background;
-       long dirty;
+       unsigned long background;
+       unsigned long dirty;
        unsigned long available_memory = determine_dirtyable_memory();
        struct task_struct *tsk;
 
-       unmapped_ratio = 100 - ((global_page_state(NR_FILE_MAPPED) +
-                               global_page_state(NR_ANON_PAGES)) * 100) /
-                                       available_memory;
-
-       dirty_ratio = vm_dirty_ratio;
-       if (dirty_ratio > unmapped_ratio / 2)
-               dirty_ratio = unmapped_ratio / 2;
+       if (vm_dirty_bytes)
+               dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
+       else {
+               int dirty_ratio;
 
-       if (dirty_ratio < 5)
-               dirty_ratio = 5;
+               dirty_ratio = vm_dirty_ratio;
+               if (dirty_ratio < 5)
+                       dirty_ratio = 5;
+               dirty = (dirty_ratio * available_memory) / 100;
+       }
 
-       background_ratio = dirty_background_ratio;
-       if (background_ratio >= dirty_ratio)
-               background_ratio = dirty_ratio / 2;
+       if (dirty_background_bytes)
+               background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
+       else
+               background = (dirty_background_ratio * available_memory) / 100;
 
-       background = (background_ratio * available_memory) / 100;
-       dirty = (dirty_ratio * available_memory) / 100;
+       if (background >= dirty)
+               background = dirty / 2;
        tsk = current;
        if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
                background += background / 4;
@@ -193,6 +459,27 @@ get_dirty_limits(long *pbackground, long *pdirty,
        }
        *pbackground = background;
        *pdirty = dirty;
+
+       if (bdi) {
+               u64 bdi_dirty;
+               long numerator, denominator;
+
+               /*
+                * Calculate this BDI's share of the dirty ratio.
+                */
+               bdi_writeout_fraction(bdi, &numerator, &denominator);
+
+               bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
+               bdi_dirty *= numerator;
+               do_div(bdi_dirty, denominator);
+               bdi_dirty += (dirty * bdi->min_ratio) / 100;
+               if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
+                       bdi_dirty = dirty * bdi->max_ratio / 100;
+
+               *pbdi_dirty = bdi_dirty;
+               clip_bdi_dirty_limit(bdi, dirty, pbdi_dirty);
+               task_dirty_limit(current, pbdi_dirty);
+       }
 }
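
/*
 * A compact, plain-integer restatement of the per-BDI threshold computed at
 * the end of get_dirty_limits() above: each BDI keeps its guaranteed
 * min_ratio slice, the remainder is split by measured writeout speed, and
 * the result is capped at max_ratio.  Parameter names are illustrative.
 */
#include <stdio.h>

static unsigned long bdi_share(unsigned long dirty,
			       unsigned int total_min_ratio,	/* sum of all BDIs' min_ratio */
			       unsigned int bdi_min, unsigned int bdi_max,
			       unsigned long numerator, unsigned long denominator)
{
	unsigned long bdi_dirty;

	/* the pool above all guaranteed minima is split by writeout speed */
	bdi_dirty = (dirty * (100 - total_min_ratio)) / 100;
	bdi_dirty = bdi_dirty * numerator / denominator;

	/* add this BDI's own guaranteed minimum ... */
	bdi_dirty += (dirty * bdi_min) / 100;

	/* ... but never beyond its configured maximum */
	if (bdi_dirty > (dirty * bdi_max) / 100)
		bdi_dirty = (dirty * bdi_max) / 100;

	return bdi_dirty;
}

int main(void)
{
	/* 10000-page limit, no minima, this BDI did 1/4 of recent writeout */
	printf("%lu\n", bdi_share(10000, 0, 0, 100, 1, 4));	/* prints 2500 */
	return 0;
}
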
 
 /*
@@ -204,9 +491,11 @@ get_dirty_limits(long *pbackground, long *pdirty,
  */
 static void balance_dirty_pages(struct address_space *mapping)
 {
-       long nr_reclaimable;
-       long background_thresh;
-       long dirty_thresh;
+       long nr_reclaimable, bdi_nr_reclaimable;
+       long nr_writeback, bdi_nr_writeback;
+       unsigned long background_thresh;
+       unsigned long dirty_thresh;
+       unsigned long bdi_thresh;
        unsigned long pages_written = 0;
        unsigned long write_chunk = sync_writeback_pages();
 
@@ -221,15 +510,30 @@ static void balance_dirty_pages(struct address_space *mapping)
                        .range_cyclic   = 1,
                };
 
-               get_dirty_limits(&background_thresh, &dirty_thresh, mapping);
+               get_dirty_limits(&background_thresh, &dirty_thresh,
+                               &bdi_thresh, bdi);
+
                nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
                                        global_page_state(NR_UNSTABLE_NFS);
-               if (nr_reclaimable + global_page_state(NR_WRITEBACK) <=
-                       dirty_thresh)
-                               break;
+               nr_writeback = global_page_state(NR_WRITEBACK);
+
+               bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
+               bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
+
+               if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
+                       break;
 
-               if (!dirty_exceeded)
-                       dirty_exceeded = 1;
+               /*
+                * Throttle it only when the background writeback cannot
+                * catch-up. This avoids (excessively) small writeouts
+                * when the bdi limits are ramping up.
+                */
+               if (nr_reclaimable + nr_writeback <
+                               (background_thresh + dirty_thresh) / 2)
+                       break;
+
+               if (!bdi->dirty_exceeded)
+                       bdi->dirty_exceeded = 1;
 
                /* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
                 * Unstable writes are a feature of certain networked
@@ -237,26 +541,42 @@ static void balance_dirty_pages(struct address_space *mapping)
                 * written to the server's write cache, but has not yet
                 * been flushed to permanent storage.
                 */
-               if (nr_reclaimable) {
+               if (bdi_nr_reclaimable) {
                        writeback_inodes(&wbc);
-                       get_dirty_limits(&background_thresh,
-                                               &dirty_thresh, mapping);
-                       nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
-                                       global_page_state(NR_UNSTABLE_NFS);
-                       if (nr_reclaimable +
-                               global_page_state(NR_WRITEBACK)
-                                       <= dirty_thresh)
-                                               break;
                        pages_written += write_chunk - wbc.nr_to_write;
-                       if (pages_written >= write_chunk)
-                               break;          /* We've done our duty */
+                       get_dirty_limits(&background_thresh, &dirty_thresh,
+                                      &bdi_thresh, bdi);
                }
+
+               /*
+                * In order to avoid the stacked BDI deadlock we need
+                * to ensure we accurately count the 'dirty' pages when
+                * the threshold is low.
+                *
+                * Otherwise it would be possible to get thresh+n pages
+                * reported dirty, even though there are thresh-m pages
+                * actually dirty; with m+n sitting in the percpu
+                * deltas.
+                */
+               if (bdi_thresh < 2*bdi_stat_error(bdi)) {
+                       bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
+                       bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
+               } else if (bdi_nr_reclaimable) {
+                       bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
+                       bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
+               }
+
+               if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
+                       break;
+               if (pages_written >= write_chunk)
+                       break;          /* We've done our duty */
+
                congestion_wait(WRITE, HZ/10);
        }
 
-       if (nr_reclaimable + global_page_state(NR_WRITEBACK)
-               <= dirty_thresh && dirty_exceeded)
-                       dirty_exceeded = 0;
+       if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
+                       bdi->dirty_exceeded)
+               bdi->dirty_exceeded = 0;
 
        if (writeback_in_progress(bdi))
                return;         /* pdflush is already working this queue */
@@ -270,13 +590,15 @@ static void balance_dirty_pages(struct address_space *mapping)
         * background_thresh, to keep the amount of dirty memory low.
         */
        if ((laptop_mode && pages_written) ||
-            (!laptop_mode && (nr_reclaimable > background_thresh)))
+                       (!laptop_mode && (global_page_state(NR_FILE_DIRTY)
+                                         + global_page_state(NR_UNSTABLE_NFS)
+                                         > background_thresh)))
                pdflush_operation(background_writeout, 0);
 }
 
-void set_page_dirty_balance(struct page *page)
+void set_page_dirty_balance(struct page *page, int page_mkwrite)
 {
-       if (set_page_dirty(page)) {
+       if (set_page_dirty(page) || page_mkwrite) {
                struct address_space *mapping = page_mapping(page);
 
                if (mapping)
@@ -306,7 +628,7 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
        unsigned long *p;
 
        ratelimit = ratelimit_pages;
-       if (dirty_exceeded)
+       if (mapping->backing_dev_info->dirty_exceeded)
                ratelimit = 8;
 
        /*
@@ -328,21 +650,11 @@ EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
 
 void throttle_vm_writeout(gfp_t gfp_mask)
 {
-       long background_thresh;
-       long dirty_thresh;
-
-       if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO)) {
-               /*
-                * The caller might hold locks which can prevent IO completion
-                * or progress in the filesystem.  So we cannot just sit here
-                * waiting for IO to complete.
-                */
-               congestion_wait(WRITE, HZ/10);
-               return;
-       }
+       unsigned long background_thresh;
+       unsigned long dirty_thresh;
 
         for ( ; ; ) {
-               get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
+               get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
 
                 /*
                  * Boost the allowable dirty threshold a bit for page
@@ -354,6 +666,14 @@ void throttle_vm_writeout(gfp_t gfp_mask)
                        global_page_state(NR_WRITEBACK) <= dirty_thresh)
                                break;
                 congestion_wait(WRITE, HZ/10);
+
+               /*
+                * The caller might hold locks which can prevent IO completion
+                * or progress in the filesystem.  So we cannot just sit here
+                * waiting for IO to complete.
+                */
+               if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
+                       break;
         }
 }
 
@@ -374,14 +694,15 @@ static void background_writeout(unsigned long _min_pages)
        };
 
        for ( ; ; ) {
-               long background_thresh;
-               long dirty_thresh;
+               unsigned long background_thresh;
+               unsigned long dirty_thresh;
 
-               get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
+               get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
                if (global_page_state(NR_FILE_DIRTY) +
                        global_page_state(NR_UNSTABLE_NFS) < background_thresh
                                && min_pages <= 0)
                        break;
+               wbc.more_io = 0;
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                wbc.pages_skipped = 0;
@@ -389,8 +710,9 @@ static void background_writeout(unsigned long _min_pages)
                min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
                if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
                        /* Wrote less than expected */
-                       congestion_wait(WRITE, HZ/10);
-                       if (!wbc.encountered_congestion)
+                       if (wbc.encountered_congestion || wbc.more_io)
+                               congestion_wait(WRITE, HZ/10);
+                       else
                                break;
                }
        }
@@ -455,11 +777,12 @@ static void wb_kupdate(unsigned long arg)
                        global_page_state(NR_UNSTABLE_NFS) +
                        (inodes_stat.nr_inodes - inodes_stat.nr_unused);
        while (nr_to_write > 0) {
+               wbc.more_io = 0;
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                writeback_inodes(&wbc);
                if (wbc.nr_to_write > 0) {
-                       if (wbc.encountered_congestion)
+                       if (wbc.encountered_congestion || wbc.more_io)
                                congestion_wait(WRITE, HZ/10);
                        else
                                break;  /* All the old data is written */
@@ -580,9 +903,15 @@ static struct notifier_block __cpuinitdata ratelimit_nb = {
  */
 void __init page_writeback_init(void)
 {
+       int shift;
+
        mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
        writeback_set_ratelimit();
        register_cpu_notifier(&ratelimit_nb);
+
+       shift = calc_period_shift();
+       prop_descriptor_init(&vm_completions, shift);
+       prop_descriptor_init(&vm_dirties, shift);
 }
 
 /**
@@ -609,10 +938,13 @@ int write_cache_pages(struct address_space *mapping,
        int done = 0;
        struct pagevec pvec;
        int nr_pages;
+       pgoff_t uninitialized_var(writeback_index);
        pgoff_t index;
        pgoff_t end;            /* Inclusive */
-       int scanned = 0;
+       pgoff_t done_index;
+       int cycled;
        int range_whole = 0;
+       long nr_to_write = wbc->nr_to_write;
 
        if (wbc->nonblocking && bdi_write_congested(bdi)) {
                wbc->encountered_congestion = 1;
@@ -621,80 +953,149 @@ int write_cache_pages(struct address_space *mapping,
 
        pagevec_init(&pvec, 0);
        if (wbc->range_cyclic) {
-               index = mapping->writeback_index; /* Start from prev offset */
+               writeback_index = mapping->writeback_index; /* prev offset */
+               index = writeback_index;
+               if (index == 0)
+                       cycled = 1;
+               else
+                       cycled = 0;
                end = -1;
        } else {
                index = wbc->range_start >> PAGE_CACHE_SHIFT;
                end = wbc->range_end >> PAGE_CACHE_SHIFT;
                if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
                        range_whole = 1;
-               scanned = 1;
+               cycled = 1; /* ignore range_cyclic tests */
        }
 retry:
-       while (!done && (index <= end) &&
-              (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
-                                             PAGECACHE_TAG_DIRTY,
-                                             min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
-               unsigned i;
+       done_index = index;
+       while (!done && (index <= end)) {
+               int i;
+
+               nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+                             PAGECACHE_TAG_DIRTY,
+                             min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+               if (nr_pages == 0)
+                       break;
 
-               scanned = 1;
                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
 
                        /*
-                        * At this point we hold neither mapping->tree_lock nor
-                        * lock on the page itself: the page may be truncated or
-                        * invalidated (changing page->mapping to NULL), or even
-                        * swizzled back from swapper_space to tmpfs file
-                        * mapping
+                        * At this point, the page may be truncated or
+                        * invalidated (changing page->mapping to NULL), or
+                        * even swizzled back from swapper_space to tmpfs file
+                        * mapping. However, page->index will not change
+                        * because we have a reference on the page.
                         */
+                       if (page->index > end) {
+                               /*
+                                * can't be range_cyclic (1st pass) because
+                                * end == -1 in that case.
+                                */
+                               done = 1;
+                               break;
+                       }
+
+                       done_index = page->index + 1;
+
                        lock_page(page);
 
+                       /*
+                        * Page truncated or invalidated. We can freely skip it
+                        * then, even for data integrity operations: the page
+                        * has disappeared concurrently, so there could be no
+                        * real expectation of this data integrity operation
+                        * even if there is now a new, dirty page at the same
+                        * pagecache address.
+                        */
                        if (unlikely(page->mapping != mapping)) {
+continue_unlock:
                                unlock_page(page);
                                continue;
                        }
 
-                       if (!wbc->range_cyclic && page->index > end) {
-                               done = 1;
-                               unlock_page(page);
-                               continue;
+                       if (!PageDirty(page)) {
+                               /* someone wrote it for us */
+                               goto continue_unlock;
                        }
 
-                       if (wbc->sync_mode != WB_SYNC_NONE)
-                               wait_on_page_writeback(page);
-
-                       if (PageWriteback(page) ||
-                           !clear_page_dirty_for_io(page)) {
-                               unlock_page(page);
-                               continue;
+                       if (PageWriteback(page)) {
+                               if (wbc->sync_mode != WB_SYNC_NONE)
+                                       wait_on_page_writeback(page);
+                               else
+                                       goto continue_unlock;
                        }
 
+                       BUG_ON(PageWriteback(page));
+                       if (!clear_page_dirty_for_io(page))
+                               goto continue_unlock;
+
                        ret = (*writepage)(page, wbc, data);
+                       if (unlikely(ret)) {
+                               if (ret == AOP_WRITEPAGE_ACTIVATE) {
+                                       unlock_page(page);
+                                       ret = 0;
+                               } else {
+                                       /*
+                                        * done_index is set past this page,
+                                        * so media errors will not choke
+                                        * background writeout for the entire
+                                        * file. This has consequences for
+                                        * range_cyclic semantics (ie. it may
+                                        * not be suitable for data integrity
+                                        * writeout).
+                                        */
+                                       done = 1;
+                                       break;
+                               }
+                       }
+
+                       if (nr_to_write > 0) {
+                               nr_to_write--;
+                               if (nr_to_write == 0 &&
+                                   wbc->sync_mode == WB_SYNC_NONE) {
+                                       /*
+                                        * We stop writing back only if we are
+                                        * not doing integrity sync. In case of
+                                        * integrity sync we have to keep going
+                                        * because someone may be concurrently
+                                        * dirtying pages, and we might have
+                                        * synced a lot of newly appeared dirty
+                                        * pages, but have not synced all of the
+                                        * old dirty pages.
+                                        */
+                                       done = 1;
+                                       break;
+                               }
+                       }
 
-                       if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE))
-                               unlock_page(page);
-                       if (ret || (--(wbc->nr_to_write) <= 0))
-                               done = 1;
                        if (wbc->nonblocking && bdi_write_congested(bdi)) {
                                wbc->encountered_congestion = 1;
                                done = 1;
+                               break;
                        }
                }
                pagevec_release(&pvec);
                cond_resched();
        }
-       if (!scanned && !done) {
+       if (!cycled && !done) {
                /*
+                * range_cyclic:
                 * We hit the last page and there is more work to be done: wrap
                 * back to the start of the file
                 */
-               scanned = 1;
+               cycled = 1;
                index = 0;
+               end = writeback_index - 1;
                goto retry;
        }
-       if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
-               mapping->writeback_index = index;
+       if (!wbc->no_nrwrite_index_update) {
+               if (wbc->range_cyclic || (range_whole && nr_to_write > 0))
+                       mapping->writeback_index = done_index;
+               wbc->nr_to_write = nr_to_write;
+       }
+
        return ret;
 }
 EXPORT_SYMBOL(write_cache_pages);
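
/*
 * A stripped-down sketch of the two-pass traversal the reworked
 * write_cache_pages() performs for range_cyclic writeback: scan from the
 * saved writeback_index to the end of the file, then wrap once and scan
 * from page 0 up to where the first pass started.  visit() is a placeholder
 * for the per-page locking and ->writepage work.
 */
#include <stdio.h>

static void cyclic_scan(unsigned long writeback_index, unsigned long last_page,
			void (*visit)(unsigned long))
{
	unsigned long index = writeback_index;
	unsigned long end = last_page;		/* first pass: to end of file */
	int cycled = (writeback_index == 0);	/* nothing to wrap back to */

retry:
	for (; index <= end; index++)
		visit(index);

	if (!cycled) {
		cycled = 1;
		index = 0;
		end = writeback_index - 1;	/* second pass: up to the start */
		goto retry;
	}
}

static void show_page(unsigned long index)
{
	printf("%lu ", index);
}

int main(void)
{
	cyclic_scan(3, 5, show_page);	/* prints: 3 4 5 0 1 2 */
	printf("\n");
	return 0;
}
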
@@ -809,7 +1210,7 @@ int __set_page_dirty_no_writeback(struct page *page)
  * mapping is pinned by the vma's ->vm_file reference.
  *
  * We take care to handle the case where the page was truncated from the
- * mapping by re-checking page_mapping() insode tree_lock.
+ * mapping by re-checking page_mapping() inside tree_lock.
  */
 int __set_page_dirty_nobuffers(struct page *page)
 {
@@ -820,19 +1221,22 @@ int __set_page_dirty_nobuffers(struct page *page)
                if (!mapping)
                        return 1;
 
-               write_lock_irq(&mapping->tree_lock);
+               spin_lock_irq(&mapping->tree_lock);
                mapping2 = page_mapping(page);
                if (mapping2) { /* Race with truncate? */
                        BUG_ON(mapping2 != mapping);
                        WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
                        if (mapping_cap_account_dirty(mapping)) {
                                __inc_zone_page_state(page, NR_FILE_DIRTY);
+                               __inc_bdi_stat(mapping->backing_dev_info,
+                                               BDI_RECLAIMABLE);
+                               task_dirty_inc(current);
                                task_io_account_write(PAGE_CACHE_SIZE);
                        }
                        radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
                }
-               write_unlock_irq(&mapping->tree_lock);
+               spin_unlock_irq(&mapping->tree_lock);
                if (mapping->host) {
                        /* !PageAnon && !swapper_space */
                        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
@@ -859,7 +1263,7 @@ EXPORT_SYMBOL(redirty_page_for_writepage);
  * If the mapping doesn't provide a set_page_dirty a_op, then
  * just fall through and assume that it wants buffer_heads.
  */
-int fastcall set_page_dirty(struct page *page)
+int set_page_dirty(struct page *page)
 {
        struct address_space *mapping = page_mapping(page);
 
@@ -918,6 +1322,9 @@ int clear_page_dirty_for_io(struct page *page)
 {
        struct address_space *mapping = page_mapping(page);
 
+       BUG_ON(!PageLocked(page));
+
+       ClearPageReclaim(page);
        if (mapping && mapping_cap_account_dirty(mapping)) {
                /*
                 * Yes, Virginia, this is indeed insane.
@@ -943,16 +1350,23 @@ int clear_page_dirty_for_io(struct page *page)
                 * We basically use the page "master dirty bit"
                 * as a serialization point for all the different
                 * threads doing their things.
-                *
-                * FIXME! We still have a race here: if somebody
-                * adds the page back to the page tables in
-                * between the "page_mkclean()" and the "TestClearPageDirty()",
-                * we might have it mapped without the dirty bit set.
                 */
                if (page_mkclean(page))
                        set_page_dirty(page);
+               /*
+                * We carefully synchronise fault handlers against
+                * installing a dirty pte and marking the page dirty
+                * at this point. We do this by having them hold the
+                * page lock at some point after installing their
+                * pte, but before marking the page dirty.
+                * Pages are always locked coming in here, so we get
+                * the desired exclusion. See mm/memory.c:do_wp_page()
+                * for more comments.
+                */
                if (TestClearPageDirty(page)) {
                        dec_zone_page_state(page, NR_FILE_DIRTY);
+                       dec_bdi_stat(mapping->backing_dev_info,
+                                       BDI_RECLAIMABLE);
                        return 1;
                }
                return 0;
@@ -967,18 +1381,26 @@ int test_clear_page_writeback(struct page *page)
        int ret;
 
        if (mapping) {
+               struct backing_dev_info *bdi = mapping->backing_dev_info;
                unsigned long flags;
 
-               write_lock_irqsave(&mapping->tree_lock, flags);
+               spin_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestClearPageWriteback(page);
-               if (ret)
+               if (ret) {
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
-               write_unlock_irqrestore(&mapping->tree_lock, flags);
+                       if (bdi_cap_account_writeback(bdi)) {
+                               __dec_bdi_stat(bdi, BDI_WRITEBACK);
+                               __bdi_writeout_inc(bdi);
+                       }
+               }
+               spin_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestClearPageWriteback(page);
        }
+       if (ret)
+               dec_zone_page_state(page, NR_WRITEBACK);
        return ret;
 }
 
@@ -988,39 +1410,43 @@ int test_set_page_writeback(struct page *page)
        int ret;
 
        if (mapping) {
+               struct backing_dev_info *bdi = mapping->backing_dev_info;
                unsigned long flags;
 
-               write_lock_irqsave(&mapping->tree_lock, flags);
+               spin_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestSetPageWriteback(page);
-               if (!ret)
+               if (!ret) {
                        radix_tree_tag_set(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
+                       if (bdi_cap_account_writeback(bdi))
+                               __inc_bdi_stat(bdi, BDI_WRITEBACK);
+               }
                if (!PageDirty(page))
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
-               write_unlock_irqrestore(&mapping->tree_lock, flags);
+               spin_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestSetPageWriteback(page);
        }
+       if (!ret)
+               inc_zone_page_state(page, NR_WRITEBACK);
        return ret;
 
 }
 EXPORT_SYMBOL(test_set_page_writeback);
 
 /*
- * Return true if any of the pages in the mapping are marged with the
+ * Return true if any of the pages in the mapping are marked with the
  * passed tag.
  */
 int mapping_tagged(struct address_space *mapping, int tag)
 {
-       unsigned long flags;
        int ret;
-
-       read_lock_irqsave(&mapping->tree_lock, flags);
+       rcu_read_lock();
        ret = radix_tree_tagged(&mapping->page_tree, tag);
-       read_unlock_irqrestore(&mapping->tree_lock, flags);
+       rcu_read_unlock();
        return ret;
 }
 EXPORT_SYMBOL(mapping_tagged);