diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b036054..7845462 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -37,7 +37,7 @@
 
 /*
  * The maximum number of pages to writeout in a single bdflush/kupdate
- * operation.  We do this so we don't hold I_LOCK against an inode for
+ * operation.  We do this so we don't hold I_SYNC against an inode for
  * enormous amounts of time, which would block a userspace task which has
  * been forced to throttle against that inode.  Also, the code reevaluates
  * the dirty limits each time it has written this many pages.
@@ -118,6 +118,7 @@ static void background_writeout(unsigned long _min_pages);
  *
  */
 static struct prop_descriptor vm_completions;
+static struct prop_descriptor vm_dirties;
 
 static unsigned long determine_dirtyable_memory(void);
 
@@ -146,6 +147,7 @@ int dirty_ratio_handler(struct ctl_table *table, int write,
        if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
                int shift = calc_period_shift();
                prop_change_shift(&vm_completions, shift);
+               prop_change_shift(&vm_dirties, shift);
        }
        return ret;
 }
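Both floating proportions must age at the same rate, so a write to /proc/sys/vm/dirty_ratio now re-derives the period shift and applies it to vm_dirties as well as vm_completions. As a rough userspace sketch of the magnitude involved, assuming calc_period_shift() keeps the 2 + ilog2(dirty_total - 1) form it has earlier in this file (not visible in this hunk), with an illustrative 1 GiB of dirtyable 4 KiB pages:

    #include <stdio.h>

    static int ilog2_ul(unsigned long v)    /* floor(log2(v)), v > 0 */
    {
            int r = -1;

            while (v) {
                    v >>= 1;
                    r++;
            }
            return r;
    }

    int main(void)
    {
            unsigned long dirtyable = 262144;   /* assumed: 1 GiB / 4 KiB */
            int vm_dirty_ratio = 10;            /* value just written */
            unsigned long dirty_total = vm_dirty_ratio * dirtyable / 100;

            /* assumed calc_period_shift(): 2 + floor(log2(dirty_total - 1)) */
            printf("shift = %d\n", 2 + ilog2_ul(dirty_total - 1));
            return 0;
    }

With these numbers it prints "shift = 16", i.e. the proportions decay over a window of roughly 2^16 dirty/writeout events.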
@@ -159,6 +161,11 @@ static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
        __prop_inc_percpu(&vm_completions, &bdi->completions);
 }
 
+static inline void task_dirty_inc(struct task_struct *tsk)
+{
+       prop_inc_single(&vm_dirties, &tsk->dirties);
+}
+
 /*
  * Obtain an accurate fraction of the BDI's portion.
  */
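vm_dirties mirrors vm_completions: every page a task dirties bumps a per-task counter inside a shared floating proportion (the hook into set_page_dirty() appears at the end of this patch), so each task's share of recent dirtying can later be read back as a fraction. A toy model of the bookkeeping, leaving out the per-period aging that lib/proportions performs:

    #include <stdio.h>

    struct task {
            unsigned long dirties;          /* stand-in for tsk->dirties */
    };

    static unsigned long total_dirties;     /* global event count */

    static void task_dirty_inc(struct task *tsk)
    {
            tsk->dirties++;
            total_dirties++;
    }

    int main(void)
    {
            struct task heavy = { 0 }, light = { 0 };
            int i;

            for (i = 0; i < 90; i++)
                    task_dirty_inc(&heavy);
            for (i = 0; i < 10; i++)
                    task_dirty_inc(&light);

            /* prop_fraction_single() hands back such numerator/denominator pairs */
            printf("heavy %lu/%lu, light %lu/%lu\n",
                   heavy.dirties, total_dirties,
                   light.dirties, total_dirties);
            return 0;
    }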
@@ -198,6 +205,37 @@ clip_bdi_dirty_limit(struct backing_dev_info *bdi, long dirty, long *pbdi_dirty)
        *pbdi_dirty = min(*pbdi_dirty, avail_dirty);
 }
 
+static inline void task_dirties_fraction(struct task_struct *tsk,
+               long *numerator, long *denominator)
+{
+       prop_fraction_single(&vm_dirties, &tsk->dirties,
+                               numerator, denominator);
+}
+
+/*
+ * scale the dirty limit
+ *
+ * task specific dirty limit:
+ *
+ *   dirty -= (dirty/8) * p_{t}
+ */
+void task_dirty_limit(struct task_struct *tsk, long *pdirty)
+{
+       long numerator, denominator;
+       long dirty = *pdirty;
+       u64 inv = dirty >> 3;
+
+       task_dirties_fraction(tsk, &numerator, &denominator);
+       inv *= numerator;
+       do_div(inv, denominator);
+
+       dirty -= inv;
+       if (dirty < *pdirty/2)
+               dirty = *pdirty/2;
+
+       *pdirty = dirty;
+}
+
 /*
  * Work out the current dirty-memory clamping and background writeout
  * thresholds.
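The net effect: the heavier a task has been dirtying recently, the sooner it hits its limit and gets throttled, while light or new dirtiers see up to ~12.5% more headroom; the clamp to half the original value is a defensive floor. The next hunk wires this into get_dirty_limits(), so the cut lands on the per-bdi dirty limit. A userspace re-run of the arithmetic, with do_div() replaced by plain 64-bit division and made-up numbers (a 1024-page limit, a task responsible for 3/4 of recent dirtying):

    #include <stdio.h>

    static void task_dirty_limit(long *pdirty, long numerator, long denominator)
    {
            long dirty = *pdirty;
            unsigned long long inv = dirty >> 3;    /* at most 1/8 is shaved off */

            inv = inv * numerator / denominator;    /* scale by the task's share */
            dirty -= inv;
            if (dirty < *pdirty / 2)                /* defensive floor at half */
                    dirty = *pdirty / 2;
            *pdirty = dirty;
    }

    int main(void)
    {
            long dirty = 1024;

            task_dirty_limit(&dirty, 3, 4);         /* p_t = 3/4 */
            printf("1024 -> %ld\n", dirty);         /* 1024 - 128*3/4 = 928 */
            return 0;
    }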
@@ -304,6 +342,7 @@ get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
 
                *pbdi_dirty = bdi_dirty;
                clip_bdi_dirty_limit(bdi, dirty, pbdi_dirty);
+               task_dirty_limit(current, pbdi_dirty);
        }
 }
 
@@ -463,16 +502,6 @@ void throttle_vm_writeout(gfp_t gfp_mask)
        long background_thresh;
        long dirty_thresh;
 
-       if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO)) {
-               /*
-                * The caller might hold locks which can prevent IO completion
-                * or progress in the filesystem.  So we cannot just sit here
-                * waiting for IO to complete.
-                */
-               congestion_wait(WRITE, HZ/10);
-               return;
-       }
-
         for ( ; ; ) {
                get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
 
@@ -486,6 +515,14 @@ void throttle_vm_writeout(gfp_t gfp_mask)
                        global_page_state(NR_WRITEBACK) <= dirty_thresh)
                                break;
                 congestion_wait(WRITE, HZ/10);
+
+               /*
+                * The caller might hold locks which can prevent IO completion
+                * or progress in the filesystem.  So we cannot just sit here
+                * waiting for IO to complete.
+                */
+               if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
+                       break;
         }
 }
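Moving the __GFP_FS/__GFP_IO test from the function head to the bottom of the loop changes behaviour for restricted callers: before, they always slept one interval and returned without ever consulting the thresholds; now they skip the sleep entirely when below the limits and pay at most one congestion_wait() when above, while still never looping on IO they might themselves be blocking. A sketch of the mask test, using illustrative flag values rather than the kernel's gfp.h definitions:

    #include <stdio.h>

    #define __GFP_IO        0x40u   /* assumed values, for illustration */
    #define __GFP_FS        0x80u

    static int may_wait_for_io(unsigned int gfp_mask)
    {
            /* true only when the caller may enter the fs *and* start IO */
            return (gfp_mask & (__GFP_FS | __GFP_IO)) == (__GFP_FS | __GFP_IO);
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   may_wait_for_io(__GFP_FS | __GFP_IO),    /* 1 */
                   may_wait_for_io(__GFP_IO),               /* 0 */
                   may_wait_for_io(0));                     /* 0 */
            return 0;
    }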
 
@@ -514,6 +551,7 @@ static void background_writeout(unsigned long _min_pages)
                        global_page_state(NR_UNSTABLE_NFS) < background_thresh
                                && min_pages <= 0)
                        break;
+               wbc.more_io = 0;
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                wbc.pages_skipped = 0;
@@ -521,8 +559,9 @@ static void background_writeout(unsigned long _min_pages)
                min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
                if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
                        /* Wrote less than expected */
-                       congestion_wait(WRITE, HZ/10);
-                       if (!wbc.encountered_congestion)
+                       if (wbc.encountered_congestion || wbc.more_io)
+                               congestion_wait(WRITE, HZ/10);
+                       else
                                break;
                }
        }
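wbc.more_io is a new writeback_control flag, set by the writeback path when inodes remain queued with more dirty pages, so the loop can tell "wrote little because nothing is left" from "wrote little but work remains". Previously a short write without congestion simply ended the pass. The same test keeps wb_kupdate() alive in the next hunk. The retry policy, condensed into a standalone sketch:

    #include <stdio.h>

    enum action { STOP, WAIT_AND_RETRY };

    /* decision after a pass wrote less than MAX_WRITEBACK_PAGES */
    static enum action after_short_write(int encountered_congestion, int more_io)
    {
            return (encountered_congestion || more_io) ? WAIT_AND_RETRY : STOP;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   after_short_write(0, 0),     /* 0: all old data written */
                   after_short_write(1, 0),     /* 1: back off, then retry */
                   after_short_write(0, 1));    /* 1: more inodes queued */
            return 0;
    }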
@@ -587,11 +626,12 @@ static void wb_kupdate(unsigned long arg)
                        global_page_state(NR_UNSTABLE_NFS) +
                        (inodes_stat.nr_inodes - inodes_stat.nr_unused);
        while (nr_to_write > 0) {
+               wbc.more_io = 0;
                wbc.encountered_congestion = 0;
                wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                writeback_inodes(&wbc);
                if (wbc.nr_to_write > 0) {
-                       if (wbc.encountered_congestion)
+                       if (wbc.encountered_congestion || wbc.more_io)
                                congestion_wait(WRITE, HZ/10);
                        else
                                break;  /* All the old data is written */
@@ -720,6 +760,7 @@ void __init page_writeback_init(void)
 
        shift = calc_period_shift();
        prop_descriptor_init(&vm_completions, shift);
+       prop_descriptor_init(&vm_dirties, shift);
 }
 
 /**
@@ -809,8 +850,10 @@ retry:
 
                        ret = (*writepage)(page, wbc, data);
 
-                       if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE))
+                       if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
                                unlock_page(page);
+                               ret = 0;
+                       }
                        if (ret || (--(wbc->nr_to_write) <= 0))
                                done = 1;
                        if (wbc->nonblocking && bdi_write_congested(bdi)) {
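AOP_WRITEPAGE_ACTIVATE is not an error: it is the filesystem asking for the page to be kept dirty and moved back to the active list. Without resetting ret, the "if (ret || ...)" test below would abort the whole sweep and the magic value would leak to callers as a failure. A minimal illustration, borrowing the kernel's 0x80000 constant but none of its types:

    #include <stdio.h>

    #define AOP_WRITEPAGE_ACTIVATE  0x80000

    int main(void)
    {
            int done = 0;
            int ret = AOP_WRITEPAGE_ACTIVATE;   /* fs: keep this page active */

            if (ret == AOP_WRITEPAGE_ACTIVATE) {
                    /* unlock_page(page) happens here in the real code */
                    ret = 0;                    /* consume the hint */
            }
            if (ret)                            /* without the reset: done = 1 */
                    done = 1;
            printf("done = %d\n", done);        /* 0: the sweep continues */
            return 0;
    }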
@@ -998,7 +1041,7 @@ EXPORT_SYMBOL(redirty_page_for_writepage);
  * If the mapping doesn't provide a set_page_dirty a_op, then
  * just fall through and assume that it wants buffer_heads.
  */
-int fastcall set_page_dirty(struct page *page)
+static int __set_page_dirty(struct page *page)
 {
        struct address_space *mapping = page_mapping(page);
 
@@ -1016,6 +1059,14 @@ int fastcall set_page_dirty(struct page *page)
        }
        return 0;
 }
+
+int fastcall set_page_dirty(struct page *page)
+{
+       int ret = __set_page_dirty(page);
+       if (ret)
+               task_dirty_inc(current);
+       return ret;
+}
 EXPORT_SYMBOL(set_page_dirty);
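Splitting the old body out as __set_page_dirty() leaves the exported entry point free to charge the event to current, and only when the return value says the page really went from clean to dirty, so redirtying an already-dirty page is never double-counted. This closes the loop back to task_dirty_inc() above. The pattern, reduced to a standalone sketch with hypothetical names:

    #include <stdio.h>

    static unsigned long my_dirties;        /* stand-in for current->dirties */

    /* stand-in for __set_page_dirty(): reports a real clean->dirty change */
    static int mark_dirty(int *page_dirty)
    {
            if (*page_dirty)
                    return 0;               /* already dirty, nothing to do */
            *page_dirty = 1;
            return 1;
    }

    /* stand-in for set_page_dirty(): account only on a real transition */
    static int set_dirty(int *page_dirty)
    {
            int ret = mark_dirty(page_dirty);

            if (ret)
                    my_dirties++;           /* task_dirty_inc(current) */
            return ret;
    }

    int main(void)
    {
            int page = 0;

            set_dirty(&page);
            set_dirty(&page);               /* no double counting */
            printf("dirties = %lu\n", my_dirties);  /* prints 1 */
            return 0;
    }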
 
 /*