[PATCH] mm: swap write failure fixup
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index c1052ee..5557529 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -23,6 +23,7 @@
 #include <linux/backing-dev.h>
 #include <linux/blkdev.h>
 #include <linux/mpage.h>
+#include <linux/rmap.h>
 #include <linux/percpu.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
@@ -72,13 +73,12 @@ int dirty_background_ratio = 10;
 int vm_dirty_ratio = 40;
 
 /*
- * The interval between `kupdate'-style writebacks, in centiseconds
- * (hundredths of a second)
+ * The interval between `kupdate'-style writebacks, in jiffies
  */
 int dirty_writeback_interval = 5 * HZ;
 
 /*
- * The longest number of centiseconds for which data is allowed to remain dirty
+ * The longest number of jiffies for which data is allowed to remain dirty
  */
 int dirty_expire_interval = 30 * HZ;
 
@@ -100,22 +100,6 @@ EXPORT_SYMBOL(laptop_mode);
 
 static void background_writeout(unsigned long _min_pages);
 
-struct writeback_state
-{
-       unsigned long nr_dirty;
-       unsigned long nr_unstable;
-       unsigned long nr_mapped;
-       unsigned long nr_writeback;
-};
-
-static void get_writeback_state(struct writeback_state *wbs)
-{
-       wbs->nr_dirty = read_page_state(nr_dirty);
-       wbs->nr_unstable = read_page_state(nr_unstable);
-       wbs->nr_mapped = read_page_state(nr_mapped);
-       wbs->nr_writeback = read_page_state(nr_writeback);
-}
-
 /*
  * Work out the current dirty-memory clamping and background writeout
  * thresholds.
@@ -134,8 +118,8 @@ static void get_writeback_state(struct writeback_state *wbs)
  * clamping level.
  */
 static void
-get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
-               struct address_space *mapping)
+get_dirty_limits(long *pbackground, long *pdirty,
+                struct address_space *mapping)
 {
        int background_ratio;           /* Percentages */
        int dirty_ratio;
@@ -145,8 +129,6 @@ get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
        unsigned long available_memory = total_pages;
        struct task_struct *tsk;
 
-       get_writeback_state(wbs);
-
 #ifdef CONFIG_HIGHMEM
        /*
         * If this mapping can only allocate from low memory,
@@ -157,7 +139,9 @@ get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
 #endif
 
 
-       unmapped_ratio = 100 - (wbs->nr_mapped * 100) / total_pages;
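+       /* nr_mapped is gone: sum the file-mapped and anonymous counters */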
+       unmapped_ratio = 100 - ((global_page_state(NR_FILE_MAPPED) +
+                               global_page_state(NR_ANON_PAGES)) * 100) /
+                                       total_pages;
 
        dirty_ratio = vm_dirty_ratio;
        if (dirty_ratio > unmapped_ratio / 2)
@@ -190,7 +174,6 @@ get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
  */
 static void balance_dirty_pages(struct address_space *mapping)
 {
-       struct writeback_state wbs;
        long nr_reclaimable;
        long background_thresh;
        long dirty_thresh;
@@ -205,13 +188,15 @@ static void balance_dirty_pages(struct address_space *mapping)
                        .sync_mode      = WB_SYNC_NONE,
                        .older_than_this = NULL,
                        .nr_to_write    = write_chunk,
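+                       /* sweep the whole file, resuming at writeback_index */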
+                       .range_cyclic   = 1,
                };
 
-               get_dirty_limits(&wbs, &background_thresh,
-                                       &dirty_thresh, mapping);
-               nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
-               if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
-                       break;
+               get_dirty_limits(&background_thresh, &dirty_thresh, mapping);
+               nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
+                                       global_page_state(NR_UNSTABLE_NFS);
+               if (nr_reclaimable + global_page_state(NR_WRITEBACK) <=
+                               dirty_thresh)
+                       break;
 
                if (!dirty_exceeded)
                        dirty_exceeded = 1;
@@ -224,11 +209,14 @@ static void balance_dirty_pages(struct address_space *mapping)
                 */
                if (nr_reclaimable) {
                        writeback_inodes(&wbc);
-                       get_dirty_limits(&wbs, &background_thresh,
-                                       &dirty_thresh, mapping);
-                       nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
-                       if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
-                               break;
+                       get_dirty_limits(&background_thresh,
+                                               &dirty_thresh, mapping);
+                       nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
+                                       global_page_state(NR_UNSTABLE_NFS);
+                       if (nr_reclaimable +
+                                       global_page_state(NR_WRITEBACK) <=
+                                       dirty_thresh)
+                               break;
                        pages_written += write_chunk - wbc.nr_to_write;
                        if (pages_written >= write_chunk)
                                break;          /* We've done our duty */
@@ -236,8 +224,9 @@ static void balance_dirty_pages(struct address_space *mapping)
                blk_congestion_wait(WRITE, HZ/10);
        }
 
-       if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh && dirty_exceeded)
-               dirty_exceeded = 0;
+       if (nr_reclaimable + global_page_state(NR_WRITEBACK) <=
+                       dirty_thresh && dirty_exceeded)
+               dirty_exceeded = 0;
 
        if (writeback_in_progress(bdi))
                return;         /* pdflush is already working this queue */
@@ -255,9 +244,20 @@ static void balance_dirty_pages(struct address_space *mapping)
                pdflush_operation(background_writeout, 0);
 }
 
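+/*
+ * Dirty a page and, if that made a clean->dirty transition, apply the
+ * ratelimited throttling above.  Meant for paths (e.g. the page fault
+ * path) that dirty pages outside the normal write() path.
+ */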
+void set_page_dirty_balance(struct page *page)
+{
+       if (set_page_dirty(page)) {
+               struct address_space *mapping = page_mapping(page);
+
+               if (mapping)
+                       balance_dirty_pages_ratelimited(mapping);
+       }
+}
+
 /**
- * balance_dirty_pages_ratelimited - balance dirty memory state
+ * balance_dirty_pages_ratelimited_nr - balance dirty memory state
  * @mapping: address_space which was dirtied
+ * @nr_pages_dirtied: number of pages which the caller has just dirtied
  *
  * Processes which are dirtying memory should call in here once for each page
  * which was newly dirtied.  The function will periodically check the system's
@@ -268,10 +268,12 @@ static void balance_dirty_pages(struct address_space *mapping)
  * limit we decrease the ratelimiting by a lot, to prevent individual processes
  * from overshooting the limit by (ratelimit_pages) each.
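+ *
+ * Callers that dirty a single page at a time can keep using the
+ * balance_dirty_pages_ratelimited() wrapper, which simply calls this
+ * function with nr_pages_dirtied == 1 (see include/linux/writeback.h).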
  */
-void balance_dirty_pages_ratelimited(struct address_space *mapping)
+void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
+                                       unsigned long nr_pages_dirtied)
 {
-       static DEFINE_PER_CPU(int, ratelimits) = 0;
-       long ratelimit;
+       static DEFINE_PER_CPU(unsigned long, ratelimits) = 0;
+       unsigned long ratelimit;
+       unsigned long *p;
 
        ratelimit = ratelimit_pages;
        if (dirty_exceeded)
@@ -281,24 +283,26 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
         * Check the rate limiting. Also, we do not want to throttle real-time
         * tasks in balance_dirty_pages(). Period.
         */
-       if (get_cpu_var(ratelimits)++ >= ratelimit) {
-               __get_cpu_var(ratelimits) = 0;
-               put_cpu_var(ratelimits);
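+       /*
+        * preempt_disable() pins us to this CPU, so the per-cpu counter
+        * can be read and updated without further locking.
+        */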
+       preempt_disable();
+       p = &__get_cpu_var(ratelimits);
+       *p += nr_pages_dirtied;
+       if (unlikely(*p >= ratelimit)) {
+               *p = 0;
+               preempt_enable();
                balance_dirty_pages(mapping);
                return;
        }
-       put_cpu_var(ratelimits);
+       preempt_enable();
 }
-EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
+EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
 
 void throttle_vm_writeout(void)
 {
-       struct writeback_state wbs;
        long background_thresh;
        long dirty_thresh;
 
         for ( ; ; ) {
-               get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);
+               get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
 
                 /*
                  * Boost the allowable dirty threshold a bit for page
@@ -306,8 +310,9 @@ void throttle_vm_writeout(void)
                  */
                 dirty_thresh += dirty_thresh / 10;      /* wheeee... */
 
-                if (wbs.nr_unstable + wbs.nr_writeback <= dirty_thresh)
-                        break;
+                if (global_page_state(NR_UNSTABLE_NFS) +
+                    global_page_state(NR_WRITEBACK) <= dirty_thresh)
+                        break;
                 blk_congestion_wait(WRITE, HZ/10);
         }
 }
@@ -326,15 +331,16 @@ static void background_writeout(unsigned long _min_pages)
                .older_than_this = NULL,
                .nr_to_write    = 0,
                .nonblocking    = 1,
+               .range_cyclic   = 1,
        };
 
        for ( ; ; ) {
-               struct writeback_state wbs;
                long background_thresh;
                long dirty_thresh;
 
-               get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);
-               if (wbs.nr_dirty + wbs.nr_unstable < background_thresh
+               get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
+               if (global_page_state(NR_FILE_DIRTY) +
+                       global_page_state(NR_UNSTABLE_NFS) < background_thresh
                                && min_pages <= 0)
                        break;
                wbc.encountered_congestion = 0;
@@ -358,12 +364,9 @@ static void background_writeout(unsigned long _min_pages)
  */
 int wakeup_pdflush(long nr_pages)
 {
-       if (nr_pages == 0) {
-               struct writeback_state wbs;
-
-               get_writeback_state(&wbs);
-               nr_pages = wbs.nr_dirty + wbs.nr_unstable;
-       }
+       if (nr_pages == 0)
+               nr_pages = global_page_state(NR_FILE_DIRTY) +
+                               global_page_state(NR_UNSTABLE_NFS);
        return pdflush_operation(background_writeout, nr_pages);
 }
 
@@ -394,7 +397,6 @@ static void wb_kupdate(unsigned long arg)
        unsigned long start_jif;
        unsigned long next_jif;
        long nr_to_write;
-       struct writeback_state wbs;
        struct writeback_control wbc = {
                .bdi            = NULL,
                .sync_mode      = WB_SYNC_NONE,
@@ -402,15 +404,16 @@ static void wb_kupdate(unsigned long arg)
                .nr_to_write    = 0,
                .nonblocking    = 1,
                .for_kupdate    = 1,
+               .range_cyclic   = 1,
        };
 
        sync_supers();
 
-       get_writeback_state(&wbs);
        oldest_jif = jiffies - dirty_expire_interval;
        start_jif = jiffies;
        next_jif = start_jif + dirty_writeback_interval;
-       nr_to_write = wbs.nr_dirty + wbs.nr_unstable +
+       nr_to_write = global_page_state(NR_FILE_DIRTY) +
+                       global_page_state(NR_UNSTABLE_NFS) +
                        (inodes_stat.nr_inodes - inodes_stat.nr_unused);
        while (nr_to_write > 0) {
                wbc.encountered_congestion = 0;
@@ -508,14 +511,14 @@ static void set_ratelimit(void)
                ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
 }
 
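+/*
+ * __cpuinit/__cpuinitdata: this notifier can be discarded after boot
+ * when CPU hotplug support is not configured in.
+ */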
-static int
+static int __cpuinit
 ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
 {
        set_ratelimit();
        return 0;
 }
 
-static struct notifier_block ratelimit_nb = {
+static struct notifier_block __cpuinitdata ratelimit_nb = {
        .notifier_call  = ratelimit_handler,
        .next           = NULL,
 };
@@ -558,7 +561,7 @@ int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
                return 0;
        wbc->for_writepages = 1;
        if (mapping->a_ops->writepages)
-               ret =  mapping->a_ops->writepages(mapping, wbc);
+               ret = mapping->a_ops->writepages(mapping, wbc);
        else
                ret = generic_writepages(mapping, wbc);
        wbc->for_writepages = 0;
@@ -622,8 +625,6 @@ EXPORT_SYMBOL(write_one_page);
  */
 int __set_page_dirty_nobuffers(struct page *page)
 {
-       int ret = 0;
-
        if (!TestSetPageDirty(page)) {
                struct address_space *mapping = page_mapping(page);
                struct address_space *mapping2;
@@ -634,7 +635,8 @@ int __set_page_dirty_nobuffers(struct page *page)
                        if (mapping2) { /* Race with truncate? */
                                BUG_ON(mapping2 != mapping);
                                if (mapping_cap_account_dirty(mapping))
-                                       inc_page_state(nr_dirty);
+                                       __inc_zone_page_state(page,
+                                                               NR_FILE_DIRTY);
                                radix_tree_tag_set(&mapping->page_tree,
                                        page_index(page), PAGECACHE_TAG_DIRTY);
                        }
@@ -645,8 +647,9 @@ int __set_page_dirty_nobuffers(struct page *page)
                                                        I_DIRTY_PAGES);
                        }
                }
+               return 1;
        }
-       return ret;
+       return 0;
 }
 EXPORT_SYMBOL(__set_page_dirty_nobuffers);
 
@@ -676,8 +679,10 @@ int fastcall set_page_dirty(struct page *page)
                        return (*spd)(page);
                return __set_page_dirty_buffers(page);
        }
-       if (!PageDirty(page))
-               SetPageDirty(page);
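+       /* report the clean->dirty transition so callers can throttle */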
+       if (!PageDirty(page)) {
+               if (!TestSetPageDirty(page))
+                       return 1;
+       }
        return 0;
 }
 EXPORT_SYMBOL(set_page_dirty);
@@ -696,7 +701,7 @@ int set_page_dirty_lock(struct page *page)
 {
        int ret;
 
-       lock_page(page);
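+       /*
+        * Nosync variant: we may not be able to pin the page's inode
+        * here, so the ->sync_page() callout must be avoided.
+        */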
+       lock_page_nosync(page);
        ret = set_page_dirty(page);
        unlock_page(page);
        return ret;
@@ -719,8 +724,14 @@ int test_clear_page_dirty(struct page *page)
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
                        write_unlock_irqrestore(&mapping->tree_lock, flags);
-                       if (mapping_cap_account_dirty(mapping))
-                               dec_page_state(nr_dirty);
+                       /*
+                        * We can continue to use `mapping' here because the
+                        * page is locked, which pins the address_space
+                        */
+                       if (mapping_cap_account_dirty(mapping)) {
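+                               /* write-protect the ptes so the next write refaults */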
+                               page_mkclean(page);
+                               dec_zone_page_state(page, NR_FILE_DIRTY);
+                       }
                        return 1;
                }
                write_unlock_irqrestore(&mapping->tree_lock, flags);
@@ -750,8 +761,10 @@ int clear_page_dirty_for_io(struct page *page)
 
        if (mapping) {
                if (TestClearPageDirty(page)) {
-                       if (mapping_cap_account_dirty(mapping))
-                               dec_page_state(nr_dirty);
+                       if (mapping_cap_account_dirty(mapping)) {
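+                               /* as above: write-protect the ptes via rmap */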
+                               page_mkclean(page);
+                               dec_zone_page_state(page, NR_FILE_DIRTY);
+                       }
                        return 1;
                }
                return 0;
@@ -809,6 +822,15 @@ int test_set_page_writeback(struct page *page)
 EXPORT_SYMBOL(test_set_page_writeback);
 
 /*
+ * Wakes up tasks that are being throttled due to writeback congestion
+ */
+void writeback_congestion_end(void)
+{
+       blk_congestion_end(WRITE);
+}
+EXPORT_SYMBOL(writeback_congestion_end);
+
+/*
 * Return true if any of the pages in the mapping are marked with the
  * passed tag.
  */