RPC/RDMA: maintain the RPC task bytes-sent statistic.
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2a9942f..24de8b6 100644
@@ -126,8 +126,6 @@ static void background_writeout(unsigned long _min_pages);
 static struct prop_descriptor vm_completions;
 static struct prop_descriptor vm_dirties;
 
-static unsigned long determine_dirtyable_memory(void);
-
 /*
  * couple the period to the dirty_ratio:
  *
@@ -168,6 +166,16 @@ static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
                              bdi->max_prop_frac);
 }
 
+void bdi_writeout_inc(struct backing_dev_info *bdi)
+{
+       unsigned long flags;
+
+       local_irq_save(flags);
+       __bdi_writeout_inc(bdi);
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(bdi_writeout_inc);
+
 static inline void task_dirty_inc(struct task_struct *tsk)
 {
        prop_inc_single(&vm_dirties, &tsk->dirties);
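
The unlocked __bdi_writeout_inc() requires its caller to have interrupts disabled; the wrapper exported here disables them itself, so code outside mm/page-writeback.c can credit a completed write against a backing device. A minimal sketch of a hypothetical caller follows (the example_* name is illustrative and not part of this patch):

	#include <linux/backing-dev.h>

	/* Hypothetical sketch: a filesystem or driver that completes
	 * writeback on its own credits the backing device so the per-bdi
	 * completion statistic (and thus the per-bdi dirty limit) keeps
	 * adapting. */
	static void example_end_writeback(struct backing_dev_info *bdi)
	{
		/* bdi_writeout_inc() saves and restores the interrupt
		 * state itself, so plain process context is fine here. */
		bdi_writeout_inc(bdi);
	}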
@@ -201,7 +209,8 @@ clip_bdi_dirty_limit(struct backing_dev_info *bdi, long dirty, long *pbdi_dirty)
        avail_dirty = dirty -
                (global_page_state(NR_FILE_DIRTY) +
                 global_page_state(NR_WRITEBACK) +
-                global_page_state(NR_UNSTABLE_NFS));
+                global_page_state(NR_UNSTABLE_NFS) +
+                global_page_state(NR_WRITEBACK_TEMP));
 
        if (avail_dirty < 0)
                avail_dirty = 0;
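
NR_WRITEBACK_TEMP counts pages that are under writeback through temporary buffers (for example a private copy a filesystem makes before handing data off), and subtracting it here keeps such pages from inflating the remaining dirty headroom. A hedged sketch of how a filesystem might maintain that counter, assuming the NR_WRITEBACK_TEMP item comes from the matching vmstat change (helper names are illustrative):

	#include <linux/mm.h>
	#include <linux/vmstat.h>

	/* Hypothetical sketch: account a temporary page for the duration
	 * of its writeback so clip_bdi_dirty_limit() above can see it. */
	static void example_start_tmp_writeback(struct page *tmp_page)
	{
		inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
		/* ... submit the actual write ... */
	}

	static void example_end_tmp_writeback(struct page *tmp_page)
	{
		dec_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
	}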
@@ -336,7 +345,13 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 #endif
 }
 
-static unsigned long determine_dirtyable_memory(void)
+/**
+ * determine_dirtyable_memory - amount of memory that may be used
+ *
+ * Returns the number of pages that can currently be freed and used
+ * by the kernel for direct mappings.
+ */
+unsigned long determine_dirtyable_memory(void)
 {
        unsigned long x;
 
@@ -945,6 +960,9 @@ retry:
        }
        if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
                mapping->writeback_index = index;
+
+       if (wbc->range_cont)
+               wbc->range_start = index << PAGE_CACHE_SHIFT;
        return ret;
 }
 EXPORT_SYMBOL(write_cache_pages);
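
With wbc->range_cont set, write_cache_pages() now records where its scan stopped in wbc->range_start, so a follow-up call resumes from that point instead of rescanning the original range. A hedged sketch of a ->writepages implementation using this; example_writepage stands in for the filesystem's per-page callback and is not part of this patch:

	#include <linux/pagemap.h>
	#include <linux/writeback.h>

	static int example_writepage(struct page *page,
				     struct writeback_control *wbc, void *data);

	/* Hypothetical sketch: write out pages in passes, relying on
	 * range_cont so the next pass continues where this one stopped. */
	static int example_writepages(struct address_space *mapping,
				      struct writeback_control *wbc)
	{
		wbc->range_cont = 1;
		/* On return, wbc->range_start has been advanced past the
		 * last page written, so calling again continues the scan
		 * from there. */
		return write_cache_pages(mapping, wbc, example_writepage,
					 mapping);
	}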
@@ -1070,7 +1088,7 @@ int __set_page_dirty_nobuffers(struct page *page)
                if (!mapping)
                        return 1;
 
-               write_lock_irq(&mapping->tree_lock);
+               spin_lock_irq(&mapping->tree_lock);
                mapping2 = page_mapping(page);
                if (mapping2) { /* Race with truncate? */
                        BUG_ON(mapping2 != mapping);
@@ -1084,7 +1102,7 @@ int __set_page_dirty_nobuffers(struct page *page)
                        radix_tree_tag_set(&mapping->page_tree,
                                page_index(page), PAGECACHE_TAG_DIRTY);
                }
-               write_unlock_irq(&mapping->tree_lock);
+               spin_unlock_irq(&mapping->tree_lock);
                if (mapping->host) {
                        /* !PageAnon && !swapper_space */
                        __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
@@ -1240,18 +1258,18 @@ int test_clear_page_writeback(struct page *page)
                struct backing_dev_info *bdi = mapping->backing_dev_info;
                unsigned long flags;
 
-               write_lock_irqsave(&mapping->tree_lock, flags);
+               spin_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestClearPageWriteback(page);
                if (ret) {
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
-                       if (bdi_cap_writeback_dirty(bdi)) {
+                       if (bdi_cap_account_writeback(bdi)) {
                                __dec_bdi_stat(bdi, BDI_WRITEBACK);
                                __bdi_writeout_inc(bdi);
                        }
                }
-               write_unlock_irqrestore(&mapping->tree_lock, flags);
+               spin_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestClearPageWriteback(page);
        }
@@ -1269,20 +1287,20 @@ int test_set_page_writeback(struct page *page)
                struct backing_dev_info *bdi = mapping->backing_dev_info;
                unsigned long flags;
 
-               write_lock_irqsave(&mapping->tree_lock, flags);
+               spin_lock_irqsave(&mapping->tree_lock, flags);
                ret = TestSetPageWriteback(page);
                if (!ret) {
                        radix_tree_tag_set(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_WRITEBACK);
-                       if (bdi_cap_writeback_dirty(bdi))
+                       if (bdi_cap_account_writeback(bdi))
                                __inc_bdi_stat(bdi, BDI_WRITEBACK);
                }
                if (!PageDirty(page))
                        radix_tree_tag_clear(&mapping->page_tree,
                                                page_index(page),
                                                PAGECACHE_TAG_DIRTY);
-               write_unlock_irqrestore(&mapping->tree_lock, flags);
+               spin_unlock_irqrestore(&mapping->tree_lock, flags);
        } else {
                ret = TestSetPageWriteback(page);
        }
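
The last two hunks also rename the capability test: a backing device can implement writeback yet opt out of per-bdi writeback accounting, and bdi_cap_account_writeback() expresses that distinction. A hedged sketch of a bdi declaring exactly that, assuming the BDI_CAP_NO_ACCT_WB flag from the matching backing-dev.h change:

	#include <linux/backing-dev.h>

	/* Hypothetical sketch: a backing_dev_info that performs writeback
	 * but is not accounted in BDI_WRITEBACK, which is the case the
	 * bdi_cap_account_writeback() checks above filter out. */
	static struct backing_dev_info example_bdi = {
		.ra_pages	= 0,
		.capabilities	= BDI_CAP_NO_ACCT_WB,
	};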