git://ftp.safe.ca
/
safe
/
jmp
/
linux-2.6
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
KVM: VMX: Use INTR_TYPE_NMI_INTR instead of magic value
[safe/jmp/linux-2.6]
/
mm
/
page-writeback.c
diff --git
a/mm/page-writeback.c
b/mm/page-writeback.c
index
789b6ad
..
2970e35
100644
(file)
--- a/
mm/page-writeback.c
+++ b/
mm/page-writeback.c
@@
-7,7
+7,7
@@
* Contains functions related to writing back dirty pages at the
* address_space level.
*
* Contains functions related to writing back dirty pages at the
* address_space level.
*
- * 10Apr2002
akpm@zip.com.au
+ * 10Apr2002
Andrew Morton
* Initial version
*/
* Initial version
*/
@@
-126,8
+126,6
@@
static void background_writeout(unsigned long _min_pages);
static struct prop_descriptor vm_completions;
static struct prop_descriptor vm_dirties;
static struct prop_descriptor vm_completions;
static struct prop_descriptor vm_dirties;
-static unsigned long determine_dirtyable_memory(void);
-
/*
* couple the period to the dirty_ratio:
*
/*
* couple the period to the dirty_ratio:
*
@@
-331,9
+329,7
@@
static unsigned long highmem_dirtyable_memory(unsigned long total)
struct zone *z =
&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
struct zone *z =
&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
- x += zone_page_state(z, NR_FREE_PAGES)
- + zone_page_state(z, NR_INACTIVE)
- + zone_page_state(z, NR_ACTIVE);
+ x += zone_page_state(z, NR_FREE_PAGES) + zone_lru_pages(z);
}
/*
* Make sure that the number of highmem pages is never larger
}
/*
* Make sure that the number of highmem pages is never larger
@@
-347,13
+343,17
@@
static unsigned long highmem_dirtyable_memory(unsigned long total)
#endif
}
#endif
}
-static unsigned long determine_dirtyable_memory(void)
+/**
+ * determine_dirtyable_memory - amount of memory that may be used
+ *
+ * Returns the number of pages that can currently be freed and used
+ * by the kernel for direct mappings.
+ */
+unsigned long determine_dirtyable_memory(void)
{
unsigned long x;
{
unsigned long x;
- x = global_page_state(NR_FREE_PAGES)
- + global_page_state(NR_INACTIVE)
- + global_page_state(NR_ACTIVE);
+ x = global_page_state(NR_FREE_PAGES) + global_lru_pages();
if (!vm_highmem_is_dirtyable)
x -= highmem_dirtyable_memory(x);
if (!vm_highmem_is_dirtyable)
x -= highmem_dirtyable_memory(x);
@@
-872,6
+872,7
@@
int write_cache_pages(struct address_space *mapping,
pgoff_t end; /* Inclusive */
int scanned = 0;
int range_whole = 0;
pgoff_t end; /* Inclusive */
int scanned = 0;
int range_whole = 0;
+ long nr_to_write = wbc->nr_to_write;
if (wbc->nonblocking && bdi_write_congested(bdi)) {
wbc->encountered_congestion = 1;
if (wbc->nonblocking && bdi_write_congested(bdi)) {
wbc->encountered_congestion = 1;
@@
-935,7
+936,7
@@
retry:
unlock_page(page);
ret = 0;
}
unlock_page(page);
ret = 0;
}
- if (ret || (--
(wbc->nr_to_write)
<= 0))
+ if (ret || (--
nr_to_write
<= 0))
done = 1;
if (wbc->nonblocking && bdi_write_congested(bdi)) {
wbc->encountered_congestion = 1;
done = 1;
if (wbc->nonblocking && bdi_write_congested(bdi)) {
wbc->encountered_congestion = 1;
@@
-954,8
+955,12
@@
retry:
index = 0;
goto retry;
}
index = 0;
goto retry;
}
- if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
- mapping->writeback_index = index;
+ if (!wbc->no_nrwrite_index_update) {
+ if (wbc->range_cyclic || (range_whole && nr_to_write > 0))
+ mapping->writeback_index = index;
+ wbc->nr_to_write = nr_to_write;
+ }
+
return ret;
}
EXPORT_SYMBOL(write_cache_pages);
return ret;
}
EXPORT_SYMBOL(write_cache_pages);
@@
-1081,7
+1086,7
@@
int __set_page_dirty_nobuffers(struct page *page)
if (!mapping)
return 1;
if (!mapping)
return 1;
-
write
_lock_irq(&mapping->tree_lock);
+
spin
_lock_irq(&mapping->tree_lock);
mapping2 = page_mapping(page);
if (mapping2) { /* Race with truncate? */
BUG_ON(mapping2 != mapping);
mapping2 = page_mapping(page);
if (mapping2) { /* Race with truncate? */
BUG_ON(mapping2 != mapping);
@@
-1095,7
+1100,7
@@
int __set_page_dirty_nobuffers(struct page *page)
radix_tree_tag_set(&mapping->page_tree,
page_index(page), PAGECACHE_TAG_DIRTY);
}
radix_tree_tag_set(&mapping->page_tree,
page_index(page), PAGECACHE_TAG_DIRTY);
}
-
write
_unlock_irq(&mapping->tree_lock);
+
spin
_unlock_irq(&mapping->tree_lock);
if (mapping->host) {
/* !PageAnon && !swapper_space */
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
if (mapping->host) {
/* !PageAnon && !swapper_space */
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
@@
-1251,7
+1256,7
@@
int test_clear_page_writeback(struct page *page)
struct backing_dev_info *bdi = mapping->backing_dev_info;
unsigned long flags;
struct backing_dev_info *bdi = mapping->backing_dev_info;
unsigned long flags;
-
write
_lock_irqsave(&mapping->tree_lock, flags);
+
spin
_lock_irqsave(&mapping->tree_lock, flags);
ret = TestClearPageWriteback(page);
if (ret) {
radix_tree_tag_clear(&mapping->page_tree,
ret = TestClearPageWriteback(page);
if (ret) {
radix_tree_tag_clear(&mapping->page_tree,
@@
-1262,7
+1267,7
@@
int test_clear_page_writeback(struct page *page)
__bdi_writeout_inc(bdi);
}
}
__bdi_writeout_inc(bdi);
}
}
-
write
_unlock_irqrestore(&mapping->tree_lock, flags);
+
spin
_unlock_irqrestore(&mapping->tree_lock, flags);
} else {
ret = TestClearPageWriteback(page);
}
} else {
ret = TestClearPageWriteback(page);
}
@@
-1280,7
+1285,7
@@
int test_set_page_writeback(struct page *page)
struct backing_dev_info *bdi = mapping->backing_dev_info;
unsigned long flags;
struct backing_dev_info *bdi = mapping->backing_dev_info;
unsigned long flags;
-
write
_lock_irqsave(&mapping->tree_lock, flags);
+
spin
_lock_irqsave(&mapping->tree_lock, flags);
ret = TestSetPageWriteback(page);
if (!ret) {
radix_tree_tag_set(&mapping->page_tree,
ret = TestSetPageWriteback(page);
if (!ret) {
radix_tree_tag_set(&mapping->page_tree,
@@
-1293,7
+1298,7
@@
int test_set_page_writeback(struct page *page)
radix_tree_tag_clear(&mapping->page_tree,
page_index(page),
PAGECACHE_TAG_DIRTY);
radix_tree_tag_clear(&mapping->page_tree,
page_index(page),
PAGECACHE_TAG_DIRTY);
-
write
_unlock_irqrestore(&mapping->tree_lock, flags);
+
spin
_unlock_irqrestore(&mapping->tree_lock, flags);
} else {
ret = TestSetPageWriteback(page);
}
} else {
ret = TestSetPageWriteback(page);
}