vmscan: zone_reclaim() don't use insane swap_cluster_max
diff --git a/mm/migrate.c b/mm/migrate.c
index 37143b9..367272d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -67,6 +67,8 @@ int putback_lru_pages(struct list_head *l)
 
        list_for_each_entry_safe(page, page2, l, lru) {
                list_del(&page->lru);
+               dec_zone_page_state(page, NR_ISOLATED_ANON +
+                               page_is_file_cache(page));
                putback_lru_page(page);
                count++;
        }
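
The added dec_zone_page_state() lines undo, at putback time, the NR_ISOLATED accounting taken when the pages were isolated. The expression works because NR_ISOLATED_FILE sits directly after NR_ISOLATED_ANON in the zone_stat_item enum and page_is_file_cache() returns 1 for file-backed pages and 0 for anonymous ones, so the sum indexes the matching counter. A minimal userspace sketch of that indexing idiom (the enum layout and the 0/1 return value are assumptions mirroring the kernel of this era, not kernel code):

#include <stdio.h>

/* Order mirrors the kernel's zone_stat_item enum (assumption). */
enum zone_stat_item_demo {
	NR_ISOLATED_ANON,
	NR_ISOLATED_FILE,
	NR_VM_ZONE_STAT_ITEMS
};

static long vm_stat[NR_VM_ZONE_STAT_ITEMS];

/* Stand-in for page_is_file_cache(): 1 for file-backed, 0 for anon. */
static int page_is_file_cache_demo(int swap_backed)
{
	return !swap_backed;
}

int main(void)
{
	int anon = 1, file = 0;	/* "pages", reduced to a swap-backed flag */

	/* isolation side: inc_zone_page_state(page, NR_ISOLATED_ANON + ...) */
	vm_stat[NR_ISOLATED_ANON + page_is_file_cache_demo(anon)]++;
	vm_stat[NR_ISOLATED_ANON + page_is_file_cache_demo(file)]++;

	/* putback side: the matching dec_zone_page_state(...) */
	vm_stat[NR_ISOLATED_ANON + page_is_file_cache_demo(anon)]--;

	printf("isolated: anon=%ld file=%ld\n",
	       vm_stat[NR_ISOLATED_ANON], vm_stat[NR_ISOLATED_FILE]);
	return 0;
}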
@@ -170,17 +172,14 @@ static void remove_anon_migration_ptes(struct page *old, struct page *new)
 {
        struct anon_vma *anon_vma;
        struct vm_area_struct *vma;
-       unsigned long mapping;
-
-       mapping = (unsigned long)new->mapping;
-
-       if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
-               return;
 
        /*
         * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
         */
-       anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
+       anon_vma = page_anon_vma(new);
+       if (!anon_vma)
+               return;
+
        spin_lock(&anon_vma->lock);
 
        list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
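
The removed lines open-coded what page_anon_vma() does: for an anonymous page, ->mapping holds the anon_vma pointer with its low bit (PAGE_MAPPING_ANON) set as a tag, so the helper tests the tag, strips it, and returns NULL for file pages or a NULL ->mapping. A small userspace sketch of that pointer-tagging idiom, with simplified types (only the tag-and-mask logic reflects the kernel):

#include <stdio.h>
#include <stdint.h>

#define PAGE_MAPPING_ANON 1UL	/* low bit of ->mapping tags anon pages */

struct anon_vma { int dummy; };
struct page { void *mapping; };

/* Simplified stand-in for page_anon_vma(). */
static struct anon_vma *page_anon_vma_demo(struct page *page)
{
	uintptr_t mapping = (uintptr_t)page->mapping;

	if (!(mapping & PAGE_MAPPING_ANON))
		return NULL;		/* file-backed page or no mapping */
	return (struct anon_vma *)(mapping & ~PAGE_MAPPING_ANON);
}

int main(void)
{
	static struct anon_vma av;
	struct page anon_page = {
		.mapping = (void *)((uintptr_t)&av | PAGE_MAPPING_ANON),
	};
	struct page file_page = { .mapping = &av };	/* untagged pointer */

	printf("anon page -> %p (expect %p)\n",
	       (void *)page_anon_vma_demo(&anon_page), (void *)&av);
	printf("file page -> %p (expect nil)\n",
	       (void *)page_anon_vma_demo(&file_page));
	return 0;
}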
@@ -270,7 +269,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
        pslot = radix_tree_lookup_slot(&mapping->page_tree,
                                        page_index(page));
 
-       expected_count = 2 + !!page_has_private(page);
+       expected_count = 2 + page_has_private(page);
        if (page_count(page) != expected_count ||
                        (struct page *)radix_tree_deref_slot(pslot) != page) {
                spin_unlock_irq(&mapping->tree_lock);
@@ -600,7 +599,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
        struct page *newpage = get_new_page(page, private, &result);
        int rcu_locked = 0;
        int charge = 0;
-       struct mem_cgroup *mem;
+       struct mem_cgroup *mem = NULL;
 
        if (!newpage)
                return -ENOMEM;
@@ -673,7 +672,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
        }
 
        /* Establish migration ptes or remove ptes */
-       try_to_unmap(page, 1);
+       try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 
 skip_unmap:
        if (!page_mapped(page))
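
Here the second argument to try_to_unmap() changes from a bare boolean to explicit TTU_* flags. A rough sketch of how such a flag word can be laid out and consumed (the numeric values are assumptions loosely modelled on the kernel's enum ttu_flags; only the OR-combined call above comes from the patch):

#include <stdio.h>

/* Assumed layout, loosely modelled on enum ttu_flags:
 * the low byte selects an action, higher bits are modifiers.
 */
enum ttu_flags_demo {
	TTU_UNMAP	  = 0,		/* plain unmap */
	TTU_MIGRATION	  = 1,		/* install migration entries instead */
	TTU_ACTION_MASK	  = 0xff,
	TTU_IGNORE_MLOCK  = 1 << 8,	/* also unmap mlocked vmas */
	TTU_IGNORE_ACCESS = 1 << 9,	/* ignore pte reference bits */
};

static void try_to_unmap_demo(unsigned int flags)
{
	unsigned int action = flags & TTU_ACTION_MASK;

	printf("action: %s\n", action == TTU_MIGRATION ? "migration" : "unmap");
	if (flags & TTU_IGNORE_MLOCK)
		printf("modifier: ignore mlock\n");
	if (flags & TTU_IGNORE_ACCESS)
		printf("modifier: ignore access bits\n");
}

int main(void)
{
	try_to_unmap_demo(TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
	return 0;
}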
@@ -698,6 +697,8 @@ unlock:
                 * restored.
                 */
                list_del(&page->lru);
+               dec_zone_page_state(page, NR_ISOLATED_ANON +
+                               page_is_file_cache(page));
                putback_lru_page(page);
        }
 
@@ -867,8 +868,11 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
                        goto put_and_set;
 
                err = isolate_lru_page(page);
-               if (!err)
+               if (!err) {
                        list_add_tail(&page->lru, &pagelist);
+                       inc_zone_page_state(page, NR_ISOLATED_ANON +
+                                           page_is_file_cache(page));
+               }
 put_and_set:
                /*
                 * Either remove the duplicate refcount from
@@ -1033,7 +1037,7 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
        int err;
 
        for (i = 0; i < nr_pages; i += chunk_nr) {
-               if (chunk_nr + i > nr_pages)
+               if (chunk_nr > nr_pages - i)
                        chunk_nr = nr_pages - i;
 
                err = copy_from_user(chunk_pages, &pages[i],
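
The last hunk rewrites the chunk-size clamp so the comparison cannot overflow: since i < nr_pages inside the loop, nr_pages - i is always well defined, whereas chunk_nr + i could wrap for very large inputs. A standalone sketch of the same chunked loop (the chunk-size name follows the kernel's DO_PAGES_STAT_CHUNK_NR; everything else is simplified):

#include <stdio.h>

#define DO_PAGES_STAT_CHUNK_NR 16UL

int main(void)
{
	unsigned long nr_pages = 37, i;

	for (i = 0; i < nr_pages; i += DO_PAGES_STAT_CHUNK_NR) {
		unsigned long chunk_nr = DO_PAGES_STAT_CHUNK_NR;

		/*
		 * "chunk_nr > nr_pages - i" cannot overflow because i < nr_pages
		 * here, whereas "chunk_nr + i > nr_pages" could wrap if i were
		 * close to ULONG_MAX.
		 */
		if (chunk_nr > nr_pages - i)
			chunk_nr = nr_pages - i;

		printf("process pages [%lu, %lu)\n", i, i + chunk_nr);
	}
	return 0;
}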