mm: CONFIG_MMU for PG_mlocked
mm/migrate.c
index 068655d..367272d 100644
@@ -67,6 +67,8 @@ int putback_lru_pages(struct list_head *l)
 
        list_for_each_entry_safe(page, page2, l, lru) {
                list_del(&page->lru);
+               dec_zone_page_state(page, NR_ISOLATED_ANON +
+                               page_is_file_cache(page));
                putback_lru_page(page);
                count++;
        }
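
This dec pairs with the inc_zone_page_state() done when each page was isolated (see the do_move_page_to_node_array() hunk further down), so the NR_ISOLATED_* counters return to zero once migration finishes. The "NR_ISOLATED_ANON + page_is_file_cache(page)" idiom works because the anon and file counters are adjacent zone_stat_item entries and page_is_file_cache() returns 0 or 1. A minimal standalone sketch of the indexing trick (demo names, not kernel code):

    #include <assert.h>
    #include <stdio.h>

    /* Two adjacent counters, mirroring enum zone_stat_item's layout. */
    enum stat_item { NR_ISOLATED_ANON, NR_ISOLATED_FILE, NR_STAT_ITEMS };

    static long counters[NR_STAT_ITEMS];

    /* Stand-in for page_is_file_cache(): 1 for page cache, 0 for anon. */
    static int is_file_cache(int file_backed) { return file_backed ? 1 : 0; }

    int main(void)
    {
            /* ANON + 1 == FILE, so one expression selects either counter. */
            counters[NR_ISOLATED_ANON + is_file_cache(1)]++;
            counters[NR_ISOLATED_ANON + is_file_cache(0)]++;
            assert(counters[NR_ISOLATED_ANON] == 1);
            assert(counters[NR_ISOLATED_FILE] == 1);
            printf("anon=%ld file=%ld\n",
                   counters[NR_ISOLATED_ANON], counters[NR_ISOLATED_FILE]);
            return 0;
    }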
@@ -147,7 +149,7 @@ out:
 static void remove_file_migration_ptes(struct page *old, struct page *new)
 {
        struct vm_area_struct *vma;
-       struct address_space *mapping = page_mapping(new);
+       struct address_space *mapping = new->mapping;
        struct prio_tree_iter iter;
        pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 
@@ -170,17 +172,14 @@ static void remove_anon_migration_ptes(struct page *old, struct page *new)
 {
        struct anon_vma *anon_vma;
        struct vm_area_struct *vma;
-       unsigned long mapping;
-
-       mapping = (unsigned long)new->mapping;
-
-       if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
-               return;
 
        /*
         * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
         */
-       anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
+       anon_vma = page_anon_vma(new);
+       if (!anon_vma)
+               return;
+
        spin_lock(&anon_vma->lock);
 
        list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
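
The open-coded mapping check above collapses into the page_anon_vma() helper, and the !anon_vma test now also covers the NULL-mapping case. Judging from the removed lines, the helper is equivalent to roughly this (a sketch, not the verbatim mm/rmap.c body):

    struct anon_vma *page_anon_vma(struct page *page)
    {
            unsigned long mapping = (unsigned long)page->mapping;

            /* anonymous pages tag the low bit of page->mapping */
            if (!mapping || !(mapping & PAGE_MAPPING_ANON))
                    return NULL;
            return (struct anon_vma *)(mapping - PAGE_MAPPING_ANON);
    }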
@@ -270,7 +269,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
        pslot = radix_tree_lookup_slot(&mapping->page_tree,
                                        page_index(page));
 
-       expected_count = 2 + !!page_has_private(page);
+       expected_count = 2 + page_has_private(page);
        if (page_count(page) != expected_count ||
                        (struct page *)radix_tree_deref_slot(pslot) != page) {
                spin_unlock_irq(&mapping->tree_lock);
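
The double negation goes away because page_has_private() itself was made to return 0 or 1. The expected count is the page cache's reference plus the migration caller's, plus one more if private data holds another. In this era's include/linux/page-flags.h the helper looks roughly like (approximate):

    #define PAGE_FLAGS_PRIVATE \
            (1 << PG_private | 1 << PG_private_2)

    static inline int page_has_private(struct page *page)
    {
            return !!(page->flags & PAGE_FLAGS_PRIVATE);
    }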
@@ -312,7 +311,10 @@ static int migrate_page_move_mapping(struct address_space *mapping,
         */
        __dec_zone_page_state(page, NR_FILE_PAGES);
        __inc_zone_page_state(newpage, NR_FILE_PAGES);
-
+       if (PageSwapBacked(page)) {
+               __dec_zone_page_state(page, NR_SHMEM);
+               __inc_zone_page_state(newpage, NR_SHMEM);
+       }
        spin_unlock_irq(&mapping->tree_lock);
 
        return 0;
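
Shmem/tmpfs pages are the only file-backed pages with PageSwapBacked set, so this keeps the per-zone NR_SHMEM count attached to the right zone when the page moves. The double-underscore stat helpers assume interrupts are already disabled, which holds here only because the caller took mapping->tree_lock with spin_lock_irq(); a sketch of that convention (assuming 2.6.32-era vmstat rules):

    spin_lock_irq(&mapping->tree_lock);
    /* irqs off: the cheaper __ variants are legal */
    __dec_zone_page_state(page, NR_SHMEM);
    __inc_zone_page_state(newpage, NR_SHMEM);
    spin_unlock_irq(&mapping->tree_lock);

    /* no irq guarantee: the irq-safe forms must be used */
    dec_zone_page_state(page, NR_SHMEM);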
@@ -597,7 +599,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
        struct page *newpage = get_new_page(page, private, &result);
        int rcu_locked = 0;
        int charge = 0;
-       struct mem_cgroup *mem;
+       struct mem_cgroup *mem = NULL;
 
        if (!newpage)
                return -ENOMEM;
@@ -664,13 +666,15 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                         *    needs to be effective.
                         */
                        try_to_free_buffers(page);
+                       goto rcu_unlock;
                }
-               goto rcu_unlock;
+               goto skip_unmap;
        }
 
        /* Establish migration ptes or remove ptes */
-       try_to_unmap(page, 1);
+       try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 
+skip_unmap:
        if (!page_mapped(page))
                rc = move_to_new_page(newpage, page);
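
Two changes meet here: a page whose ->mapping is already gone can now skip the unmap step yet still be migrated (the new skip_unmap path), and try_to_unmap() takes explicit TTU_* flags instead of a bare int migration argument. For reference, the flag set as it appears in include/linux/rmap.h around this kernel version (quoted from memory, treat as approximate):

    enum ttu_flags {
            TTU_UNMAP = 0,                  /* unmap mode */
            TTU_MIGRATION = 1,              /* migration mode */
            TTU_MUNLOCK = 2,                /* munlock mode */
            TTU_ACTION_MASK = 0xff,

            TTU_IGNORE_MLOCK = (1 << 8),    /* ignore mlock */
            TTU_IGNORE_ACCESS = (1 << 9),   /* don't age */
            TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
    };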
 
@@ -693,6 +697,8 @@ unlock:
                 * restored.
                 */
                list_del(&page->lru);
+               dec_zone_page_state(page, NR_ISOLATED_ANON +
+                               page_is_file_cache(page));
                putback_lru_page(page);
        }
 
@@ -802,7 +808,7 @@ static struct page *new_page_node(struct page *p, unsigned long private,
 
        *result = &pm->status;
 
-       return alloc_pages_node(pm->node,
+       return alloc_pages_exact_node(pm->node,
                                GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
 }
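
new_page_node() only runs after pm->node has been validated, so the nid < 0 fallback inside alloc_pages_node() is dead weight on this path; alloc_pages_exact_node() skips it. Roughly how the two differ in this era's include/linux/gfp.h (approximate, for illustration):

    static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
                                                unsigned int order)
    {
            /* Unknown node is current node */
            if (nid < 0)
                    nid = numa_node_id();
            return __alloc_pages(gfp_mask, order,
                                 node_zonelist(nid, gfp_mask));
    }

    static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
                                                      unsigned int order)
    {
            VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
            return __alloc_pages(gfp_mask, order,
                                 node_zonelist(nid, gfp_mask));
    }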
 
@@ -820,7 +826,6 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
        struct page_to_node *pp;
        LIST_HEAD(pagelist);
 
-       migrate_prep();
        down_read(&mm->mmap_sem);
 
        /*
@@ -863,8 +868,11 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
                        goto put_and_set;
 
                err = isolate_lru_page(page);
-               if (!err)
+               if (!err) {
                        list_add_tail(&page->lru, &pagelist);
+                       inc_zone_page_state(page, NR_ISOLATED_ANON +
+                                           page_is_file_cache(page));
+               }
 put_and_set:
                /*
                 * Either remove the duplicate refcount from
@@ -907,6 +915,9 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
        pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
        if (!pm)
                goto out;
+
+       migrate_prep();
+
        /*
         * Store a chunk of page_to_node array in a page,
         * but keep the last one as a marker
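
migrate_prep() boils down to lru_add_drain_all(), which schedules and waits for work on every CPU; that is too heavy to repeat for each chunk of the page_to_node array, so it is hoisted here to run once per move_pages() request, before do_move_page_to_node_array() takes mmap_sem. Its body in this era is roughly:

    int migrate_prep(void)
    {
            /*
             * Clear the LRU lists so pages can be isolated.
             * Note that pages may be moved off the LRU after we have
             * drained them. Those pages will fail to migrate like other
             * pages that may be busy.
             */
            lru_add_drain_all();

            return 0;
    }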
@@ -1026,7 +1037,7 @@ static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
        int err;
 
        for (i = 0; i < nr_pages; i += chunk_nr) {
-               if (chunk_nr + i > nr_pages)
+               if (chunk_nr > nr_pages - i)
                        chunk_nr = nr_pages - i;
 
                err = copy_from_user(chunk_pages, &pages[i],
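
The rewritten bound check guards against unsigned wrap-around: with a hostile nr_pages near ULONG_MAX, chunk_nr + i can overflow to a small value and skip the clamp, while nr_pages - i is safe because i < nr_pages inside the loop. A standalone demonstration (hypothetical values, not kernel code):

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned long nr_pages = ULONG_MAX - 4; /* hostile user input */
            unsigned long i = nr_pages - 8;         /* a late loop iteration */
            unsigned long chunk_nr = 16;

            /* old test: chunk_nr + i wraps to 3, so the clamp is skipped */
            printf("old check clamps: %s\n",
                   chunk_nr + i > nr_pages ? "yes" : "no (overflow)");

            /* new test: i < nr_pages, so nr_pages - i cannot wrap */
            printf("new check clamps: %s\n",
                   chunk_nr > nr_pages - i ? "yes" : "no");
            return 0;
    }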