memory unplug: migration by kernel
[safe/jmp/linux-2.6] mm/migrate.c
index 3f1e0c2..c8d8722 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -294,10 +294,10 @@ out:
 static int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page)
 {
-       struct page **radix_pointer;
+       void **pslot;
 
        if (!mapping) {
-               /* Anonymous page */
+               /* Anonymous page without mapping */
                if (page_count(page) != 1)
                        return -EAGAIN;
                return 0;
@@ -305,12 +305,11 @@ static int migrate_page_move_mapping(struct address_space *mapping,
 
        write_lock_irq(&mapping->tree_lock);
 
-       radix_pointer = (struct page **)radix_tree_lookup_slot(
-                                               &mapping->page_tree,
-                                               page_index(page));
+       pslot = radix_tree_lookup_slot(&mapping->page_tree,
+                                       page_index(page));
 
        if (page_count(page) != 2 + !!PagePrivate(page) ||
-                       *radix_pointer != page) {
+                       (struct page *)radix_tree_deref_slot(pslot) != page) {
                write_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }
@@ -318,7 +317,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
        /*
         * Now we know that no one else is looking at the page.
         */
-       get_page(newpage);
+       get_page(newpage);      /* add cache reference */
 #ifdef CONFIG_SWAP
        if (PageSwapCache(page)) {
                SetPageSwapCache(newpage);
@@ -326,8 +325,27 @@ static int migrate_page_move_mapping(struct address_space *mapping,
        }
 #endif
 
-       *radix_pointer = newpage;
+       radix_tree_replace_slot(pslot, newpage);
+
+       /*
+        * Drop cache reference from old page.
+        * We know this isn't the last reference.
+        */
        __put_page(page);
+
+       /*
+        * If moved to a different zone then also account
+        * the page for that zone. Other VM counters will be
+        * taken care of when we establish references to the
+        * new page and drop references to the old page.
+        *
+        * Note that anonymous pages are accounted for
+        * via NR_FILE_PAGES and NR_ANON_PAGES if they
+        * are mapped to swap space.
+        */
+       __dec_zone_page_state(page, NR_FILE_PAGES);
+       __inc_zone_page_state(newpage, NR_FILE_PAGES);
+
        write_unlock_irq(&mapping->tree_lock);
 
        return 0;
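
Condensed, the four hunks above leave migrate_page_move_mapping() doing the locked sequence sketched below. This is an illustrative rewrite in 2.6.23-era terms, not the literal post-patch function (swap cache handling and the anonymous fast path are trimmed). The reference arithmetic: the migration caller holds one reference from LRU isolation, the page cache holds one, and attached buffers (PagePrivate) add a third.

    /* Sketch only: condensation of the post-patch flow above. */
    static int move_mapping_sketch(struct address_space *mapping,
                                   struct page *newpage, struct page *page)
    {
            void **pslot;
            /* isolation ref + page cache ref (+ buffer heads, if any) */
            int expected = 2 + !!PagePrivate(page);

            write_lock_irq(&mapping->tree_lock);
            pslot = radix_tree_lookup_slot(&mapping->page_tree,
                                           page_index(page));

            /* Extra references or a changed slot: let the caller retry. */
            if (page_count(page) != expected ||
                (struct page *)radix_tree_deref_slot(pslot) != page) {
                    write_unlock_irq(&mapping->tree_lock);
                    return -EAGAIN;
            }

            get_page(newpage);                      /* new cache reference */
            radix_tree_replace_slot(pslot, newpage);
            __put_page(page);                       /* old cache reference */

            /* Keep per-zone NR_FILE_PAGES correct across a zone change. */
            __dec_zone_page_state(page, NR_FILE_PAGES);
            __inc_zone_page_state(newpage, NR_FILE_PAGES);

            write_unlock_irq(&mapping->tree_lock);
            return 0;
    }
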
@@ -409,6 +427,7 @@ int migrate_page(struct address_space *mapping,
 }
 EXPORT_SYMBOL(migrate_page);
 
+#ifdef CONFIG_BLOCK
 /*
  * Migration function for pages with buffers. This function can only be used
  * if the underlying filesystem guarantees that no other references to "page"
@@ -466,6 +485,7 @@ int buffer_migrate_page(struct address_space *mapping,
        return 0;
 }
 EXPORT_SYMBOL(buffer_migrate_page);
+#endif
 
 /*
  * Writeback a page to clean the dirty state
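
The new CONFIG_BLOCK guard exists because buffer_migrate_page() walks buffer heads, which are not built when the block layer is configured out. Filesystems opt in through their address_space_operations; the sketch below uses a hypothetical myfs, mirroring how ext2 and ext3 wire the callback up in-tree:

    #include <linux/fs.h>
    #include <linux/buffer_head.h>

    /* Hypothetical filesystem's aops; unrelated callbacks omitted. */
    static const struct address_space_operations myfs_aops = {
            /* .readpage, .writepage, ... */
    #ifdef CONFIG_BLOCK
            .migratepage    = buffer_migrate_page, /* moves page + buffers */
    #endif
    };
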
@@ -525,7 +545,7 @@ static int fallback_migrate_page(struct address_space *mapping,
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
-       if (page_has_buffers(page) &&
+       if (PagePrivate(page) &&
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;
 
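Switching the test from page_has_buffers() to PagePrivate() removes the dependency on <linux/buffer_head.h>, which is unusable without the block layer. On block-enabled builds nothing changes; as far as I can tell from this era's headers, the two tests are literally the same macro:

    /* From <linux/buffer_head.h> (2.6.23-era), per my reading: */
    #define page_has_buffers(page)  PagePrivate(page)

so the fallback path behaves identically wherever buffer heads can exist at all.
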
@@ -612,18 +632,35 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                        goto unlock;
                wait_on_page_writeback(page);
        }
-
        /*
-        * Establish migration ptes or remove ptes
+        * By the time try_to_unmap() returns, page->mapcount is 0, so we
+        * cannot tell whether the anon_vma was freed while we migrate the
+        * page. This rcu_read_lock() delays freeing of the anon_vma until
+        * migration ends; file cache pages are safe under the page lock.
+        */
+       rcu_read_lock();
+       /*
+        * This handles a corner case:
+        * when a new swap cache page is read in, it is linked to the LRU
+        * and treated as swapcache, but it has no rmap yet.
+        * Calling try_to_unmap() against a page whose mapping is NULL
+        * is a BUG, so handle it here.
         */
+       if (!page->mapping)
+               goto rcu_unlock;
+       /* Establish migration ptes or remove ptes */
        try_to_unmap(page, 1);
+
        if (!page_mapped(page))
                rc = move_to_new_page(newpage, page);
 
        if (rc)
                remove_migration_ptes(page, page);
+rcu_unlock:
+       rcu_read_unlock();
 
 unlock:
+
        unlock_page(page);
 
        if (rc != -EAGAIN) {
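
The rcu_read_lock() in this hunk leans on anon_vma being freed through an RCU-deferred mechanism (its slab cache is created with SLAB_DESTROY_BY_RCU), so its memory cannot be recycled while a read-side section is open. The sketch below shows that reader/writer contract in generic form: all names are hypothetical, and plain call_rcu() stands in for the slab flag.

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    /* Hypothetical object standing in for an anon_vma. */
    struct obj {
            int data;
            struct rcu_head rcu;
    };

    static struct obj *gp;          /* hypothetical shared pointer */

    static void obj_free_rcu(struct rcu_head *head)
    {
            kfree(container_of(head, struct obj, rcu));
    }

    /* Writer: unpublish, then defer the free past current readers. */
    static void obj_retire(struct obj *o)
    {
            rcu_assign_pointer(gp, NULL);
            call_rcu(&o->rcu, obj_free_rcu);
    }

    /* Reader: o's memory cannot be freed while this section is open. */
    static int obj_read(void)
    {
            struct obj *o;
            int val = 0;

            rcu_read_lock();
            o = rcu_dereference(gp);
            if (o)
                    val = o->data;
            rcu_read_unlock();
            return val;
    }
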
@@ -741,7 +778,8 @@ static struct page *new_page_node(struct page *p, unsigned long private,
 
        *result = &pm->status;
 
-       return alloc_pages_node(pm->node, GFP_HIGHUSER, 0);
+       return alloc_pages_node(pm->node,
+                               GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
 }
 
 /*
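
The new allocation flags do two jobs: GFP_HIGHUSER_MOVABLE places the new page among allocations the VM is allowed to move again later, and GFP_THISNODE forbids fallback to other nodes, so a NULL return means the target node is full rather than the page quietly landing elsewhere. A minimal sketch of the same pattern; the helper name is hypothetical:

    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Hypothetical helper: one movable page on @nid, or fail. */
    static struct page *alloc_movable_on_node(int nid)
    {
            /*
             * GFP_THISNODE suppresses fallback to other nodes, so NULL
             * means "no free movable page on this node", not "the
             * whole system is out of memory".
             */
            return alloc_pages_node(nid,
                            GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
    }
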
@@ -774,7 +812,7 @@ static int do_move_pages(struct mm_struct *mm, struct page_to_node *pm,
 
                err = -EFAULT;
                vma = find_vma(mm, pp->addr);
-               if (!vma)
+               if (!vma || !vma_migratable(vma))
                        goto set_status;
 
                page = follow_page(vma, pp->addr, FOLL_GET);
@@ -950,7 +988,8 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
                                goto out;
 
                        pm[i].node = node;
-               }
+               } else
+                       pm[i].node = 0; /* any value that won't match MAX_NUMNODES */
        }
        /* End marker */
        pm[nr_pages].node = MAX_NUMNODES;
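
From userspace, this code is driven by the move_pages(2) system call, wrapped by libnuma. The sketch below, assuming a NUMA machine where node 0 is online, asks the kernel to migrate one page of the calling process; the per-page verdicts land in status[], which is exactly what the *result / pm[].status plumbing above fills in. Build with -lnuma.

    #include <numaif.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            long psz = sysconf(_SC_PAGESIZE);
            void *buf;
            void *pages[1];
            int nodes[1] = { 0 };   /* target node; assumes node 0 exists */
            int status[1];

            if (posix_memalign(&buf, psz, psz))
                    return 1;
            memset(buf, 0, psz);    /* fault the page in first */
            pages[0] = buf;

            /* pid 0 = this process; MPOL_MF_MOVE moves private pages. */
            if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) < 0) {
                    perror("move_pages");
                    return 1;
            }
            printf("page is now on node %d (negative = errno)\n", status[0]);
            free(buf);
            return 0;
    }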