diff --git a/mm/migrate.c b/mm/migrate.c
index a73504f..d3f3f7f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -9,7 +9,7 @@
  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
  * Hirokazu Takahashi <taka@valinux.co.jp>
  * Dave Hansen <haveblue@us.ibm.com>
- * Christoph Lameter <clameter@sgi.com>
+ * Christoph Lameter
  */
 
 #include <linux/migrate.h>
@@ -21,6 +21,7 @@
 #include <linux/mm_inline.h>
 #include <linux/nsproxy.h>
 #include <linux/pagevec.h>
+#include <linux/ksm.h>
 #include <linux/rmap.h>
 #include <linux/topology.h>
 #include <linux/cpu.h>
 #include <linux/vmalloc.h>
 #include <linux/security.h>
 #include <linux/memcontrol.h>
+#include <linux/syscalls.h>
+#include <linux/gfp.h>
 
 #include "internal.h"
 
 #define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
 
 /*
- * Isolate one page from the LRU lists. If successful put it onto
- * the indicated list with elevated page count.
- *
- * Result:
- *  -EBUSY: page not on LRU list
- *  0: page removed from LRU list and added to the specified list.
- */
-int isolate_lru_page(struct page *page, struct list_head *pagelist)
-{
-       int ret = -EBUSY;
-
-       if (PageLRU(page)) {
-               struct zone *zone = page_zone(page);
-
-               spin_lock_irq(&zone->lru_lock);
-               if (PageLRU(page) && get_page_unless_zero(page)) {
-                       ret = 0;
-                       ClearPageLRU(page);
-                       if (PageActive(page))
-                               del_page_from_active_list(zone, page);
-                       else
-                               del_page_from_inactive_list(zone, page);
-                       list_add_tail(&page->lru, pagelist);
-               }
-               spin_unlock_irq(&zone->lru_lock);
-       }
-       return ret;
-}
-
-/*
  * migrate_prep() needs to be called before we start compiling a list of pages
  * to be migrated using isolate_lru_page().
  */
@@ -82,23 +55,9 @@ int migrate_prep(void)
        return 0;
 }
 
-static inline void move_to_lru(struct page *page)
-{
-       if (PageActive(page)) {
-               /*
-                * lru_cache_add_active checks that
-                * the PG_active bit is off.
-                */
-               ClearPageActive(page);
-               lru_cache_add_active(page);
-       } else {
-               lru_cache_add(page);
-       }
-       put_page(page);
-}
-
 /*
- * Add isolated pages on the list back to the LRU.
+ * Add isolated pages on the list back to the LRU under page lock
+ * to avoid leaking evictable pages back onto unevictable list.
  *
  * returns the number of pages put back.
  */
@@ -110,7 +69,9 @@ int putback_lru_pages(struct list_head *l)
 
        list_for_each_entry_safe(page, page2, l, lru) {
                list_del(&page->lru);
-               move_to_lru(page);
+               dec_zone_page_state(page, NR_ISOLATED_ANON +
+                               page_is_file_cache(page));
+               putback_lru_page(page);
                count++;
        }
        return count;
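
putback_lru_pages() now undoes the NR_ISOLATED accounting that callers take when they isolate pages; a minimal caller-side sketch of the resulting pairing (mirroring do_move_page_to_node_array() later in this patch; `page` is assumed to be a page the caller already holds a reference on):

	/*
	 * Sketch: isolate_lru_page() now returns 0 or -EBUSY, and the
	 * caller owns the list_add and NR_ISOLATED bookkeeping itself.
	 */
	LIST_HEAD(pagelist);

	if (!isolate_lru_page(page)) {
		list_add_tail(&page->lru, &pagelist);
		inc_zone_page_state(page, NR_ISOLATED_ANON +
				    page_is_file_cache(page));
	}
	/* ... later, for pages that were not (or could not be) migrated: */
	putback_lru_pages(&pagelist);	/* drops NR_ISOLATED, puts pages back */
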
@@ -119,8 +80,8 @@ int putback_lru_pages(struct list_head *l)
 /*
  * Restore a potential migration pte to a working pte entry
  */
-static void remove_migration_pte(struct vm_area_struct *vma,
-               struct page *old, struct page *new)
+static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
+                                unsigned long addr, void *old)
 {
        struct mm_struct *mm = vma->vm_mm;
        swp_entry_t entry;
@@ -129,45 +90,37 @@ static void remove_migration_pte(struct vm_area_struct *vma,
        pmd_t *pmd;
        pte_t *ptep, pte;
        spinlock_t *ptl;
-       unsigned long addr = page_address_in_vma(new, vma);
-
-       if (addr == -EFAULT)
-               return;
 
        pgd = pgd_offset(mm, addr);
        if (!pgd_present(*pgd))
-                return;
+               goto out;
 
        pud = pud_offset(pgd, addr);
        if (!pud_present(*pud))
-                return;
+               goto out;
 
        pmd = pmd_offset(pud, addr);
        if (!pmd_present(*pmd))
-               return;
+               goto out;
 
        ptep = pte_offset_map(pmd, addr);
 
        if (!is_swap_pte(*ptep)) {
                pte_unmap(ptep);
-               return;
+               goto out;
        }
 
-       if (mem_cgroup_charge(new, mm, GFP_KERNEL)) {
-               pte_unmap(ptep);
-               return;
-       }
-
        ptl = pte_lockptr(mm, pmd);
        spin_lock(ptl);
        pte = *ptep;
        if (!is_swap_pte(pte))
-               goto out;
+               goto unlock;
 
        entry = pte_to_swp_entry(pte);
 
-       if (!is_migration_entry(entry) || migration_entry_to_page(entry) != old)
-               goto out;
+       if (!is_migration_entry(entry) ||
+           migration_entry_to_page(entry) != old)
+               goto unlock;
 
        get_page(new);
        pte = pte_mkold(mk_pte(new, vma->vm_page_prot));
@@ -182,59 +135,11 @@ static void remove_migration_pte(struct vm_area_struct *vma,
                page_add_file_rmap(new);
 
        /* No need to invalidate - it was non-present before */
-       update_mmu_cache(vma, addr, pte);
-
-out:
+       update_mmu_cache(vma, addr, ptep);
+unlock:
        pte_unmap_unlock(ptep, ptl);
-}
-
-/*
- * Note that remove_file_migration_ptes will only work on regular mappings,
- * Nonlinear mappings do not use migration entries.
- */
-static void remove_file_migration_ptes(struct page *old, struct page *new)
-{
-       struct vm_area_struct *vma;
-       struct address_space *mapping = page_mapping(new);
-       struct prio_tree_iter iter;
-       pgoff_t pgoff = new->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-
-       if (!mapping)
-               return;
-
-       spin_lock(&mapping->i_mmap_lock);
-
-       vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff)
-               remove_migration_pte(vma, old, new);
-
-       spin_unlock(&mapping->i_mmap_lock);
-}
-
-/*
- * Must hold mmap_sem lock on at least one of the vmas containing
- * the page so that the anon_vma cannot vanish.
- */
-static void remove_anon_migration_ptes(struct page *old, struct page *new)
-{
-       struct anon_vma *anon_vma;
-       struct vm_area_struct *vma;
-       unsigned long mapping;
-
-       mapping = (unsigned long)new->mapping;
-
-       if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
-               return;
-
-       /*
-        * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
-        */
-       anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
-       spin_lock(&anon_vma->lock);
-
-       list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
-               remove_migration_pte(vma, old, new);
-
-       spin_unlock(&anon_vma->lock);
+out:
+       return SWAP_AGAIN;
 }
 
 /*
@@ -243,10 +148,7 @@ static void remove_anon_migration_ptes(struct page *old, struct page *new)
  */
 static void remove_migration_ptes(struct page *old, struct page *new)
 {
-       if (PageAnon(new))
-               remove_anon_migration_ptes(old, new);
-       else
-               remove_file_migration_ptes(old, new);
+       rmap_walk(new, remove_migration_pte, old);
 }
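
remove_migration_ptes() is now a thin wrapper over the generic reverse-map walker. A hedged sketch of the rmap_walk() callback contract it relies on: the walker visits every (vma, addr) currently mapping the page and passes the third argument through as an opaque cookie (dump_one_mapping() below is hypothetical, for illustration only):

	/* kernel context: SWAP_AGAIN/SWAP_FAIL come from <linux/rmap.h> */
	static int dump_one_mapping(struct page *page, struct vm_area_struct *vma,
				    unsigned long addr, void *arg)
	{
		printk(KERN_DEBUG "page %p mapped at %#lx in mm %p (cookie %p)\n",
		       page, addr, vma->vm_mm, arg);
		return SWAP_AGAIN;	/* keep walking; anything else stops the walk */
	}

	/* usage: rmap_walk(page, dump_one_mapping, cookie); */
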
 
 /*
@@ -275,7 +177,15 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
 
        page = migration_entry_to_page(entry);
 
-       get_page(page);
+       /*
+        * Once radix-tree replacement of page migration has started,
+        * page_count *must* be zero. And we don't want to call
+        * wait_on_page_locked() against a page we hold no reference on,
+        * so we use get_page_unless_zero() here. Even if it fails, the
+        * page fault will simply occur again.
+        */
+       if (!get_page_unless_zero(page))
+               goto out;
        pte_unmap_unlock(ptep, ptl);
        wait_on_page_locked(page);
        put_page(page);
@@ -290,11 +200,12 @@ out:
  * The number of remaining references must be:
  * 1 for anonymous pages without a mapping
  * 2 for pages with a mapping
- * 3 for pages with a mapping and PagePrivate set.
+ * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
  */
 static int migrate_page_move_mapping(struct address_space *mapping,
                struct page *newpage, struct page *page)
 {
+       int expected_count;
        void **pslot;
 
        if (!mapping) {
@@ -304,14 +215,20 @@ static int migrate_page_move_mapping(struct address_space *mapping,
                return 0;
        }
 
-       write_lock_irq(&mapping->tree_lock);
+       spin_lock_irq(&mapping->tree_lock);
 
        pslot = radix_tree_lookup_slot(&mapping->page_tree,
                                        page_index(page));
 
-       if (page_count(page) != 2 + !!PagePrivate(page) ||
+       expected_count = 2 + page_has_private(page);
+       if (page_count(page) != expected_count ||
                        (struct page *)radix_tree_deref_slot(pslot) != page) {
-               write_unlock_irq(&mapping->tree_lock);
+               spin_unlock_irq(&mapping->tree_lock);
+               return -EAGAIN;
+       }
+
+       if (!page_freeze_refs(page, expected_count)) {
+               spin_unlock_irq(&mapping->tree_lock);
                return -EAGAIN;
        }
 
@@ -319,15 +236,14 @@ static int migrate_page_move_mapping(struct address_space *mapping,
         * Now we know that no one else is looking at the page.
         */
        get_page(newpage);      /* add cache reference */
-#ifdef CONFIG_SWAP
        if (PageSwapCache(page)) {
                SetPageSwapCache(newpage);
                set_page_private(newpage, page_private(page));
        }
-#endif
 
        radix_tree_replace_slot(pslot, newpage);
 
+       page_unfreeze_refs(page, expected_count);
        /*
         * Drop cache reference from old page.
         * We know this isn't the last reference.
@@ -346,8 +262,11 @@ static int migrate_page_move_mapping(struct address_space *mapping,
         */
        __dec_zone_page_state(page, NR_FILE_PAGES);
        __inc_zone_page_state(newpage, NR_FILE_PAGES);
-
-       write_unlock_irq(&mapping->tree_lock);
+       if (PageSwapBacked(page)) {
+               __dec_zone_page_state(page, NR_SHMEM);
+               __inc_zone_page_state(newpage, NR_SHMEM);
+       }
+       spin_unlock_irq(&mapping->tree_lock);
 
        return 0;
 }
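
The page_freeze_refs()/page_unfreeze_refs() pair is what makes the radix-tree replacement safe against concurrent lookups; a simplified sketch of the pattern (essentially the include/linux/pagemap.h helpers, with the debug checks omitted):

	static inline int page_freeze_refs(struct page *page, int count)
	{
		/*
		 * Replacement may only proceed if we can atomically take
		 * page->_count from the expected value to 0, i.e. nobody
		 * else holds - or can gain - a reference meanwhile.
		 */
		return atomic_cmpxchg(&page->_count, count, 0) == count;
	}

	static inline void page_unfreeze_refs(struct page *page, int count)
	{
		atomic_set(&page->_count, count);	/* re-publish with refs restored */
	}
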
@@ -365,8 +284,11 @@ static void migrate_page_copy(struct page *newpage, struct page *page)
                SetPageReferenced(newpage);
        if (PageUptodate(page))
                SetPageUptodate(newpage);
-       if (PageActive(page))
+       if (TestClearPageActive(page)) {
+               VM_BUG_ON(PageUnevictable(page));
                SetPageActive(newpage);
+       } else if (TestClearPageUnevictable(page))
+               SetPageUnevictable(newpage);
        if (PageChecked(page))
                SetPageChecked(newpage);
        if (PageMappedToDisk(page))
@@ -374,13 +296,20 @@ static void migrate_page_copy(struct page *newpage, struct page *page)
 
        if (PageDirty(page)) {
                clear_page_dirty_for_io(page);
-               set_page_dirty(newpage);
+               /*
+                * Want to mark the page and the radix tree as dirty, and
+                * redo the accounting that clear_page_dirty_for_io undid,
+                * but we can't use set_page_dirty because that function
+                * is actually a signal that all of the page has become dirty.
+                * Whereas only part of our page may be dirty.
+                */
+               __set_page_dirty_nobuffers(newpage);
        }
 
-#ifdef CONFIG_SWAP
+       mlock_migrate_page(newpage, page);
+       ksm_migrate_page(newpage, page);
+
        ClearPageSwapCache(page);
-#endif
-       ClearPageActive(page);
        ClearPagePrivate(page);
        set_page_private(page, 0);
        page->mapping = NULL;
@@ -407,7 +336,7 @@ EXPORT_SYMBOL(fail_migrate_page);
 
 /*
  * Common logic to directly migrate a single page suitable for
- * pages that do not use PagePrivate.
+ * pages that do not use PagePrivate/PagePrivate2.
  *
  * Pages are locked upon entry and exit.
  */
@@ -522,15 +451,12 @@ static int writeout(struct address_space *mapping, struct page *page)
        remove_migration_ptes(page, page);
 
        rc = mapping->a_ops->writepage(page, &wbc);
-       if (rc < 0)
-               /* I/O Error writing */
-               return -EIO;
 
        if (rc != AOP_WRITEPAGE_ACTIVATE)
                /* unlocked. Relock */
                lock_page(page);
 
-       return -EAGAIN;
+       return (rc < 0) ? -EIO : -EAGAIN;
 }
 
 /*
@@ -546,7 +472,7 @@ static int fallback_migrate_page(struct address_space *mapping,
         * Buffers may be managed in a filesystem specific way.
         * We must have no buffers or drop them.
         */
-       if (PagePrivate(page) &&
+       if (page_has_private(page) &&
            !try_to_release_page(page, GFP_KERNEL))
                return -EAGAIN;
 
@@ -559,6 +485,10 @@ static int fallback_migrate_page(struct address_space *mapping,
  *
  * The new page will have replaced the old page if this function
  * is successful.
+ *
+ * Return value:
+ *   < 0 - error code
+ *  == 0 - success
  */
 static int move_to_new_page(struct page *newpage, struct page *page)
 {
@@ -570,12 +500,14 @@ static int move_to_new_page(struct page *newpage, struct page *page)
         * establishing additional references. We are the only one
         * holding a reference to the new page at this point.
         */
-       if (TestSetPageLocked(newpage))
+       if (!trylock_page(newpage))
                BUG();
 
        /* Prepare mapping for the new page.*/
        newpage->index = page->index;
        newpage->mapping = page->mapping;
+       if (PageSwapBacked(page))
+               SetPageSwapBacked(newpage);
 
        mapping = page_mapping(page);
        if (!mapping)
@@ -593,10 +525,9 @@ static int move_to_new_page(struct page *newpage, struct page *page)
        else
                rc = fallback_migrate_page(mapping, newpage, page);
 
-       if (!rc) {
-               mem_cgroup_page_migration(page, newpage);
+       if (!rc)
                remove_migration_ptes(page, newpage);
-       } else
+       else
                newpage->mapping = NULL;
 
        unlock_page(newpage);
@@ -609,31 +540,57 @@ static int move_to_new_page(struct page *newpage, struct page *page)
  * to the newly allocated page in newpage.
  */
 static int unmap_and_move(new_page_t get_new_page, unsigned long private,
-                       struct page *page, int force)
+                       struct page *page, int force, int offlining)
 {
        int rc = 0;
        int *result = NULL;
        struct page *newpage = get_new_page(page, private, &result);
        int rcu_locked = 0;
        int charge = 0;
+       struct mem_cgroup *mem = NULL;
 
        if (!newpage)
                return -ENOMEM;
 
-       if (page_count(page) == 1)
+       if (page_count(page) == 1) {
                /* page was freed from under us. So we are done. */
                goto move_newpage;
+       }
 
+       /* prepare cgroup just returns 0 or -ENOMEM */
        rc = -EAGAIN;
-       if (TestSetPageLocked(page)) {
+
+       if (!trylock_page(page)) {
                if (!force)
                        goto move_newpage;
                lock_page(page);
        }
 
+       /*
+        * Only memory hotplug's offline_pages() caller has locked out KSM,
+        * and can safely migrate a KSM page.  The other cases have skipped
+        * PageKsm along with PageReserved - but it is only now when we have
+        * the page lock that we can be certain it will not go KSM beneath us
+        * (KSM will not upgrade a page from PageAnon to PageKsm when it sees
+        * its pagecount raised, but only here do we take the page lock which
+        * serializes that).
+        */
+       if (PageKsm(page) && !offlining) {
+               rc = -EBUSY;
+               goto unlock;
+       }
+
+       /* charge against new page */
+       charge = mem_cgroup_prepare_migration(page, &mem);
+       if (charge == -ENOMEM) {
+               rc = -ENOMEM;
+               goto unlock;
+       }
+       BUG_ON(charge);
+
        if (PageWriteback(page)) {
                if (!force)
-                       goto unlock;
+                       goto uncharge;
                wait_on_page_writeback(page);
        }
        /*
@@ -662,7 +619,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
         * free the metadata, so the page can be freed.
         */
        if (!page->mapping) {
-               if (!PageAnon(page) && PagePrivate(page)) {
+               if (!PageAnon(page) && page_has_private(page)) {
                        /*
                         * Go direct to try_to_free_buffers() here because
                         * a) that's what try_to_release_page() would do anyway
@@ -671,29 +628,27 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
                         *    needs to be effective.
                         */
                        try_to_free_buffers(page);
+                       goto rcu_unlock;
                }
-               goto rcu_unlock;
+               goto skip_unmap;
        }
 
-       charge = mem_cgroup_prepare_migration(page);
        /* Establish migration ptes or remove ptes */
-       try_to_unmap(page, 1);
+       try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 
+skip_unmap:
        if (!page_mapped(page))
                rc = move_to_new_page(newpage, page);
 
-       if (rc) {
+       if (rc)
                remove_migration_ptes(page, page);
-               if (charge)
-                       mem_cgroup_end_migration(page);
-       } else if (charge)
-               mem_cgroup_end_migration(newpage);
 rcu_unlock:
        if (rcu_locked)
                rcu_read_unlock();
-
+uncharge:
+       if (!charge)
+               mem_cgroup_end_migration(mem, page, newpage);
 unlock:
-
        unlock_page(page);
 
        if (rc != -EAGAIN) {
@@ -704,15 +659,19 @@ unlock:
                 * restored.
                 */
                list_del(&page->lru);
-               move_to_lru(page);
+               dec_zone_page_state(page, NR_ISOLATED_ANON +
+                               page_is_file_cache(page));
+               putback_lru_page(page);
        }
 
 move_newpage:
+
        /*
         * Move the new page to the LRU. If migration was not successful
         * then this will free the page.
         */
-       move_to_lru(newpage);
+       putback_lru_page(newpage);
+
        if (result) {
                if (rc)
                        *result = rc;
@@ -737,7 +696,7 @@ move_newpage:
  * Return: Number of pages not migrated or error code.
  */
 int migrate_pages(struct list_head *from,
-               new_page_t get_new_page, unsigned long private)
+               new_page_t get_new_page, unsigned long private, int offlining)
 {
        int retry = 1;
        int nr_failed = 0;
@@ -757,7 +716,7 @@ int migrate_pages(struct list_head *from,
                        cond_resched();
 
                        rc = unmap_and_move(get_new_page, private,
-                                               page, pass > 2);
+                                               page, pass > 2, offlining);
 
                        switch(rc) {
                        case -ENOMEM:
@@ -811,7 +770,7 @@ static struct page *new_page_node(struct page *p, unsigned long private,
 
        *result = &pm->status;
 
-       return alloc_pages_node(pm->node,
+       return alloc_pages_exact_node(pm->node,
                                GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
 }
 
@@ -819,9 +778,11 @@ static struct page *new_page_node(struct page *p, unsigned long private,
  * Move a set of pages as indicated in the pm array. The addr
  * field must be set to the virtual address of the page to be moved
  * and the node number must contain a valid target node.
+ * The pm array ends with node = MAX_NUMNODES.
  */
-static int do_move_pages(struct mm_struct *mm, struct page_to_node *pm,
-                               int migrate_all)
+static int do_move_page_to_node_array(struct mm_struct *mm,
+                                     struct page_to_node *pm,
+                                     int migrate_all)
 {
        int err;
        struct page_to_node *pp;
@@ -832,28 +793,27 @@ static int do_move_pages(struct mm_struct *mm, struct page_to_node *pm,
        /*
         * Build a list of pages to migrate
         */
-       migrate_prep();
        for (pp = pm; pp->node != MAX_NUMNODES; pp++) {
                struct vm_area_struct *vma;
                struct page *page;
 
-               /*
-                * A valid page pointer that will not match any of the
-                * pages that will be moved.
-                */
-               pp->page = ZERO_PAGE(0);
-
                err = -EFAULT;
                vma = find_vma(mm, pp->addr);
                if (!vma || !vma_migratable(vma))
                        goto set_status;
 
                page = follow_page(vma, pp->addr, FOLL_GET);
+
+               err = PTR_ERR(page);
+               if (IS_ERR(page))
+                       goto set_status;
+
                err = -ENOENT;
                if (!page)
                        goto set_status;
 
-               if (PageReserved(page))         /* Check for zero page */
+               /* Use PageReserved to check for zero page */
+               if (PageReserved(page) || PageKsm(page))
                        goto put_and_set;
 
                pp->page = page;
@@ -870,7 +830,12 @@ static int do_move_pages(struct mm_struct *mm, struct page_to_node *pm,
                                !migrate_all)
                        goto put_and_set;
 
-               err = isolate_lru_page(page, &pagelist);
+               err = isolate_lru_page(page);
+               if (!err) {
+                       list_add_tail(&page->lru, &pagelist);
+                       inc_zone_page_state(page, NR_ISOLATED_ANON +
+                                           page_is_file_cache(page));
+               }
 put_and_set:
                /*
                 * Either remove the duplicate refcount from
@@ -882,65 +847,194 @@ set_status:
                pp->status = err;
        }
 
+       err = 0;
        if (!list_empty(&pagelist))
                err = migrate_pages(&pagelist, new_page_node,
-                               (unsigned long)pm);
-       else
-               err = -ENOENT;
+                               (unsigned long)pm, 0);
 
        up_read(&mm->mmap_sem);
        return err;
 }
 
 /*
- * Determine the nodes of a list of pages. The addr in the pm array
- * must have been set to the virtual address of which we want to determine
- * the node number.
+ * Migrate an array of page addresses onto an array of nodes and fill
+ * the corresponding array of status.
+ */
+static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
+                        unsigned long nr_pages,
+                        const void __user * __user *pages,
+                        const int __user *nodes,
+                        int __user *status, int flags)
+{
+       struct page_to_node *pm;
+       nodemask_t task_nodes;
+       unsigned long chunk_nr_pages;
+       unsigned long chunk_start;
+       int err;
+
+       task_nodes = cpuset_mems_allowed(task);
+
+       err = -ENOMEM;
+       pm = (struct page_to_node *)__get_free_page(GFP_KERNEL);
+       if (!pm)
+               goto out;
+
+       migrate_prep();
+
+       /*
+        * Store a chunk of page_to_node array in a page,
+        * but keep the last one as a marker
+        */
+       chunk_nr_pages = (PAGE_SIZE / sizeof(struct page_to_node)) - 1;
+
+       for (chunk_start = 0;
+            chunk_start < nr_pages;
+            chunk_start += chunk_nr_pages) {
+               int j;
+
+               if (chunk_start + chunk_nr_pages > nr_pages)
+                       chunk_nr_pages = nr_pages - chunk_start;
+
+               /* fill the chunk pm with addrs and nodes from user-space */
+               for (j = 0; j < chunk_nr_pages; j++) {
+                       const void __user *p;
+                       int node;
+
+                       err = -EFAULT;
+                       if (get_user(p, pages + j + chunk_start))
+                               goto out_pm;
+                       pm[j].addr = (unsigned long) p;
+
+                       if (get_user(node, nodes + j + chunk_start))
+                               goto out_pm;
+
+                       err = -ENODEV;
+                       if (node < 0 || node >= MAX_NUMNODES)
+                               goto out_pm;
+
+                       if (!node_state(node, N_HIGH_MEMORY))
+                               goto out_pm;
+
+                       err = -EACCES;
+                       if (!node_isset(node, task_nodes))
+                               goto out_pm;
+
+                       pm[j].node = node;
+               }
+
+               /* End marker for this chunk */
+               pm[chunk_nr_pages].node = MAX_NUMNODES;
+
+               /* Migrate this chunk */
+               err = do_move_page_to_node_array(mm, pm,
+                                                flags & MPOL_MF_MOVE_ALL);
+               if (err < 0)
+                       goto out_pm;
+
+               /* Return status information */
+               for (j = 0; j < chunk_nr_pages; j++)
+                       if (put_user(pm[j].status, status + j + chunk_start)) {
+                               err = -EFAULT;
+                               goto out_pm;
+                       }
+       }
+       err = 0;
+
+out_pm:
+       free_page((unsigned long)pm);
+out:
+       return err;
+}
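
do_pages_move() is the kernel half of the move_pages(2) system call. A minimal userspace sketch of the move path, using the libnuma move_pages() wrapper from <numaif.h> (the target node 1 is an assumption; link with -lnuma):

	#include <numaif.h>	/* move_pages(), MPOL_MF_MOVE */
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	int main(void)
	{
		long pagesize = sysconf(_SC_PAGESIZE);
		void *pages[1];
		int nodes[1] = { 1 };	/* target node: an assumption */
		int status[1];
		void *buf;

		if (posix_memalign(&buf, pagesize, pagesize))
			return 1;
		((char *)buf)[0] = 1;	/* touch the page so it is populated */
		pages[0] = buf;

		/* pid 0 means the calling process; kernel side: do_pages_move() */
		if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) < 0)
			perror("move_pages");
		else
			printf("status[0] = %d (new node, or negative errno)\n",
			       status[0]);
		return 0;
	}
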
+
+/*
+ * Determine the nodes of an array of pages and store them in an array of status.
  */
-static int do_pages_stat(struct mm_struct *mm, struct page_to_node *pm)
+static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
+                               const void __user **pages, int *status)
 {
+       unsigned long i;
+
        down_read(&mm->mmap_sem);
 
-       for ( ; pm->node != MAX_NUMNODES; pm++) {
+       for (i = 0; i < nr_pages; i++) {
+               unsigned long addr = (unsigned long)(*pages);
                struct vm_area_struct *vma;
                struct page *page;
-               int err;
+               int err = -EFAULT;
 
-               err = -EFAULT;
-               vma = find_vma(mm, pm->addr);
+               vma = find_vma(mm, addr);
                if (!vma)
                        goto set_status;
 
-               page = follow_page(vma, pm->addr, 0);
+               page = follow_page(vma, addr, 0);
+
+               err = PTR_ERR(page);
+               if (IS_ERR(page))
+                       goto set_status;
+
                err = -ENOENT;
                /* Use PageReserved to check for zero page */
-               if (!page || PageReserved(page))
+               if (!page || PageReserved(page) || PageKsm(page))
                        goto set_status;
 
                err = page_to_nid(page);
 set_status:
-               pm->status = err;
+               *status = err;
+
+               pages++;
+               status++;
        }
 
        up_read(&mm->mmap_sem);
-       return 0;
+}
+
+/*
+ * Determine the nodes of a user array of pages and store them in
+ * a user array of status.
+ */
+static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
+                        const void __user * __user *pages,
+                        int __user *status)
+{
+#define DO_PAGES_STAT_CHUNK_NR 16
+       const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
+       int chunk_status[DO_PAGES_STAT_CHUNK_NR];
+
+       while (nr_pages) {
+               unsigned long chunk_nr;
+
+               chunk_nr = nr_pages;
+               if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
+                       chunk_nr = DO_PAGES_STAT_CHUNK_NR;
+
+               if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
+                       break;
+
+               do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
+
+               if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
+                       break;
+
+               pages += chunk_nr;
+               status += chunk_nr;
+               nr_pages -= chunk_nr;
+       }
+       return nr_pages ? -EFAULT : 0;
 }
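
Passing nodes == NULL to move_pages(2) routes the call through do_pages_stat() instead, turning it into a pure placement query; a hedged fragment continuing the previous example:

	int status[1];

	/* same `pages` as above; nodes == NULL selects the stat-only path */
	if (move_pages(0, 1, pages, NULL, status, 0) == 0)
		/* status[0] >= 0 is a node id; a negative value is -errno */
		printf("page resides on node %d\n", status[0]);
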
 
 /*
  * Move a list of pages in the address space of the currently executing
  * process.
  */
-asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
-                       const void __user * __user *pages,
-                       const int __user *nodes,
-                       int __user *status, int flags)
+SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
+               const void __user * __user *, pages,
+               const int __user *, nodes,
+               int __user *, status, int, flags)
 {
-       int err = 0;
-       int i;
+       const struct cred *cred = current_cred(), *tcred;
        struct task_struct *task;
-       nodemask_t task_nodes;
        struct mm_struct *mm;
-       struct page_to_node *pm = NULL;
+       int err;
 
        /* Check flags */
        if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
@@ -968,83 +1062,32 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
         * capabilities, superuser privileges or the same
         * userid as the target process.
         */
-       if ((current->euid != task->suid) && (current->euid != task->uid) &&
-           (current->uid != task->suid) && (current->uid != task->uid) &&
+       rcu_read_lock();
+       tcred = __task_cred(task);
+       if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
+           cred->uid  != tcred->suid && cred->uid  != tcred->uid &&
            !capable(CAP_SYS_NICE)) {
+               rcu_read_unlock();
                err = -EPERM;
-               goto out2;
+               goto out;
        }
+       rcu_read_unlock();
 
        err = security_task_movememory(task);
        if (err)
-               goto out2;
-
-
-       task_nodes = cpuset_mems_allowed(task);
-
-       /* Limit nr_pages so that the multiplication may not overflow */
-       if (nr_pages >= ULONG_MAX / sizeof(struct page_to_node) - 1) {
-               err = -E2BIG;
-               goto out2;
-       }
-
-       pm = vmalloc((nr_pages + 1) * sizeof(struct page_to_node));
-       if (!pm) {
-               err = -ENOMEM;
-               goto out2;
-       }
-
-       /*
-        * Get parameters from user space and initialize the pm
-        * array. Return various errors if the user did something wrong.
-        */
-       for (i = 0; i < nr_pages; i++) {
-               const void __user *p;
-
-               err = -EFAULT;
-               if (get_user(p, pages + i))
-                       goto out;
-
-               pm[i].addr = (unsigned long)p;
-               if (nodes) {
-                       int node;
-
-                       if (get_user(node, nodes + i))
-                               goto out;
-
-                       err = -ENODEV;
-                       if (!node_state(node, N_HIGH_MEMORY))
-                               goto out;
-
-                       err = -EACCES;
-                       if (!node_isset(node, task_nodes))
-                               goto out;
+               goto out;
 
-                       pm[i].node = node;
-               } else
-                       pm[i].node = 0; /* anything to not match MAX_NUMNODES */
+       if (nodes) {
+               err = do_pages_move(mm, task, nr_pages, pages, nodes, status,
+                                   flags);
+       } else {
+               err = do_pages_stat(mm, nr_pages, pages, status);
        }
-       /* End marker */
-       pm[nr_pages].node = MAX_NUMNODES;
-
-       if (nodes)
-               err = do_move_pages(mm, pm, flags & MPOL_MF_MOVE_ALL);
-       else
-               err = do_pages_stat(mm, pm);
-
-       if (err >= 0)
-               /* Return status information */
-               for (i = 0; i < nr_pages; i++)
-                       if (put_user(pm[i].status, status + i))
-                               err = -EFAULT;
 
 out:
-       vfree(pm);
-out2:
        mmput(mm);
        return err;
 }
-#endif
 
 /*
  * Call migration functions in the vma_ops that may prepare
@@ -1057,7 +1100,7 @@ int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
        struct vm_area_struct *vma;
        int err = 0;
 
-       for(vma = mm->mmap; vma->vm_next && !err; vma = vma->vm_next) {
+       for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
                if (vma->vm_ops && vma->vm_ops->migrate) {
                        err = vma->vm_ops->migrate(vma, to, from, flags);
                        if (err)
@@ -1066,3 +1109,4 @@ int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
        }
        return err;
 }
+#endif