cgroups: make cftype.unregister_event() void-returning

diff --git a/mm/rmap.c b/mm/rmap.c
index ebee816..38a336e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -49,6 +49,7 @@
 #include <linux/swapops.h>
 #include <linux/slab.h>
 #include <linux/init.h>
+#include <linux/ksm.h>
 #include <linux/rmap.h>
 #include <linux/rcupdate.h>
 #include <linux/module.h>
 #include "internal.h"
 
 static struct kmem_cache *anon_vma_cachep;
+static struct kmem_cache *anon_vma_chain_cachep;
 
 static inline struct anon_vma *anon_vma_alloc(void)
 {
        return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
 }
 
-static inline void anon_vma_free(struct anon_vma *anon_vma)
+void anon_vma_free(struct anon_vma *anon_vma)
 {
        kmem_cache_free(anon_vma_cachep, anon_vma);
 }
 
+static inline struct anon_vma_chain *anon_vma_chain_alloc(void)
+{
+       return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL);
+}
+
+void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
+{
+       kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
+}
+
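
For context while reading this diff: the anon_vma_chain objects allocated above are the new linking structure this series adds in include/linux/rmap.h. A condensed sketch of that definition (field names as in the series; comments are editorial):

	struct anon_vma_chain {
		struct vm_area_struct *vma;	/* the VMA this chain entry belongs to */
		struct anon_vma *anon_vma;	/* the anon_vma it links that VMA to */
		struct list_head same_vma;	/* entry on vma->anon_vma_chain;
						 * protected by mmap_sem & page_table_lock */
		struct list_head same_anon_vma;	/* entry on anon_vma->head;
						 * protected by anon_vma->lock */
	};

Each VMA now carries a list of chain entries (one per anon_vma it is attached to), and each anon_vma carries a list of chain entries (one per VMA attached to it), replacing the old direct vma->anon_vma_node link.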
 /**
  * anon_vma_prepare - attach an anon_vma to a memory region
  * @vma: the memory region in question
@@ -102,87 +114,167 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
 int anon_vma_prepare(struct vm_area_struct *vma)
 {
        struct anon_vma *anon_vma = vma->anon_vma;
+       struct anon_vma_chain *avc;
 
        might_sleep();
        if (unlikely(!anon_vma)) {
                struct mm_struct *mm = vma->vm_mm;
                struct anon_vma *allocated;
 
+               avc = anon_vma_chain_alloc();
+               if (!avc)
+                       goto out_enomem;
+
                anon_vma = find_mergeable_anon_vma(vma);
                allocated = NULL;
                if (!anon_vma) {
                        anon_vma = anon_vma_alloc();
                        if (unlikely(!anon_vma))
-                               return -ENOMEM;
+                               goto out_enomem_free_avc;
                        allocated = anon_vma;
                }
-               spin_lock(&anon_vma->lock);
 
+               spin_lock(&anon_vma->lock);
                /* page_table_lock to protect against threads */
                spin_lock(&mm->page_table_lock);
                if (likely(!vma->anon_vma)) {
                        vma->anon_vma = anon_vma;
-                       list_add_tail(&vma->anon_vma_node, &anon_vma->head);
+                       avc->anon_vma = anon_vma;
+                       avc->vma = vma;
+                       list_add(&avc->same_vma, &vma->anon_vma_chain);
+                       list_add(&avc->same_anon_vma, &anon_vma->head);
                        allocated = NULL;
+                       avc = NULL;
                }
                spin_unlock(&mm->page_table_lock);
-
                spin_unlock(&anon_vma->lock);
+
                if (unlikely(allocated))
                        anon_vma_free(allocated);
+               if (unlikely(avc))
+                       anon_vma_chain_free(avc);
        }
        return 0;
+
+ out_enomem_free_avc:
+       anon_vma_chain_free(avc);
+ out_enomem:
+       return -ENOMEM;
 }
 
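A hedged usage sketch, loosely modeled on the anonymous-fault path of this era (do_anonymous_page() in mm/memory.c); the surrounding error handling is illustrative, not part of this patch:

	/* Sketch: fault on an anonymous VMA.  anon_vma_prepare() may now
	 * fail while allocating the anon_vma_chain as well as the anon_vma. */
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	page = alloc_zeroed_user_highpage_movable(vma, address);
	if (!page)
		return VM_FAULT_OOM;
	/* ... install the pte, then hook the page into rmap: */
	page_add_new_anon_rmap(page, vma, address);
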
-void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
+static void anon_vma_chain_link(struct vm_area_struct *vma,
+                               struct anon_vma_chain *avc,
+                               struct anon_vma *anon_vma)
 {
-       BUG_ON(vma->anon_vma != next->anon_vma);
-       list_del(&next->anon_vma_node);
+       avc->vma = vma;
+       avc->anon_vma = anon_vma;
+       list_add(&avc->same_vma, &vma->anon_vma_chain);
+
+       spin_lock(&anon_vma->lock);
+       list_add_tail(&avc->same_anon_vma, &anon_vma->head);
+       spin_unlock(&anon_vma->lock);
 }
 
-void __anon_vma_link(struct vm_area_struct *vma)
+/*
+ * Attach the anon_vmas from src to dst.
+ * Returns 0 on success, -ENOMEM on failure.
+ */
+int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
 {
-       struct anon_vma *anon_vma = vma->anon_vma;
+       struct anon_vma_chain *avc, *pavc;
+
+       list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
+               avc = anon_vma_chain_alloc();
+               if (!avc)
+                       goto enomem_failure;
+               anon_vma_chain_link(dst, avc, pavc->anon_vma);
+       }
+       return 0;
 
-       if (anon_vma)
-               list_add_tail(&vma->anon_vma_node, &anon_vma->head);
+ enomem_failure:
+       unlink_anon_vmas(dst);
+       return -ENOMEM;
 }
 
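Callers that duplicate a VMA within the same process (split, merge, move) copy the whole chain with anon_vma_clone(). A hedged sketch along the lines of the vma-split path in mm/mmap.c of this era:

	new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	*new = *vma;				/* inherit everything ... */
	INIT_LIST_HEAD(&new->anon_vma_chain);	/* ... except the chain itself */
	if (anon_vma_clone(new, vma)) {		/* link new to all of vma's anon_vmas */
		kmem_cache_free(vm_area_cachep, new);
		return -ENOMEM;
	}
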
-void anon_vma_link(struct vm_area_struct *vma)
+/*
+ * Attach vma to its own anon_vma, as well as to the anon_vmas that
+ * the corresponding VMA in the parent process is attached to.
+ * Returns 0 on success, non-zero on failure.
+ */
+int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 {
-       struct anon_vma *anon_vma = vma->anon_vma;
+       struct anon_vma_chain *avc;
+       struct anon_vma *anon_vma;
 
-       if (anon_vma) {
-               spin_lock(&anon_vma->lock);
-               list_add_tail(&vma->anon_vma_node, &anon_vma->head);
-               spin_unlock(&anon_vma->lock);
-       }
+       /* Don't bother if the parent process has no anon_vma here. */
+       if (!pvma->anon_vma)
+               return 0;
+
+       /*
+        * First, attach the new VMA to the parent VMA's anon_vmas,
+        * so rmap can find non-COWed pages in child processes.
+        */
+       if (anon_vma_clone(vma, pvma))
+               return -ENOMEM;
+
+       /* Then add our own anon_vma. */
+       anon_vma = anon_vma_alloc();
+       if (!anon_vma)
+               goto out_error;
+       avc = anon_vma_chain_alloc();
+       if (!avc)
+               goto out_error_free_anon_vma;
+       anon_vma_chain_link(vma, avc, anon_vma);
+       /* Mark this anon_vma as the one where our new (COWed) pages go. */
+       vma->anon_vma = anon_vma;
+
+       return 0;
+
+ out_error_free_anon_vma:
+       anon_vma_free(anon_vma);
+ out_error:
+       unlink_anon_vmas(vma);
+       return -ENOMEM;
 }
 
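At fork, by contrast, the child both clones the parent's chain and gets a fresh anon_vma of its own for new COWed pages; dup_mmap() in kernel/fork.c is adjusted by this series roughly as follows (sketch, surrounding code elided):

	tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
	if (!tmp)
		goto fail_nomem;
	*tmp = *mpnt;				/* copy the parent's VMA */
	INIT_LIST_HEAD(&tmp->anon_vma_chain);
	if (anon_vma_fork(tmp, mpnt))		/* clone chain + allocate own anon_vma */
		goto fail_nomem_anon_vma_fork;
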
-void anon_vma_unlink(struct vm_area_struct *vma)
+static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
 {
-       struct anon_vma *anon_vma = vma->anon_vma;
+       struct anon_vma *anon_vma = anon_vma_chain->anon_vma;
        int empty;
 
+       /* If anon_vma_fork fails, we can get an empty anon_vma_chain. */
        if (!anon_vma)
                return;
 
        spin_lock(&anon_vma->lock);
-       list_del(&vma->anon_vma_node);
+       list_del(&anon_vma_chain->same_anon_vma);
 
        /* We must garbage collect the anon_vma if it's empty */
-       empty = list_empty(&anon_vma->head);
+       empty = list_empty(&anon_vma->head) && !anonvma_external_refcount(anon_vma);
        spin_unlock(&anon_vma->lock);
 
        if (empty)
                anon_vma_free(anon_vma);
 }
 
+void unlink_anon_vmas(struct vm_area_struct *vma)
+{
+       struct anon_vma_chain *avc, *next;
+
+       /* Unlink each anon_vma chained to the VMA. */
+       list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
+               anon_vma_unlink(avc);
+               list_del(&avc->same_vma);
+               anon_vma_chain_free(avc);
+       }
+}
+
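
On the teardown side, unlink_anon_vmas() replaces the old per-VMA anon_vma_unlink() in the unmap path. A rough sketch of the call site in free_pgtables() (mm/memory.c), with the page-table freeing elided:

	while (vma) {
		struct vm_area_struct *next = vma->vm_next;
		unlink_anon_vmas(vma);	/* drop every chain entry, GC empty anon_vmas */
		unlink_file_vma(vma);
		/* ... free the page tables covered by this VMA ... */
		vma = next;
	}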
 static void anon_vma_ctor(void *data)
 {
        struct anon_vma *anon_vma = data;
 
        spin_lock_init(&anon_vma->lock);
+       anonvma_external_refcount_init(anon_vma);
        INIT_LIST_HEAD(&anon_vma->head);
 }
 
@@ -190,6 +282,7 @@ void __init anon_vma_init(void)
 {
        anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
                        0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
+       anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
 }
 
 /*
@@ -202,7 +295,7 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
        unsigned long anon_mapping;
 
        rcu_read_lock();
-       anon_mapping = (unsigned long) page->mapping;
+       anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
        if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                goto out;
        if (!page_mapped(page))
@@ -243,14 +336,13 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 
 /*
  * At what user virtual address is page expected in vma?
- * checking that the page matches the vma.
+ * Caller should check the page is actually part of the vma.
  */
 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 {
-       if (PageAnon(page)) {
-               if (vma->anon_vma != page_anon_vma(page))
-                       return -EFAULT;
-       } else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
+       if (PageAnon(page))
+               ;
+       else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
                if (!vma->vm_file ||
                    vma->vm_file->f_mapping != page->mapping)
                        return -EFAULT;
@@ -336,9 +428,9 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
  * Subfunctions of page_referenced: page_referenced_one called
  * repeatedly from either page_referenced_anon or page_referenced_file.
  */
-static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
-                              unsigned long address, unsigned int *mapcount,
-                              unsigned long *vm_flags)
+int page_referenced_one(struct page *page, struct vm_area_struct *vma,
+                       unsigned long address, unsigned int *mapcount,
+                       unsigned long *vm_flags)
 {
        struct mm_struct *mm = vma->vm_mm;
        pte_t *pte;
@@ -394,7 +486,7 @@ static int page_referenced_anon(struct page *page,
 {
        unsigned int mapcount;
        struct anon_vma *anon_vma;
-       struct vm_area_struct *vma;
+       struct anon_vma_chain *avc;
        int referenced = 0;
 
        anon_vma = page_lock_anon_vma(page);
@@ -402,7 +494,8 @@ static int page_referenced_anon(struct page *page,
                return referenced;
 
        mapcount = page_mapcount(page);
-       list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
+       list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
+               struct vm_area_struct *vma = avc->vma;
                unsigned long address = vma_address(page, vma);
                if (address == -EFAULT)
                        continue;
@@ -507,28 +600,30 @@ int page_referenced(struct page *page,
                    unsigned long *vm_flags)
 {
        int referenced = 0;
-
-       if (TestClearPageReferenced(page))
-               referenced++;
+       int we_locked = 0;
 
        *vm_flags = 0;
        if (page_mapped(page) && page_rmapping(page)) {
-               if (PageAnon(page))
+               if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
+                       we_locked = trylock_page(page);
+                       if (!we_locked) {
+                               referenced++;
+                               goto out;
+                       }
+               }
+               if (unlikely(PageKsm(page)))
+                       referenced += page_referenced_ksm(page, mem_cont,
+                                                               vm_flags);
+               else if (PageAnon(page))
                        referenced += page_referenced_anon(page, mem_cont,
                                                                vm_flags);
-               else if (is_locked)
+               else if (page->mapping)
                        referenced += page_referenced_file(page, mem_cont,
                                                                vm_flags);
-               else if (!trylock_page(page))
-                       referenced++;
-               else {
-                       if (page->mapping)
-                               referenced += page_referenced_file(page,
-                                                       mem_cont, vm_flags);
+               if (we_locked)
                        unlock_page(page);
-               }
        }
-
+out:
        if (page_test_and_clear_young(page))
                referenced++;
 
@@ -607,27 +702,60 @@ int page_mkclean(struct page *page)
 EXPORT_SYMBOL_GPL(page_mkclean);
 
 /**
+ * page_move_anon_rmap - move a page to our anon_vma
+ * @page:      the page to move to our anon_vma
+ * @vma:       the vma the page belongs to
+ * @address:   the user virtual address mapped
+ *
+ * When a page belongs exclusively to one process after a COW event,
+ * that page can be moved into the anon_vma that belongs to just that
+ * process, so the rmap code will not search the parent or sibling
+ * processes.
+ */
+void page_move_anon_rmap(struct page *page,
+       struct vm_area_struct *vma, unsigned long address)
+{
+       struct anon_vma *anon_vma = vma->anon_vma;
+
+       VM_BUG_ON(!PageLocked(page));
+       VM_BUG_ON(!anon_vma);
+       VM_BUG_ON(page->index != linear_page_index(vma, address));
+
+       anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+       page->mapping = (struct address_space *) anon_vma;
+}
+
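The intended caller is the COW-break path: once do_wp_page() decides it can reuse the page because it is mapped only here, it can re-home the page. A hedged sketch of that call site in mm/memory.c (the real code trylocks the page first and falls back to a slow path):

	if (PageAnon(old_page) && !PageKsm(old_page)) {
		lock_page(old_page);		/* sketch; real code trylocks first */
		if (reuse_swap_page(old_page)) {
			/*
			 * The page is all ours.  Move it to our anon_vma so
			 * the rmap code will not search our parent or siblings.
			 * Protected against the rmap code by the page lock.
			 */
			page_move_anon_rmap(old_page, vma, address);
			unlock_page(old_page);
			goto reuse;
		}
		unlock_page(old_page);
	}
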
+/**
  * __page_set_anon_rmap - setup new anonymous rmap
  * @page:      the page to add the mapping to
  * @vma:       the vm area in which the mapping is added
  * @address:   the user virtual address mapped
+ * @exclusive: the page is exclusively owned by the current process
  */
 static void __page_set_anon_rmap(struct page *page,
-       struct vm_area_struct *vma, unsigned long address)
+       struct vm_area_struct *vma, unsigned long address, int exclusive)
 {
        struct anon_vma *anon_vma = vma->anon_vma;
 
        BUG_ON(!anon_vma);
-       anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
-       page->mapping = (struct address_space *) anon_vma;
-
-       page->index = linear_page_index(vma, address);
 
        /*
-        * nr_mapped state can be updated without turning off
-        * interrupts because it is not modified via interrupt.
+        * If the page isn't exclusively mapped into this vma,
+        * we must use the _oldest_ possible anon_vma for the
+        * page mapping!
+        *
+        * So take the last AVC chain entry in the vma, which is
+        * the deepest ancestor, and use the anon_vma from that.
         */
-       __inc_zone_page_state(page, NR_ANON_PAGES);
+       if (!exclusive) {
+               struct anon_vma_chain *avc;
+               avc = list_entry(vma->anon_vma_chain.prev, struct anon_vma_chain, same_vma);
+               anon_vma = avc->anon_vma;
+       }
+
+       anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+       page->mapping = (struct address_space *) anon_vma;
+       page->index = linear_page_index(vma, address);
 }
 
 /**
@@ -652,9 +780,6 @@ static void __page_check_anon_rmap(struct page *page,
         * are initially only visible via the pagetables, and the pte is locked
         * over the call to page_add_new_anon_rmap.
         */
-       struct anon_vma *anon_vma = vma->anon_vma;
-       anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
-       BUG_ON(page->mapping != (struct address_space *)anon_vma);
        BUG_ON(page->index != linear_page_index(vma, address));
 #endif
 }
@@ -665,15 +790,24 @@ static void __page_check_anon_rmap(struct page *page,
  * @vma:       the vm area in which the mapping is added
  * @address:   the user virtual address mapped
  *
- * The caller needs to hold the pte lock and the page must be locked.
+ * The caller needs to hold the pte lock, and the page must be locked in
+ * the anon_vma case: to serialize mapping,index checking after setting,
+ * and to ensure that PageAnon is not being upgraded racily to PageKsm
+ * (but PageKsm is never downgraded to PageAnon).
  */
 void page_add_anon_rmap(struct page *page,
        struct vm_area_struct *vma, unsigned long address)
 {
+       int first = atomic_inc_and_test(&page->_mapcount);
+       if (first)
+               __inc_zone_page_state(page, NR_ANON_PAGES);
+       if (unlikely(PageKsm(page)))
+               return;
+
        VM_BUG_ON(!PageLocked(page));
        VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
-       if (atomic_inc_and_test(&page->_mapcount))
-               __page_set_anon_rmap(page, vma, address);
+       if (first)
+               __page_set_anon_rmap(page, vma, address, 0);
        else
                __page_check_anon_rmap(page, vma, address);
 }
@@ -694,7 +828,8 @@ void page_add_new_anon_rmap(struct page *page,
        VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
        SetPageSwapBacked(page);
        atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
-       __page_set_anon_rmap(page, vma, address);
+       __inc_zone_page_state(page, NR_ANON_PAGES);
+       __page_set_anon_rmap(page, vma, address, 1);
        if (page_evictable(page, vma))
                lru_cache_add_lru(page, LRU_ACTIVE_ANON);
        else
@@ -711,7 +846,7 @@ void page_add_file_rmap(struct page *page)
 {
        if (atomic_inc_and_test(&page->_mapcount)) {
                __inc_zone_page_state(page, NR_FILE_MAPPED);
-               mem_cgroup_update_mapped_file_stat(page, 1);
+               mem_cgroup_update_file_mapped(page, 1);
        }
 }
 
@@ -743,8 +878,8 @@ void page_remove_rmap(struct page *page)
                __dec_zone_page_state(page, NR_ANON_PAGES);
        } else {
                __dec_zone_page_state(page, NR_FILE_MAPPED);
+               mem_cgroup_update_file_mapped(page, -1);
        }
-       mem_cgroup_update_mapped_file_stat(page, -1);
        /*
         * It would be tidy to reset the PageAnon mapping here,
         * but that might overwrite a racing page_add_anon_rmap
@@ -760,8 +895,8 @@ void page_remove_rmap(struct page *page)
  * Subfunctions of try_to_unmap: try_to_unmap_one called
  * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
  */
-static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
-                           unsigned long address, enum ttu_flags flags)
+int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
+                    unsigned long address, enum ttu_flags flags)
 {
        struct mm_struct *mm = vma->vm_mm;
        pte_t *pte;
@@ -779,10 +914,9 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
         * skipped over this mm) then we should reactivate it.
         */
        if (!(flags & TTU_IGNORE_MLOCK)) {
-               if (vma->vm_flags & VM_LOCKED) {
-                       ret = SWAP_MLOCK;
-                       goto out_unmap;
-               }
+               if (vma->vm_flags & VM_LOCKED)
+                       goto out_mlock;
+
                if (TTU_ACTION(flags) == TTU_MUNLOCK)
                        goto out_unmap;
        }
@@ -806,9 +940,9 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 
        if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
                if (PageAnon(page))
-                       dec_mm_counter(mm, anon_rss);
+                       dec_mm_counter(mm, MM_ANONPAGES);
                else
-                       dec_mm_counter(mm, file_rss);
+                       dec_mm_counter(mm, MM_FILEPAGES);
                set_pte_at(mm, address, pte,
                                swp_entry_to_pte(make_hwpoison_entry(page)));
        } else if (PageAnon(page)) {
@@ -830,7 +964,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                                        list_add(&mm->mmlist, &init_mm.mmlist);
                                spin_unlock(&mmlist_lock);
                        }
-                       dec_mm_counter(mm, anon_rss);
+                       dec_mm_counter(mm, MM_ANONPAGES);
+                       inc_mm_counter(mm, MM_SWAPENTS);
                } else if (PAGE_MIGRATION) {
                        /*
                         * Store the pfn of the page in a special migration
@@ -848,25 +983,35 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                entry = make_migration_entry(page, pte_write(pteval));
                set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
        } else
-               dec_mm_counter(mm, file_rss);
+               dec_mm_counter(mm, MM_FILEPAGES);
 
        page_remove_rmap(page);
        page_cache_release(page);
 
 out_unmap:
        pte_unmap_unlock(pte, ptl);
+out:
+       return ret;
 
-       if (ret == SWAP_MLOCK) {
-               ret = SWAP_AGAIN;
-               if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
-                       if (vma->vm_flags & VM_LOCKED) {
-                               mlock_vma_page(page);
-                               ret = SWAP_MLOCK;
-                       }
-                       up_read(&vma->vm_mm->mmap_sem);
+out_mlock:
+       pte_unmap_unlock(pte, ptl);
+
+       /*
+        * We need mmap_sem locking; otherwise the VM_LOCKED check is
+        * racy and gives an unstable result. Also, we can't wait here,
+        * because we now hold anon_vma->lock or mapping->i_mmap_lock.
+        * If the trylock fails, the page remains on the evictable LRU,
+        * and vmscan can later retry moving it to the unevictable LRU
+        * if the page is actually mlocked.
+        */
+       if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
+               if (vma->vm_flags & VM_LOCKED) {
+                       mlock_vma_page(page);
+                       ret = SWAP_MLOCK;
                }
+               up_read(&vma->vm_mm->mmap_sem);
        }
-out:
        return ret;
 }
 
@@ -977,7 +1122,7 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 
                page_remove_rmap(page);
                page_cache_release(page);
-               dec_mm_counter(mm, file_rss);
+               dec_mm_counter(mm, MM_FILEPAGES);
                (*mapcount)--;
        }
        pte_unmap_unlock(pte - 1, ptl);
@@ -986,6 +1131,20 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
        return ret;
 }
 
+static bool is_vma_temporary_stack(struct vm_area_struct *vma)
+{
+       int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
+
+       if (!maybe_stack)
+               return false;
+
+       if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
+                                               VM_STACK_INCOMPLETE_SETUP)
+               return true;
+
+       return false;
+}
+
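
For reference, the VM_STACK_INCOMPLETE_SETUP flag tested above is managed by the exec code in the same series (sketch from fs/exec.c):

	/* __bprm_mm_init(): mark the temporary stack VMA ... */
	vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;

	/* ... and setup_arg_pages() clears the mark once the stack
	 * has been moved to its final location: */
	vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;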
 /**
  * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
  * rmap method
@@ -1005,15 +1164,30 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
 static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
 {
        struct anon_vma *anon_vma;
-       struct vm_area_struct *vma;
+       struct anon_vma_chain *avc;
        int ret = SWAP_AGAIN;
 
        anon_vma = page_lock_anon_vma(page);
        if (!anon_vma)
                return ret;
 
-       list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
-               unsigned long address = vma_address(page, vma);
+       list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
+               struct vm_area_struct *vma = avc->vma;
+               unsigned long address;
+
+               /*
+                * During exec, a temporary VMA is set up and later moved.
+                * The VMA is moved under the anon_vma lock but not the
+                * page tables leading to a race where migration cannot
+                * find the migration ptes. Rather than increasing the
+                * locking requirements of exec(), migration skips
+                * temporary VMAs until after exec() completes.
+                */
+               if (PAGE_MIGRATION && (flags & TTU_MIGRATION) &&
+                               is_vma_temporary_stack(vma))
+                       continue;
+
+               address = vma_address(page, vma);
                if (address == -EFAULT)
                        continue;
                ret = try_to_unmap_one(page, vma, address, flags);
@@ -1156,7 +1330,9 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
 
        BUG_ON(!PageLocked(page));
 
-       if (PageAnon(page))
+       if (unlikely(PageKsm(page)))
+               ret = try_to_unmap_ksm(page, flags);
+       else if (PageAnon(page))
                ret = try_to_unmap_anon(page, flags);
        else
                ret = try_to_unmap_file(page, flags);
@@ -1177,15 +1353,95 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
  *
  * SWAP_AGAIN  - no vma is holding page mlocked, or,
  * SWAP_AGAIN  - page mapped in mlocked vma -- couldn't acquire mmap sem
+ * SWAP_FAIL   - page cannot be located at present
  * SWAP_MLOCK  - page is now mlocked.
  */
 int try_to_munlock(struct page *page)
 {
        VM_BUG_ON(!PageLocked(page) || PageLRU(page));
 
-       if (PageAnon(page))
+       if (unlikely(PageKsm(page)))
+               return try_to_unmap_ksm(page, TTU_MUNLOCK);
+       else if (PageAnon(page))
                return try_to_unmap_anon(page, TTU_MUNLOCK);
        else
                return try_to_unmap_file(page, TTU_MUNLOCK);
 }
 
+#ifdef CONFIG_MIGRATION
+/*
+ * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
+ * Called by migrate.c to remove migration ptes, but might be used more later.
+ */
+static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
+               struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+       struct anon_vma *anon_vma;
+       struct anon_vma_chain *avc;
+       int ret = SWAP_AGAIN;
+
+       /*
+        * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
+        * because that depends on page_mapped(); but not all its usages
+        * are holding mmap_sem. Users without mmap_sem are required to
+        * take a reference count to prevent the anon_vma disappearing
+        */
+       anon_vma = page_anon_vma(page);
+       if (!anon_vma)
+               return ret;
+       spin_lock(&anon_vma->lock);
+       list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
+               struct vm_area_struct *vma = avc->vma;
+               unsigned long address = vma_address(page, vma);
+               if (address == -EFAULT)
+                       continue;
+               ret = rmap_one(page, vma, address, arg);
+               if (ret != SWAP_AGAIN)
+                       break;
+       }
+       spin_unlock(&anon_vma->lock);
+       return ret;
+}
+
+static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
+               struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+       struct address_space *mapping = page->mapping;
+       pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+       struct vm_area_struct *vma;
+       struct prio_tree_iter iter;
+       int ret = SWAP_AGAIN;
+
+       if (!mapping)
+               return ret;
+       spin_lock(&mapping->i_mmap_lock);
+       vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
+               unsigned long address = vma_address(page, vma);
+               if (address == -EFAULT)
+                       continue;
+               ret = rmap_one(page, vma, address, arg);
+               if (ret != SWAP_AGAIN)
+                       break;
+       }
+       /*
+        * No nonlinear handling: nonlinear vmas are always shared,
+        * so they never contain migration ptes.  Revisit this
+        * linear-only limitation if we ever need rmap_walk() on
+        * nonlinear vmas.
+        */
+       spin_unlock(&mapping->i_mmap_lock);
+       return ret;
+}
+
+int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
+               struct vm_area_struct *, unsigned long, void *), void *arg)
+{
+       VM_BUG_ON(!PageLocked(page));
+
+       if (unlikely(PageKsm(page)))
+               return rmap_walk_ksm(page, rmap_one, arg);
+       else if (PageAnon(page))
+               return rmap_walk_anon(page, rmap_one, arg);
+       else
+               return rmap_walk_file(page, rmap_one, arg);
+}
+#endif /* CONFIG_MIGRATION */
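
Finally, a note on the expected consumer: page migration uses rmap_walk() to fix up migration ptes after moving a page. The call site in mm/migrate.c of this era is essentially:

	/*
	 * Get rid of all migration entries and replace them by
	 * references to the indicated page.
	 */
	static void remove_migration_ptes(struct page *old, struct page *new)
	{
		rmap_walk(new, remove_migration_pte, old);
	}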