HWPOISON: Add poison check to page fault handling
[safe/jmp/linux-2.6] mm/memory.c
index 0da414c..44ea411 100644
 #include <linux/init.h>
 #include <linux/writeback.h>
 #include <linux/memcontrol.h>
+#include <linux/mmu_notifier.h>
+#include <linux/kallsyms.h>
+#include <linux/swapops.h>
+#include <linux/elf.h>
 
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
@@ -58,8 +62,7 @@
 #include <asm/tlbflush.h>
 #include <asm/pgtable.h>
 
-#include <linux/swapops.h>
-#include <linux/elf.h>
+#include "internal.h"
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 /* use the per-pgdat data instead for discontigmem - mbligh */
@@ -132,11 +135,12 @@ void pmd_clear_bad(pmd_t *pmd)
  * Note: this doesn't free the actual pages themselves. That
  * has been handled earlier when unmapping all the memory regions.
  */
-static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
+static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
+                          unsigned long addr)
 {
        pgtable_t token = pmd_pgtable(*pmd);
        pmd_clear(pmd);
-       pte_free_tlb(tlb, token);
+       pte_free_tlb(tlb, token, addr);
        tlb->mm->nr_ptes--;
 }
 
@@ -154,7 +158,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                next = pmd_addr_end(addr, end);
                if (pmd_none_or_clear_bad(pmd))
                        continue;
-               free_pte_range(tlb, pmd);
+               free_pte_range(tlb, pmd, addr);
        } while (pmd++, addr = next, addr != end);
 
        start &= PUD_MASK;
@@ -170,7 +174,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 
        pmd = pmd_offset(pud, start);
        pud_clear(pud);
-       pmd_free_tlb(tlb, pmd);
+       pmd_free_tlb(tlb, pmd, start);
 }
 
 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
@@ -203,7 +207,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 
        pud = pud_offset(pgd, start);
        pgd_clear(pgd);
-       pud_free_tlb(tlb, pud);
+       pud_free_tlb(tlb, pud, start);
 }
 
 /*
@@ -211,7 +215,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
  *
  * Must be called with pagetable lock held.
  */
-void free_pgd_range(struct mmu_gather **tlb,
+void free_pgd_range(struct mmu_gather *tlb,
                        unsigned long addr, unsigned long end,
                        unsigned long floor, unsigned long ceiling)
 {
@@ -262,16 +266,16 @@ void free_pgd_range(struct mmu_gather **tlb,
                return;
 
        start = addr;
-       pgd = pgd_offset((*tlb)->mm, addr);
+       pgd = pgd_offset(tlb->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
-               free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
+               free_pud_range(tlb, pgd, addr, next, floor, ceiling);
        } while (pgd++, addr = next, addr != end);
 }
 
-void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
+void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
                unsigned long floor, unsigned long ceiling)
 {
        while (vma) {
@@ -311,6 +315,21 @@ int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
        if (!new)
                return -ENOMEM;
 
+       /*
+        * Ensure all pte setup (eg. pte page lock and page clearing) are
+        * visible before the pte is made visible to other CPUs by being
+        * put into page tables.
+        *
+        * The other side of the story is the pointer chasing in the page
+        * table walking code (when walking the page table without locking;
+        * ie. most of the time). Fortunately, these data accesses consist
+        * of a chain of data-dependent loads, meaning most CPUs (alpha
+        * being the notable exception) will already guarantee loads are
+        * seen in-order. See the alpha page table accessors for the
+        * smp_read_barrier_depends() barriers in page table walking code.
+        */
+       smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
+
        spin_lock(&mm->page_table_lock);
        if (!pmd_present(*pmd)) {       /* Has another populated it ? */
                mm->nr_ptes++;
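The barrier added to __pte_alloc() above is the writer half of the classic lock-free publish pattern: finish initializing the object, issue smp_wmb(), then make the pointer visible; readers that chase the pointer rely on the data dependency (plus smp_read_barrier_depends() on Alpha) rather than a read barrier. A minimal sketch of the same pattern outside the page-table code; the names are illustrative, not from this patch:

    struct foo { int ready; };
    static struct foo *published;       /* read locklessly elsewhere */

    void publish(struct foo *f)
    {
            f->ready = 1;               /* finish all initialization first */
            smp_wmb();                  /* order the init before the pointer store */
            published = f;              /* now lockless readers may find it */
    }

    struct foo *reader(void)
    {
            struct foo *f = published;          /* a real reader would also use ACCESS_ONCE() */
            smp_read_barrier_depends();         /* no-op everywhere but Alpha */
            return f;                           /* if f != NULL, f->ready is already 1 */
    }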
@@ -329,6 +348,8 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
        if (!new)
                return -ENOMEM;
 
+       smp_wmb(); /* See comment in __pte_alloc */
+
        spin_lock(&init_mm.page_table_lock);
        if (!pmd_present(*pmd)) {       /* Has another populated it ? */
                pmd_populate_kernel(&init_mm, pmd, new);
@@ -355,14 +376,65 @@ static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
  *
  * The calling function must still handle the error.
  */
-void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
-{
-       printk(KERN_ERR "Bad pte = %08llx, process = %s, "
-                       "vm_flags = %lx, vaddr = %lx\n",
-               (long long)pte_val(pte),
-               (vma->vm_mm == current->mm ? current->comm : "???"),
-               vma->vm_flags, vaddr);
+static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
+                         pte_t pte, struct page *page)
+{
+       pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
+       pud_t *pud = pud_offset(pgd, addr);
+       pmd_t *pmd = pmd_offset(pud, addr);
+       struct address_space *mapping;
+       pgoff_t index;
+       static unsigned long resume;
+       static unsigned long nr_shown;
+       static unsigned long nr_unshown;
+
+       /*
+        * Allow a burst of 60 reports, then keep quiet for that minute;
+        * or allow a steady drip of one report per second.
+        */
+       if (nr_shown == 60) {
+               if (time_before(jiffies, resume)) {
+                       nr_unshown++;
+                       return;
+               }
+               if (nr_unshown) {
+                       printk(KERN_ALERT
+                               "BUG: Bad page map: %lu messages suppressed\n",
+                               nr_unshown);
+                       nr_unshown = 0;
+               }
+               nr_shown = 0;
+       }
+       if (nr_shown++ == 0)
+               resume = jiffies + 60 * HZ;
+
+       mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
+       index = linear_page_index(vma, addr);
+
+       printk(KERN_ALERT
+               "BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
+               current->comm,
+               (long long)pte_val(pte), (long long)pmd_val(*pmd));
+       if (page) {
+               printk(KERN_ALERT
+               "page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
+               page, (void *)page->flags, page_count(page),
+               page_mapcount(page), page->mapping, page->index);
+       }
+       printk(KERN_ALERT
+               "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
+               (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
+       /*
+        * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
+        */
+       if (vma->vm_ops)
+               print_symbol(KERN_ALERT "vma->vm_ops->fault: %s\n",
+                               (unsigned long)vma->vm_ops->fault);
+       if (vma->vm_file && vma->vm_file->f_op)
+               print_symbol(KERN_ALERT "vma->vm_file->f_op->mmap: %s\n",
+                               (unsigned long)vma->vm_file->f_op->mmap);
        dump_stack();
+       add_taint(TAINT_BAD_PAGE);
 }
 
 static inline int is_cow_mapping(unsigned int flags)
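The print_bad_pte() rewrite above throttles its own output: up to 60 reports may be printed back to back, further reports within that minute are only counted, and the suppressed count is flushed once a report arrives after the window expires. The same burst-then-quiet pattern pulled out as a standalone helper, purely for illustration (names are hypothetical):

    static void throttled_report(const char *msg)
    {
            static unsigned long resume, nr_shown, nr_unshown;

            if (nr_shown == 60) {
                    if (time_before(jiffies, resume)) {
                            nr_unshown++;           /* quiet period: just count */
                            return;
                    }
                    if (nr_unshown) {
                            printk(KERN_ALERT "%lu reports suppressed\n",
                                   nr_unshown);
                            nr_unshown = 0;
                    }
                    nr_shown = 0;                   /* start a new burst */
            }
            if (nr_shown++ == 0)
                    resume = jiffies + 60 * HZ;     /* window closes in one minute */
            printk(KERN_ALERT "%s\n", msg);
    }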
@@ -371,33 +443,37 @@ static inline int is_cow_mapping(unsigned int flags)
 }
 
 /*
- * This function gets the "struct page" associated with a pte or returns
- * NULL if no "struct page" is associated with the pte.
+ * vm_normal_page -- This function gets the "struct page" associated with a pte.
  *
- * A raw VM_PFNMAP mapping (ie. one that is not COWed) may not have any "struct
- * page" backing, and even if they do, they are not refcounted. COWed pages of
- * a VM_PFNMAP do always have a struct page, and they are normally refcounted
- * (they are _normal_ pages).
+ * "Special" mappings do not wish to be associated with a "struct page" (either
+ * it doesn't exist, or it exists but they don't want to touch it). In this
+ * case, NULL is returned here. "Normal" mappings do have a struct page.
  *
- * So a raw PFNMAP mapping will have each page table entry just pointing
- * to a page frame number, and as far as the VM layer is concerned, those do
- * not have pages associated with them - even if the PFN might point to memory
- * that otherwise is perfectly fine and has a "struct page".
+ * There are 2 broad cases. Firstly, an architecture may define a pte_special()
+ * pte bit, in which case this function is trivial. Secondly, an architecture
+ * may not have a spare pte bit, which requires a more complicated scheme,
+ * described below.
+ *
+ * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
+ * special mapping (even if there are underlying and valid "struct pages").
+ * COWed pages of a VM_PFNMAP are always normal.
  *
  * The way we recognize COWed pages within VM_PFNMAP mappings is through the
  * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
- * set, and the vm_pgoff will point to the first PFN mapped: thus every
- * page that is a raw mapping will always honor the rule
+ * set, and the vm_pgoff will point to the first PFN mapped: thus every special
+ * mapping will always honor the rule
  *
  *     pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
  *
- * A call to vm_normal_page() will return NULL for such a page.
+ * And for normal mappings this is false.
+ *
+ * This restricts such mappings to be a linear translation from virtual address
+ * to pfn. To get around this restriction, we allow arbitrary mappings so long
+ * as the vma is not a COW mapping; in that case, we know that all ptes are
+ * special (because none can have been COWed).
  *
- * If the page doesn't follow the "remap_pfn_range()" rule in a VM_PFNMAP
- * then the page has been COW'ed.  A COW'ed page _does_ have a "struct page"
- * associated with it even if it is in a VM_PFNMAP range.  Calling
- * vm_normal_page() on such a page will therefore return the "struct page".
  *
+ * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
  *
  * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
  * page" backing, however the difference is that _all_ pages with a struct
@@ -407,24 +483,35 @@ static inline int is_cow_mapping(unsigned int flags)
  * advantage is that we don't have to follow the strict linearity rule of
  * PFNMAP mappings in order to support COWable mappings.
  *
- * A call to vm_normal_page() with a VM_MIXEDMAP mapping will return the
- * associated "struct page" or NULL for memory not backed by a "struct page".
- *
- *
- * All other mappings should have a valid struct page, which will be
- * returned by a call to vm_normal_page().
  */
-struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+#ifdef __HAVE_ARCH_PTE_SPECIAL
+# define HAVE_PTE_SPECIAL 1
+#else
+# define HAVE_PTE_SPECIAL 0
+#endif
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+                               pte_t pte)
 {
        unsigned long pfn = pte_pfn(pte);
 
+       if (HAVE_PTE_SPECIAL) {
+               if (likely(!pte_special(pte)))
+                       goto check_pfn;
+               if (!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)))
+                       print_bad_pte(vma, addr, pte, NULL);
+               return NULL;
+       }
+
+       /* !HAVE_PTE_SPECIAL case follows: */
+
        if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
                if (vma->vm_flags & VM_MIXEDMAP) {
                        if (!pfn_valid(pfn))
                                return NULL;
                        goto out;
                } else {
-                       unsigned long off = (addr-vma->vm_start) >> PAGE_SHIFT;
+                       unsigned long off;
+                       off = (addr - vma->vm_start) >> PAGE_SHIFT;
                        if (pfn == vma->vm_pgoff + off)
                                return NULL;
                        if (!is_cow_mapping(vma->vm_flags))
@@ -432,25 +519,15 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_
                }
        }
 
-#ifdef CONFIG_DEBUG_VM
-       /*
-        * Add some anal sanity checks for now. Eventually,
-        * we should just do "return pfn_to_page(pfn)", but
-        * in the meantime we check that we get a valid pfn,
-        * and that the resulting page looks ok.
-        */
-       if (unlikely(!pfn_valid(pfn))) {
-               print_bad_pte(vma, pte, addr);
+check_pfn:
+       if (unlikely(pfn > highest_memmap_pfn)) {
+               print_bad_pte(vma, addr, pte, NULL);
                return NULL;
        }
-#endif
 
        /*
-        * NOTE! We still have PageReserved() pages in the page 
-        * tables. 
-        *
-        * The PAGE_ZERO() pages and various VDSO mappings can
-        * cause them to exist.
+        * NOTE! We still have PageReserved() pages in the page tables.
+        * eg. VDSO mappings can cause them to exist.
         */
 out:
        return pfn_to_page(pfn);
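The linearity rule quoted in the comment above is what lets vm_normal_page() tell raw PFNMAP ptes from COWed ones when there is no pte_special bit. A worked example with hypothetical numbers: a VM_PFNMAP vma with vm_start == 0x40000000 and vm_pgoff == 0x100 maps pfn 0x100 at its first page. A pte for addr == 0x40002000 that still points at pfn 0x102 satisfies pfn == vm_pgoff + ((addr - vm_start) >> PAGE_SHIFT), since 0x100 + 2 == 0x102, so it is treated as special and NULL is returned. A page COWed at the same address lives at some unrelated pfn, fails the identity, and is returned as a normal, refcounted page.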
@@ -627,6 +704,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        unsigned long next;
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;
+       int ret;
 
        /*
         * Don't copy ptes where a page fault will fill them correctly.
@@ -642,17 +720,43 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        if (is_vm_hugetlb_page(vma))
                return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
+       if (unlikely(is_pfn_mapping(vma))) {
+               /*
+                * We do not free on error cases below as remove_vma
+                * gets called on error from higher level routine
+                */
+               ret = track_pfn_vma_copy(vma);
+               if (ret)
+                       return ret;
+       }
+
+       /*
+        * We need to invalidate the secondary MMU mappings only when
+        * there could be a permission downgrade on the ptes of the
+        * parent mm. And a permission downgrade will only happen if
+        * is_cow_mapping() returns true.
+        */
+       if (is_cow_mapping(vma->vm_flags))
+               mmu_notifier_invalidate_range_start(src_mm, addr, end);
+
+       ret = 0;
        dst_pgd = pgd_offset(dst_mm, addr);
        src_pgd = pgd_offset(src_mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(src_pgd))
                        continue;
-               if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
-                                               vma, addr, next))
-                       return -ENOMEM;
+               if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
+                                           vma, addr, next))) {
+                       ret = -ENOMEM;
+                       break;
+               }
        } while (dst_pgd++, src_pgd++, addr = next, addr != end);
-       return 0;
+
+       if (is_cow_mapping(vma->vm_flags))
+               mmu_notifier_invalidate_range_end(src_mm,
+                                                 vma->vm_start, end);
+       return ret;
 }
 
 static unsigned long zap_pte_range(struct mmu_gather *tlb,
@@ -714,11 +818,14 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                        else {
                                if (pte_dirty(ptent))
                                        set_page_dirty(page);
-                               if (pte_young(ptent))
-                                       SetPageReferenced(page);
+                               if (pte_young(ptent) &&
+                                   likely(!VM_SequentialReadHint(vma)))
+                                       mark_page_accessed(page);
                                file_rss--;
                        }
-                       page_remove_rmap(page, vma);
+                       page_remove_rmap(page);
+                       if (unlikely(page_mapcount(page) < 0))
+                               print_bad_pte(vma, addr, ptent, page);
                        tlb_remove_page(tlb, page);
                        continue;
                }
@@ -728,8 +835,12 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                 */
                if (unlikely(details))
                        continue;
-               if (!pte_file(ptent))
-                       free_swap_and_cache(pte_to_swp_entry(ptent));
+               if (pte_file(ptent)) {
+                       if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
+                               print_bad_pte(vma, addr, ptent, NULL);
+               } else if
+                 (unlikely(!free_swap_and_cache(pte_to_swp_entry(ptent))))
+                       print_bad_pte(vma, addr, ptent, NULL);
                pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
        } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
 
@@ -856,7 +967,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
        unsigned long start = start_addr;
        spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
        int fullmm = (*tlbp)->fullmm;
+       struct mm_struct *mm = vma->vm_mm;
 
+       mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
        for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
                unsigned long end;
 
@@ -870,6 +983,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
                if (vma->vm_flags & VM_ACCOUNT)
                        *nr_accounted += (end - start) >> PAGE_SHIFT;
 
+               if (unlikely(is_pfn_mapping(vma)))
+                       untrack_pfn_vma(vma, 0, 0);
+
                while (start != end) {
                        if (!tlb_start_valid) {
                                tlb_start = start;
@@ -877,9 +993,23 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
                        }
 
                        if (unlikely(is_vm_hugetlb_page(vma))) {
-                               unmap_hugepage_range(vma, start, end);
-                               zap_work -= (end - start) /
-                                               (HPAGE_SIZE / PAGE_SIZE);
+                               /*
+                                * It is undesirable to test vma->vm_file as it
+                                * should be non-null for valid hugetlb area.
+                                * However, vm_file will be NULL in the error
+                                * cleanup path of do_mmap_pgoff. When
+                                * hugetlbfs ->mmap method fails,
+                                * do_mmap_pgoff() nullifies vma->vm_file
+                                * before calling this function to clean up.
+                                * Since no pte has actually been setup, it is
+                                * safe to do nothing in this case.
+                                */
+                               if (vma->vm_file) {
+                                       unmap_hugepage_range(vma, start, end, NULL);
+                                       zap_work -= (end - start) /
+                                       pages_per_huge_page(hstate_vma(vma));
+                               }
+
                                start = end;
                        } else
                                start = unmap_page_range(*tlbp, vma,
@@ -907,6 +1037,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
                }
        }
 out:
+       mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
        return start;   /* which is now the end (or restart) address */
 }
 
@@ -934,6 +1065,29 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
        return end;
 }
 
+/**
+ * zap_vma_ptes - remove ptes mapping the vma
+ * @vma: vm_area_struct holding ptes to be zapped
+ * @address: starting address of pages to zap
+ * @size: number of bytes to zap
+ *
+ * This function only unmaps ptes assigned to VM_PFNMAP vmas.
+ *
+ * The entire address range must be fully contained within the vma.
+ *
+ * Returns 0 if successful.
+ */
+int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+               unsigned long size)
+{
+       if (address < vma->vm_start || address + size > vma->vm_end ||
+                       !(vma->vm_flags & VM_PFNMAP))
+               return -1;
+       zap_page_range(vma, address, size, NULL);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(zap_vma_ptes);
+
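zap_vma_ptes() above is exported for drivers that manage VM_PFNMAP mappings and need to tear down the ptes of a range they previously populated. A minimal sketch of a caller; the driver helper is hypothetical, only the zap_vma_ptes() call comes from this patch:

    /* Hypothetical driver helper: drop the user mapping of one buffer. */
    static void drv_unmap_buffer(struct vm_area_struct *vma,
                                 unsigned long uaddr, unsigned long len)
    {
            /* The range must lie entirely inside the VM_PFNMAP vma. */
            if (zap_vma_ptes(vma, uaddr, len))
                    printk(KERN_WARNING "drv: could not zap user ptes\n");
    }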
 /*
  * Do a quick page-table lookup for a single page.
  */
@@ -960,31 +1114,37 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                goto no_page_table;
 
        pud = pud_offset(pgd, address);
-       if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+       if (pud_none(*pud))
                goto no_page_table;
-       
-       pmd = pmd_offset(pud, address);
-       if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+       if (pud_huge(*pud)) {
+               BUG_ON(flags & FOLL_GET);
+               page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
+               goto out;
+       }
+       if (unlikely(pud_bad(*pud)))
                goto no_page_table;
 
+       pmd = pmd_offset(pud, address);
+       if (pmd_none(*pmd))
+               goto no_page_table;
        if (pmd_huge(*pmd)) {
                BUG_ON(flags & FOLL_GET);
                page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
                goto out;
        }
+       if (unlikely(pmd_bad(*pmd)))
+               goto no_page_table;
 
        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
-       if (!ptep)
-               goto out;
 
        pte = *ptep;
        if (!pte_present(pte))
-               goto unlock;
+               goto no_page;
        if ((flags & FOLL_WRITE) && !pte_write(pte))
                goto unlock;
        page = vm_normal_page(vma, address, pte);
        if (unlikely(!page))
-               goto unlock;
+               goto bad_page;
 
        if (flags & FOLL_GET)
                get_page(page);
@@ -992,6 +1152,11 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                if ((flags & FOLL_WRITE) &&
                    !pte_dirty(pte) && !PageDirty(page))
                        set_page_dirty(page);
+               /*
+                * pte_mkyoung() would be more correct here, but atomic care
+                * is needed to avoid losing the dirty bit: it is easier to use
+                * mark_page_accessed().
+                */
                mark_page_accessed(page);
        }
 unlock:
@@ -999,6 +1164,15 @@ unlock:
 out:
        return page;
 
+bad_page:
+       pte_unmap_unlock(ptep, ptl);
+       return ERR_PTR(-EFAULT);
+
+no_page:
+       pte_unmap_unlock(ptep, ptl);
+       if (!pte_none(pte))
+               return page;
+       /* Fall through to ZERO_PAGE handling */
 no_page_table:
        /*
         * When core dumping an enormous anonymous area that nobody
@@ -1013,14 +1187,38 @@ no_page_table:
        return page;
 }
 
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-               unsigned long start, int len, int write, int force,
-               struct page **pages, struct vm_area_struct **vmas)
+/* Can we do the FOLL_ANON optimization? */
+static inline int use_zero_page(struct vm_area_struct *vma)
+{
+       /*
+        * We don't want to optimize FOLL_ANON for make_pages_present()
+        * when it tries to page in a VM_LOCKED region. As to VM_SHARED,
+        * we want to get the page from the page tables to make sure
+        * that we serialize and update with any other user of that
+        * mapping.
+        */
+       if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
+               return 0;
+       /*
+        * And if we have a fault routine, it's not an anonymous region.
+        */
+       return !vma->vm_ops || !vma->vm_ops->fault;
+}
+
+
+
+int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+                    unsigned long start, int nr_pages, int flags,
+                    struct page **pages, struct vm_area_struct **vmas)
 {
        int i;
-       unsigned int vm_flags;
+       unsigned int vm_flags = 0;
+       int write = !!(flags & GUP_FLAGS_WRITE);
+       int force = !!(flags & GUP_FLAGS_FORCE);
+       int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
+       int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
 
-       if (len <= 0)
+       if (nr_pages <= 0)
                return 0;
        /* 
         * Require read or write permissions.
@@ -1042,7 +1240,9 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        pud_t *pud;
                        pmd_t *pmd;
                        pte_t *pte;
-                       if (write) /* user gate pages are read-only */
+
+                       /* user gate pages are read-only */
+                       if (!ignore && write)
                                return i ? : -EFAULT;
                        if (pg > TASK_SIZE)
                                pgd = pgd_offset_k(pg);
@@ -1070,37 +1270,40 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                vmas[i] = gate_vma;
                        i++;
                        start += PAGE_SIZE;
-                       len--;
+                       nr_pages--;
                        continue;
                }
 
-               if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
-                               || !(vm_flags & vma->vm_flags))
+               if (!vma ||
+                   (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
+                   (!ignore && !(vm_flags & vma->vm_flags)))
                        return i ? : -EFAULT;
 
                if (is_vm_hugetlb_page(vma)) {
                        i = follow_hugetlb_page(mm, vma, pages, vmas,
-                                               &start, &len, i, write);
+                                               &start, &nr_pages, i, write);
                        continue;
                }
 
                foll_flags = FOLL_TOUCH;
                if (pages)
                        foll_flags |= FOLL_GET;
-               if (!write && !(vma->vm_flags & VM_LOCKED) &&
-                   (!vma->vm_ops || !vma->vm_ops->fault))
+               if (!write && use_zero_page(vma))
                        foll_flags |= FOLL_ANON;
 
                do {
                        struct page *page;
 
                        /*
-                        * If tsk is ooming, cut off its access to large memory
-                        * allocations. It has a pending SIGKILL, but it can't
-                        * be processed until returning to user space.
+                        * If we have a pending SIGKILL, don't keep faulting
+                        * pages and potentially allocating memory, unless
+                        * current is handling munlock--e.g., on exit. In
+                        * that case, we are not allocating memory.  Rather,
+                        * we're only unlocking already resident/mapped pages.
                         */
-                       if (unlikely(test_tsk_thread_flag(tsk, TIF_MEMDIE)))
-                               return -ENOMEM;
+                       if (unlikely(!ignore_sigkill &&
+                                       fatal_signal_pending(current)))
+                               return i ? i : -ERESTARTSYS;
 
                        if (write)
                                foll_flags |= FOLL_WRITE;
@@ -1108,12 +1311,16 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        cond_resched();
                        while (!(page = follow_page(vma, start, foll_flags))) {
                                int ret;
+
                                ret = handle_mm_fault(mm, vma, start,
-                                               foll_flags & FOLL_WRITE);
+                                       (foll_flags & FOLL_WRITE) ?
+                                       FAULT_FLAG_WRITE : 0);
+
                                if (ret & VM_FAULT_ERROR) {
                                        if (ret & VM_FAULT_OOM)
                                                return i ? i : -ENOMEM;
-                                       else if (ret & VM_FAULT_SIGBUS)
+                                       if (ret &
+                                           (VM_FAULT_HWPOISON|VM_FAULT_SIGBUS))
                                                return i ? i : -EFAULT;
                                        BUG();
                                }
@@ -1127,13 +1334,21 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                 * do_wp_page has broken COW when necessary,
                                 * even if maybe_mkwrite decided not to set
                                 * pte_write. We can thus safely do subsequent
-                                * page lookups as if they were reads.
+                                * page lookups as if they were reads. But only
+                                * do so when looping for pte_write is futile:
+                                * in some cases userspace may also be wanting
+                                * to write to the gotten user page, which a
+                                * read fault here might prevent (a readonly
+                                * page might get reCOWed by userspace write).
                                 */
-                               if (ret & VM_FAULT_WRITE)
+                               if ((ret & VM_FAULT_WRITE) &&
+                                   !(vma->vm_flags & VM_WRITE))
                                        foll_flags &= ~FOLL_WRITE;
 
                                cond_resched();
                        }
+                       if (IS_ERR(page))
+                               return i ? i : PTR_ERR(page);
                        if (pages) {
                                pages[i] = page;
 
@@ -1144,11 +1359,76 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                vmas[i] = vma;
                        i++;
                        start += PAGE_SIZE;
-                       len--;
-               } while (len && start < vma->vm_end);
-       } while (len);
+                       nr_pages--;
+               } while (nr_pages && start < vma->vm_end);
+       } while (nr_pages);
        return i;
 }
+
+/**
+ * get_user_pages() - pin user pages in memory
+ * @tsk:       task_struct of target task
+ * @mm:                mm_struct of target mm
+ * @start:     starting user address
+ * @nr_pages:  number of pages from start to pin
+ * @write:     whether pages will be written to by the caller
+ * @force:     whether to force write access even if user mapping is
+ *             readonly. This will result in the page being COWed even
+ *             in MAP_SHARED mappings. You do not want this.
+ * @pages:     array that receives pointers to the pages pinned.
+ *             Should be at least nr_pages long. Or NULL, if caller
+ *             only intends to ensure the pages are faulted in.
+ * @vmas:      array of pointers to vmas corresponding to each page.
+ *             Or NULL if the caller does not require them.
+ *
+ * Returns number of pages pinned. This may be fewer than the number
+ * requested. If nr_pages is 0 or negative, returns 0. If no pages
+ * were pinned, returns -errno. Each page returned must be released
+ * with a put_page() call when it is finished with. vmas will only
+ * remain valid while mmap_sem is held.
+ *
+ * Must be called with mmap_sem held for read or write.
+ *
+ * get_user_pages walks a process's page tables and takes a reference to
+ * each struct page that each user address corresponds to at a given
+ * instant. That is, it takes the page that would be accessed if a user
+ * thread accesses the given user virtual address at that instant.
+ *
+ * This does not guarantee that the page exists in the user mappings when
+ * get_user_pages returns, and there may even be a completely different
+ * page there in some cases (eg. if mmapped pagecache has been invalidated
+ * and subsequently re faulted). However it does guarantee that the page
+ * won't be freed completely. And mostly callers simply care that the page
+ * contains data that was valid *at some point in time*. Typically, an IO
+ * or similar operation cannot guarantee anything stronger anyway because
+ * locks can't be held over the syscall boundary.
+ *
+ * If write=0, the page must not be written to. If the page is written to,
+ * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
+ * after the page is finished with, and before put_page is called.
+ *
+ * get_user_pages is typically used for fewer-copy IO operations, to get a
+ * handle on the memory by some means other than accesses via the user virtual
+ * addresses. The pages may be submitted for DMA to devices or accessed via
+ * their kernel linear mapping (via the kmap APIs). Care should be taken to
+ * use the correct cache flushing APIs.
+ *
+ * See also get_user_pages_fast, for performance critical applications.
+ */
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+               unsigned long start, int nr_pages, int write, int force,
+               struct page **pages, struct vm_area_struct **vmas)
+{
+       int flags = 0;
+
+       if (write)
+               flags |= GUP_FLAGS_WRITE;
+       if (force)
+               flags |= GUP_FLAGS_FORCE;
+
+       return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
+}
+
 EXPORT_SYMBOL(get_user_pages);
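The kernel-doc added above spells out the pinning contract: hold mmap_sem, release every page with put_page(), and dirty pages you wrote to before releasing them. A minimal sketch of a caller that follows it; the helper itself is hypothetical:

    /* Hypothetical: pin 'nr' user pages so the kernel can write into them. */
    static int pin_user_buffer(unsigned long uaddr, int nr, struct page **pages)
    {
            int got, i;

            down_read(&current->mm->mmap_sem);
            got = get_user_pages(current, current->mm, uaddr, nr,
                                 1 /* write */, 0 /* force */, pages, NULL);
            up_read(&current->mm->mmap_sem);
            if (got <= 0)
                    return got ? got : -EFAULT;

            /* ... fill the pages, e.g. via kmap() ... */

            for (i = 0; i < got; i++) {
                    set_page_dirty_lock(pages[i]);  /* we wrote to them */
                    put_page(pages[i]);             /* drop the pin */
            }
            return got;
    }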
 
 pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
@@ -1171,24 +1451,22 @@ pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
  * old drivers should use this, and they needed to mark their
  * pages reserved for the old functions anyway.
  */
-static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *page, pgprot_t prot)
+static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+                       struct page *page, pgprot_t prot)
 {
+       struct mm_struct *mm = vma->vm_mm;
        int retval;
        pte_t *pte;
        spinlock_t *ptl;
 
-       retval = mem_cgroup_charge(page, mm, GFP_KERNEL);
-       if (retval)
-               goto out;
-
        retval = -EINVAL;
        if (PageAnon(page))
-               goto out_uncharge;
+               goto out;
        retval = -ENOMEM;
        flush_dcache_page(page);
        pte = get_locked_pte(mm, addr, &ptl);
        if (!pte)
-               goto out_uncharge;
+               goto out;
        retval = -EBUSY;
        if (!pte_none(*pte))
                goto out_unlock;
@@ -1204,8 +1482,6 @@ static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *pa
        return retval;
 out_unlock:
        pte_unmap_unlock(pte, ptl);
-out_uncharge:
-       mem_cgroup_uncharge_page(page);
 out:
        return retval;
 }
@@ -1232,17 +1508,46 @@ out:
  *
  * The page does not need to be reserved.
  */
-int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page)
+int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+                       struct page *page)
 {
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return -EFAULT;
        if (!page_count(page))
                return -EINVAL;
        vma->vm_flags |= VM_INSERTPAGE;
-       return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
+       return insert_page(vma, addr, page, vma->vm_page_prot);
 }
 EXPORT_SYMBOL(vm_insert_page);
 
+static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+                       unsigned long pfn, pgprot_t prot)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       int retval;
+       pte_t *pte, entry;
+       spinlock_t *ptl;
+
+       retval = -ENOMEM;
+       pte = get_locked_pte(mm, addr, &ptl);
+       if (!pte)
+               goto out;
+       retval = -EBUSY;
+       if (!pte_none(*pte))
+               goto out_unlock;
+
+       /* Ok, finally just insert the thing.. */
+       entry = pte_mkspecial(pfn_pte(pfn, prot));
+       set_pte_at(mm, addr, pte, entry);
+       update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */
+
+       retval = 0;
+out_unlock:
+       pte_unmap_unlock(pte, ptl);
+out:
+       return retval;
+}
+
 /**
  * vm_insert_pfn - insert single pfn into user vma
  * @vma: user vma to map to
@@ -1254,43 +1559,67 @@ EXPORT_SYMBOL(vm_insert_page);
  *
  * This function should only be called from a vm_ops->fault handler, and
  * in that case the handler should return NULL.
+ *
+ * vma cannot be a COW mapping.
+ *
+ * As this is called only for pages that do not currently exist, we
+ * do not need to flush old virtual caches or the TLB.
  */
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
-               unsigned long pfn)
+                       unsigned long pfn)
 {
-       struct mm_struct *mm = vma->vm_mm;
-       int retval;
-       pte_t *pte, entry;
-       spinlock_t *ptl;
-
+       int ret;
+       pgprot_t pgprot = vma->vm_page_prot;
+       /*
+        * Technically, architectures with pte_special can avoid all these
+        * restrictions (same for remap_pfn_range).  However we would like
+        * consistency in testing and feature parity among all, so we should
+        * try to keep these invariants in place for everybody.
+        */
        BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
        BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
                                                (VM_PFNMAP|VM_MIXEDMAP));
        BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
        BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
 
-       retval = -ENOMEM;
-       pte = get_locked_pte(mm, addr, &ptl);
-       if (!pte)
-               goto out;
-       retval = -EBUSY;
-       if (!pte_none(*pte))
-               goto out_unlock;
+       if (addr < vma->vm_start || addr >= vma->vm_end)
+               return -EFAULT;
+       if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
+               return -EINVAL;
 
-       /* Ok, finally just insert the thing.. */
-       entry = pfn_pte(pfn, vma->vm_page_prot);
-       set_pte_at(mm, addr, pte, entry);
-       update_mmu_cache(vma, addr, entry);
+       ret = insert_pfn(vma, addr, pfn, pgprot);
 
-       retval = 0;
-out_unlock:
-       pte_unmap_unlock(pte, ptl);
+       if (ret)
+               untrack_pfn_vma(vma, pfn, PAGE_SIZE);
 
-out:
-       return retval;
+       return ret;
 }
 EXPORT_SYMBOL(vm_insert_pfn);
 
+int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+                       unsigned long pfn)
+{
+       BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
+
+       if (addr < vma->vm_start || addr >= vma->vm_end)
+               return -EFAULT;
+
+       /*
+        * If we don't have pte special, then we have to use the pfn_valid()
+        * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
+        * refcount the page if pfn_valid is true (hence insert_page rather
+        * than insert_pfn).
+        */
+       if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
+               struct page *page;
+
+               page = pfn_to_page(pfn);
+               return insert_page(vma, addr, page, vma->vm_page_prot);
+       }
+       return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_insert_mixed);
+
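vm_insert_pfn() and vm_insert_mixed() above are intended to be called from a vma's ->fault handler, which then returns VM_FAULT_NOPAGE because the pte has already been installed. A minimal sketch under that assumption; the device base address and the handler are hypothetical:

    static unsigned long drv_phys_base;         /* hypothetical device region */

    /* Hypothetical ->fault handler for a linear VM_PFNMAP mapping. */
    static int drv_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
            unsigned long pfn = (drv_phys_base >> PAGE_SHIFT) + vmf->pgoff;

            if (vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn))
                    return VM_FAULT_SIGBUS;
            return VM_FAULT_NOPAGE;             /* pte installed, no struct page */
    }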
 /*
  * maps a range of physical memory into the requested pages. the old
  * mappings are removed. any references to nonexistent pages results
@@ -1309,7 +1638,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
        arch_enter_lazy_mmu_mode();
        do {
                BUG_ON(!pte_none(*pte));
-               set_pte_at(mm, addr, pte, pfn_pte(pfn, prot));
+               set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        arch_leave_lazy_mmu_mode();
@@ -1394,14 +1723,25 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
         * behaviour that some programs depend on. We mark the "original"
         * un-COW'ed pages by matching them up with "vma->vm_pgoff".
         */
-       if (is_cow_mapping(vma->vm_flags)) {
-               if (addr != vma->vm_start || end != vma->vm_end)
-                       return -EINVAL;
+       if (addr == vma->vm_start && end == vma->vm_end) {
                vma->vm_pgoff = pfn;
-       }
+               vma->vm_flags |= VM_PFN_AT_MMAP;
+       } else if (is_cow_mapping(vma->vm_flags))
+               return -EINVAL;
 
        vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
+       err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
+       if (err) {
+               /*
+                * To indicate that track_pfn related cleanup is not
+                * needed from higher level routine calling unmap_vmas
+                */
+               vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
+               vma->vm_flags &= ~VM_PFN_AT_MMAP;
+               return -EINVAL;
+       }
+
        BUG_ON(addr >= end);
        pfn -= addr >> PAGE_SHIFT;
        pgd = pgd_offset(mm, addr);
@@ -1413,6 +1753,10 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
+
+       if (err)
+               untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));
+
        return err;
 }
 EXPORT_SYMBOL(remap_pfn_range);
@@ -1434,6 +1778,8 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 
        BUG_ON(pmd_huge(*pmd));
 
+       arch_enter_lazy_mmu_mode();
+
        token = pmd_pgtable(*pmd);
 
        do {
@@ -1442,6 +1788,8 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
                        break;
        } while (pte++, addr += PAGE_SIZE, addr != end);
 
+       arch_leave_lazy_mmu_mode();
+
        if (mm != &init_mm)
                pte_unmap_unlock(pte-1, ptl);
        return err;
@@ -1455,6 +1803,8 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
        unsigned long next;
        int err;
 
+       BUG_ON(pud_huge(*pud));
+
        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
                return -ENOMEM;
@@ -1496,10 +1846,11 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
 {
        pgd_t *pgd;
        unsigned long next;
-       unsigned long end = addr + size;
+       unsigned long start = addr, end = addr + size;
        int err;
 
        BUG_ON(addr >= end);
+       mmu_notifier_invalidate_range_start(mm, start, end);
        pgd = pgd_offset(mm, addr);
        do {
                next = pgd_addr_end(addr, end);
@@ -1507,6 +1858,7 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
+       mmu_notifier_invalidate_range_end(mm, start, end);
        return err;
 }
 EXPORT_SYMBOL_GPL(apply_to_page_range);
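apply_to_page_range() walks (and allocates) page tables over [addr, addr + size) and calls a pte_fn_t callback for every pte slot with the page-table lock held; the hunk above now also brackets the walk with mmu notifier range calls. A small illustrative callback, assuming the pte_fn_t signature used by this kernel (pte, token, addr, data):

    /* Illustrative pte_fn_t callback: count the slots that are already mapped. */
    static int count_present_pte(pte_t *pte, pgtable_t token,
                                 unsigned long addr, void *data)
    {
            unsigned long *present = data;

            if (pte_present(*pte))
                    (*present)++;
            return 0;   /* returning non-zero would abort the walk */
    }

    /* Usage: apply_to_page_range(mm, start, size, count_present_pte, &nr); */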
@@ -1604,18 +1956,40 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        struct page *dirty_page = NULL;
 
        old_page = vm_normal_page(vma, address, orig_pte);
-       if (!old_page)
+       if (!old_page) {
+               /*
+                * VM_MIXEDMAP !pfn_valid() case
+                *
+                * We should not cow pages in a shared writeable mapping.
+                * Just mark the pages writable as we can't do any dirty
+                * accounting on raw pfn maps.
+                */
+               if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
+                                    (VM_WRITE|VM_SHARED))
+                       goto reuse;
                goto gotten;
+       }
 
        /*
         * Take out anonymous pages first, anonymous shared vmas are
         * not dirty accountable.
         */
        if (PageAnon(old_page)) {
-               if (!TestSetPageLocked(old_page)) {
-                       reuse = can_share_swap_page(old_page);
-                       unlock_page(old_page);
+               if (!trylock_page(old_page)) {
+                       page_cache_get(old_page);
+                       pte_unmap_unlock(page_table, ptl);
+                       lock_page(old_page);
+                       page_table = pte_offset_map_lock(mm, pmd, address,
+                                                        &ptl);
+                       if (!pte_same(*page_table, orig_pte)) {
+                               unlock_page(old_page);
+                               page_cache_release(old_page);
+                               goto unlock;
+                       }
+                       page_cache_release(old_page);
                }
+               reuse = reuse_swap_page(old_page);
+               unlock_page(old_page);
        } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
                                        (VM_WRITE|VM_SHARED))) {
                /*
@@ -1624,6 +1998,15 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 * get_user_pages(.write=1, .force=1).
                 */
                if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
+                       struct vm_fault vmf;
+                       int tmp;
+
+                       vmf.virtual_address = (void __user *)(address &
+                                                               PAGE_MASK);
+                       vmf.pgoff = old_page->index;
+                       vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
+                       vmf.page = old_page;
+
                        /*
                         * Notify the address space that the page is about to
                         * become writable so that it can prohibit this or wait
@@ -1635,8 +2018,21 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        page_cache_get(old_page);
                        pte_unmap_unlock(page_table, ptl);
 
-                       if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
+                       tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
+                       if (unlikely(tmp &
+                                       (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
+                               ret = tmp;
                                goto unwritable_page;
+                       }
+                       if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
+                               lock_page(old_page);
+                               if (!old_page->mapping) {
+                                       ret = 0; /* retry the fault */
+                                       unlock_page(old_page);
+                                       goto unwritable_page;
+                               }
+                       } else
+                               VM_BUG_ON(!PageLocked(old_page));
 
                        /*
                         * Since we dropped the lock we need to revalidate
@@ -1646,9 +2042,11 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                         */
                        page_table = pte_offset_map_lock(mm, pmd, address,
                                                         &ptl);
-                       page_cache_release(old_page);
-                       if (!pte_same(*page_table, orig_pte))
+                       if (!pte_same(*page_table, orig_pte)) {
+                               unlock_page(old_page);
+                               page_cache_release(old_page);
                                goto unlock;
+                       }
 
                        page_mkwrite = 1;
                }
@@ -1658,6 +2056,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        }
 
        if (reuse) {
+reuse:
                flush_cache_page(vma, address, pte_pfn(orig_pte));
                entry = pte_mkyoung(orig_pte);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
@@ -1680,10 +2079,19 @@ gotten:
        new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
        if (!new_page)
                goto oom;
+       /*
+        * Don't let another task, with possibly unlocked vma,
+        * keep the mlocked page.
+        */
+       if ((vma->vm_flags & VM_LOCKED) && old_page) {
+               lock_page(old_page);    /* for LRU manipulation */
+               clear_page_mlock(old_page);
+               unlock_page(old_page);
+       }
        cow_user_page(new_page, old_page, address, vma);
        __SetPageUptodate(new_page);
 
-       if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
+       if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
                goto oom_free_new;
 
        /*
@@ -1692,7 +2100,6 @@ gotten:
        page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (likely(pte_same(*page_table, orig_pte))) {
                if (old_page) {
-                       page_remove_rmap(old_page, vma);
                        if (!PageAnon(old_page)) {
                                dec_mm_counter(mm, file_rss);
                                inc_mm_counter(mm, anon_rss);
@@ -1708,11 +2115,35 @@ gotten:
                 * seen in the presence of one thread doing SMC and another
                 * thread doing COW.
                 */
-               ptep_clear_flush(vma, address, page_table);
+               ptep_clear_flush_notify(vma, address, page_table);
+               page_add_new_anon_rmap(new_page, vma, address);
                set_pte_at(mm, address, page_table, entry);
                update_mmu_cache(vma, address, entry);
-               lru_cache_add_active(new_page);
-               page_add_new_anon_rmap(new_page, vma, address);
+               if (old_page) {
+                       /*
+                        * Only after switching the pte to the new page may
+                        * we remove the mapcount here. Otherwise another
+                        * process may come and find the rmap count decremented
+                        * before the pte is switched to the new page, and
+                        * "reuse" the old page writing into it while our pte
+                        * here still points into it and can be read by other
+                        * threads.
+                        *
+                        * The critical issue is to order this
+                        * page_remove_rmap with the ptep_clear_flush above.
+                        * Those stores are ordered by (if nothing else,)
+                        * the barrier present in the atomic_add_negative
+                        * in page_remove_rmap.
+                        *
+                        * Then the TLB flush in ptep_clear_flush ensures that
+                        * no process can access the old page before the
+                        * decremented mapcount is visible. And the old page
+                        * cannot be reused until after the decremented
+                        * mapcount is visible. So transitively, TLBs to
+                        * old page will be flushed before it can be reused.
+                        */
+                       page_remove_rmap(old_page);
+               }
 
                /* Free the old page.. */
                new_page = old_page;
@@ -1727,9 +2158,6 @@ gotten:
 unlock:
        pte_unmap_unlock(page_table, ptl);
        if (dirty_page) {
-               if (vma->vm_file)
-                       file_update_time(vma->vm_file);
-
                /*
                 * Yes, Virginia, this is actually required to prevent a race
                 * with clear_page_dirty_for_io() from clearing the page dirty
@@ -1738,21 +2166,46 @@ unlock:
                 *
                 * do_no_page is protected similarly.
                 */
-               wait_on_page_locked(dirty_page);
-               set_page_dirty_balance(dirty_page, page_mkwrite);
+               if (!page_mkwrite) {
+                       wait_on_page_locked(dirty_page);
+                       set_page_dirty_balance(dirty_page, page_mkwrite);
+               }
                put_page(dirty_page);
+               if (page_mkwrite) {
+                       struct address_space *mapping = dirty_page->mapping;
+
+                       set_page_dirty(dirty_page);
+                       unlock_page(dirty_page);
+                       page_cache_release(dirty_page);
+                       if (mapping)    {
+                               /*
+                                * Some device drivers do not set page.mapping
+                                * but still dirty their pages
+                                */
+                               balance_dirty_pages_ratelimited(mapping);
+                       }
+               }
+
+               /* file_update_time outside page_lock */
+               if (vma->vm_file)
+                       file_update_time(vma->vm_file);
        }
        return ret;
 oom_free_new:
        page_cache_release(new_page);
 oom:
-       if (old_page)
+       if (old_page) {
+               if (page_mkwrite) {
+                       unlock_page(old_page);
+                       page_cache_release(old_page);
+               }
                page_cache_release(old_page);
+       }
        return VM_FAULT_OOM;
 
 unwritable_page:
        page_cache_release(old_page);
-       return VM_FAULT_SIGBUS;
+       return ret;
 }
 
 /*
@@ -2004,7 +2457,7 @@ int vmtruncate(struct inode * inode, loff_t offset)
                unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
        }
 
-       if (inode->i_op && inode->i_op->truncate)
+       if (inode->i_op->truncate)
                inode->i_op->truncate(inode);
        return 0;
 
@@ -2024,7 +2477,7 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
         * a way to truncate a range of blocks (punch a hole) -
         * we should return failure right now.
         */
-       if (!inode->i_op || !inode->i_op->truncate_range)
+       if (!inode->i_op->truncate_range)
                return -ENOSYS;
 
        mutex_lock(&inode->i_mutex);
@@ -2046,26 +2499,34 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
  */
 static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long address, pte_t *page_table, pmd_t *pmd,
-               int write_access, pte_t orig_pte)
+               unsigned int flags, pte_t orig_pte)
 {
        spinlock_t *ptl;
        struct page *page;
        swp_entry_t entry;
        pte_t pte;
+       struct mem_cgroup *ptr = NULL;
        int ret = 0;
 
        if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
                goto out;
 
        entry = pte_to_swp_entry(orig_pte);
-       if (is_migration_entry(entry)) {
-               migration_entry_wait(mm, pmd, address);
+       if (unlikely(non_swap_entry(entry))) {
+               if (is_migration_entry(entry)) {
+                       migration_entry_wait(mm, pmd, address);
+               } else if (is_hwpoison_entry(entry)) {
+                       ret = VM_FAULT_HWPOISON;
+               } else {
+                       print_bad_pte(vma, address, orig_pte, NULL);
+                       ret = VM_FAULT_OOM;
+               }
                goto out;
        }
        delayacct_set_flag(DELAYACCT_PF_SWAPIN);
        page = lookup_swap_cache(entry);
        if (!page) {
-               grab_swap_token(); /* Contend for token _before_ read-in */
+               grab_swap_token(mm); /* Contend for token _before_ read-in */
                page = swapin_readahead(entry,
                                        GFP_HIGHUSER_MOVABLE, vma, address);
                if (!page) {
@@ -2083,18 +2544,20 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                /* Had to read the page from swap area: Major fault */
                ret = VM_FAULT_MAJOR;
                count_vm_event(PGMAJFAULT);
-       }
-
-       if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
+       } else if (PageHWPoison(page)) {
+               ret = VM_FAULT_HWPOISON;
                delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
-               ret = VM_FAULT_OOM;
                goto out;
        }
 
-       mark_page_accessed(page);
        lock_page(page);
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
+       if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
+               ret = VM_FAULT_OOM;
+               goto out_page;
+       }
+
        /*
         * Back out if somebody else already faulted in this pte.
         */
@@ -2107,25 +2570,38 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto out_nomap;
        }
 
-       /* The page isn't present yet, go ahead with the fault. */
+       /*
+        * The page isn't present yet, go ahead with the fault.
+        *
+        * Be careful about the sequence of operations here.
+        * To get its accounting right, reuse_swap_page() must be called
+        * while the page is counted on swap but not yet in mapcount, i.e.
+        * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
+        * must be called after the swap_free(), or it will never succeed.
+        * Because delete_from_swap_cache() may be called by reuse_swap_page(),
+        * mem_cgroup_commit_charge_swapin() may not be able to find swp_entry
+        * in page->private. In this case, a record in swap_cgroup is silently
+        * discarded at swap_free().
+        */
 
        inc_mm_counter(mm, anon_rss);
        pte = mk_pte(page, vma->vm_page_prot);
-       if (write_access && can_share_swap_page(page)) {
+       if ((flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
                pte = maybe_mkwrite(pte_mkdirty(pte), vma);
-               write_access = 0;
+               flags &= ~FAULT_FLAG_WRITE;
        }
-
        flush_icache_page(vma, page);
        set_pte_at(mm, address, page_table, pte);
        page_add_anon_rmap(page, vma, address);
+       /* It's better to call commit-charge after rmap is established */
+       mem_cgroup_commit_charge_swapin(page, ptr);
 
        swap_free(entry);
-       if (vm_swap_full())
-               remove_exclusive_swap_page(page);
+       if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
+               try_to_free_swap(page);
        unlock_page(page);
 
-       if (write_access) {
+       if (flags & FAULT_FLAG_WRITE) {
                ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
                if (ret & VM_FAULT_ERROR)
                        ret &= VM_FAULT_ERROR;
@@ -2139,8 +2615,9 @@ unlock:
 out:
        return ret;
 out_nomap:
-       mem_cgroup_uncharge_page(page);
+       mem_cgroup_cancel_charge_swapin(ptr);
        pte_unmap_unlock(page_table, ptl);
+out_page:
        unlock_page(page);
        page_cache_release(page);
        return ret;
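
For reference, the hwpoison entry recognised by is_hwpoison_entry() above is not created in this file: the memory-failure path plants it when it unmaps a poisoned page, so that any later fault on that address lands in the branch above and returns VM_FAULT_HWPOISON instead of mapping corrupt data. A rough sketch of that producer side, assuming the SWP_HWPOISON helpers from linux/swapops.h introduced by this series:

	/* Illustrative only: roughly what the unmap path does to a poisoned page */
	if (PageHWPoison(page)) {
		swp_entry_t hwp = make_hwpoison_entry(page);

		set_pte_at(mm, address, ptep, swp_entry_to_pte(hwp));
	}
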
@@ -2153,7 +2630,7 @@ out_nomap:
  */
 static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long address, pte_t *page_table, pmd_t *pmd,
-               int write_access)
+               unsigned int flags)
 {
        struct page *page;
        spinlock_t *ptl;
@@ -2169,7 +2646,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto oom;
        __SetPageUptodate(page);
 
-       if (mem_cgroup_charge(page, mm, GFP_KERNEL))
+       if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
                goto oom_free_page;
 
        entry = mk_pte(page, vma->vm_page_prot);
@@ -2179,7 +2656,6 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        if (!pte_none(*page_table))
                goto release;
        inc_mm_counter(mm, anon_rss);
-       lru_cache_add_active(page);
        page_add_new_anon_rmap(page, vma, address);
        set_pte_at(mm, address, page_table, entry);
 
@@ -2220,6 +2696,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        struct page *page;
        pte_t entry;
        int anon = 0;
+       int charged = 0;
        struct page *dirty_page = NULL;
        struct vm_fault vmf;
        int ret;
@@ -2230,12 +2707,16 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        vmf.flags = flags;
        vmf.page = NULL;
 
-       BUG_ON(vma->vm_flags & VM_PFNMAP);
-
        ret = vma->vm_ops->fault(vma, &vmf);
        if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
                return ret;
 
+       if (unlikely(PageHWPoison(vmf.page))) {
+               if (ret & VM_FAULT_LOCKED)
+                       unlock_page(vmf.page);
+               return VM_FAULT_HWPOISON;
+       }
+
        /*
         * For consistency in subsequent calls, make the faulted page always
         * locked.
@@ -2262,6 +2743,18 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                ret = VM_FAULT_OOM;
                                goto out;
                        }
+                       if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
+                               ret = VM_FAULT_OOM;
+                               page_cache_release(page);
+                               goto out;
+                       }
+                       charged = 1;
+                       /*
+                        * Don't let another task, with possibly unlocked vma,
+                        * keep the mlocked page.
+                        */
+                       if (vma->vm_flags & VM_LOCKED)
+                               clear_page_mlock(vmf.page);
                        copy_user_highpage(page, vmf.page, address, vma);
                        __SetPageUptodate(page);
                } else {
@@ -2271,36 +2764,31 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                         * to become writable
                         */
                        if (vma->vm_ops->page_mkwrite) {
+                               int tmp;
+
                                unlock_page(page);
-                               if (vma->vm_ops->page_mkwrite(vma, page) < 0) {
-                                       ret = VM_FAULT_SIGBUS;
-                                       anon = 1; /* no anon but release vmf.page */
-                                       goto out_unlocked;
-                               }
-                               lock_page(page);
-                               /*
-                                * XXX: this is not quite right (racy vs
-                                * invalidate) to unlock and relock the page
-                                * like this, however a better fix requires
-                                * reworking page_mkwrite locking API, which
-                                * is better done later.
-                                */
-                               if (!page->mapping) {
-                                       ret = 0;
-                                       anon = 1; /* no anon but release vmf.page */
-                                       goto out;
+                               vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
+                               tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
+                               if (unlikely(tmp &
+                                         (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
+                                       ret = tmp;
+                                       goto unwritable_page;
                                }
+                               if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
+                                       lock_page(page);
+                                       if (!page->mapping) {
+                                               ret = 0; /* retry the fault */
+                                               unlock_page(page);
+                                               goto unwritable_page;
+                                       }
+                               } else
+                                       VM_BUG_ON(!PageLocked(page));
                                page_mkwrite = 1;
                        }
                }
 
        }
 
-       if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
-               ret = VM_FAULT_OOM;
-               goto out;
-       }
-
        page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 
        /*
@@ -2308,7 +2796,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         * due to the bad i386 page protection. But it's valid
         * for other architectures too.
         *
-        * Note that if write_access is true, we either now have
+        * Note that if FAULT_FLAG_WRITE is set, we either now have
         * an exclusive copy of the page, or this is a shared mapping,
         * so we can make it writable and dirty to avoid having to
         * handle that later.
@@ -2319,11 +2807,9 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                entry = mk_pte(page, vma->vm_page_prot);
                if (flags & FAULT_FLAG_WRITE)
                        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-               set_pte_at(mm, address, page_table, entry);
                if (anon) {
-                        inc_mm_counter(mm, anon_rss);
-                        lru_cache_add_active(page);
-                        page_add_new_anon_rmap(page, vma, address);
+                       inc_mm_counter(mm, anon_rss);
+                       page_add_new_anon_rmap(page, vma, address);
                } else {
                        inc_mm_counter(mm, file_rss);
                        page_add_file_rmap(page);
@@ -2332,11 +2818,13 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                get_page(dirty_page);
                        }
                }
+               set_pte_at(mm, address, page_table, entry);
 
                /* no need to invalidate: a not-present page won't be cached */
                update_mmu_cache(vma, address, entry);
        } else {
-               mem_cgroup_uncharge_page(page);
+               if (charged)
+                       mem_cgroup_uncharge_page(page);
                if (anon)
                        page_cache_release(page);
                else
@@ -2346,86 +2834,48 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        pte_unmap_unlock(page_table, ptl);
 
 out:
-       unlock_page(vmf.page);
-out_unlocked:
-       if (anon)
-               page_cache_release(vmf.page);
-       else if (dirty_page) {
-               if (vma->vm_file)
-                       file_update_time(vma->vm_file);
+       if (dirty_page) {
+               struct address_space *mapping = page->mapping;
 
-               set_page_dirty_balance(dirty_page, page_mkwrite);
+               if (set_page_dirty(dirty_page))
+                       page_mkwrite = 1;
+               unlock_page(dirty_page);
                put_page(dirty_page);
+               if (page_mkwrite && mapping) {
+                       /*
+                        * Some device drivers do not set page.mapping but still
+                        * dirty their pages
+                        */
+                       balance_dirty_pages_ratelimited(mapping);
+               }
+
+               /* file_update_time outside page_lock */
+               if (vma->vm_file)
+                       file_update_time(vma->vm_file);
+       } else {
+               unlock_page(vmf.page);
+               if (anon)
+                       page_cache_release(vmf.page);
        }
 
        return ret;
+
+unwritable_page:
+       page_cache_release(page);
+       return ret;
 }
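
The reworked ->page_mkwrite call above now receives a struct vm_fault and reports status through VM_FAULT_* bits: VM_FAULT_ERROR or VM_FAULT_NOPAGE aborts or retries the fault, while VM_FAULT_LOCKED tells __do_fault() the page comes back locked. A minimal handler consistent with that caller might look like the following sketch (my_page_mkwrite is hypothetical, not part of this patch):

static int my_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;

	lock_page(page);
	if (!page->mapping) {
		/* truncated under us: drop the lock and let the fault retry */
		unlock_page(page);
		return VM_FAULT_NOPAGE;
	}
	/* ... reserve blocks / do whatever makes the page safely writable ... */
	return VM_FAULT_LOCKED;	/* page returned locked, as the caller expects */
}
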
 
 static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long address, pte_t *page_table, pmd_t *pmd,
-               int write_access, pte_t orig_pte)
+               unsigned int flags, pte_t orig_pte)
 {
        pgoff_t pgoff = (((address & PAGE_MASK)
                        - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
-       unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);
 
        pte_unmap(page_table);
        return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
 }
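
To make the pgoff arithmetic above concrete: with 4K pages, vm_start = 0x400000, vm_pgoff = 0x10 and a fault at 0x403000, the handler computes ((0x403000 - 0x400000) >> PAGE_SHIFT) + 0x10 = 0x3 + 0x10 = 0x13, i.e. the fault is filled from page 0x13 of the backing file (the numbers are illustrative only).
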
 
-
-/*
- * do_no_pfn() tries to create a new page mapping for a page without
- * a struct_page backing it
- *
- * As this is called only for pages that do not currently exist, we
- * do not need to flush old virtual caches or the TLB.
- *
- * We enter with non-exclusive mmap_sem (to exclude vma changes,
- * but allow concurrent faults), and pte mapped but not yet locked.
- * We return with mmap_sem still held, but pte unmapped and unlocked.
- *
- * It is expected that the ->nopfn handler always returns the same pfn
- * for a given virtual mapping.
- *
- * Mark this `noinline' to prevent it from bloating the main pagefault code.
- */
-static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
-                    unsigned long address, pte_t *page_table, pmd_t *pmd,
-                    int write_access)
-{
-       spinlock_t *ptl;
-       pte_t entry;
-       unsigned long pfn;
-
-       pte_unmap(page_table);
-       BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
-       BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
-
-       pfn = vma->vm_ops->nopfn(vma, address & PAGE_MASK);
-
-       BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
-
-       if (unlikely(pfn == NOPFN_OOM))
-               return VM_FAULT_OOM;
-       else if (unlikely(pfn == NOPFN_SIGBUS))
-               return VM_FAULT_SIGBUS;
-       else if (unlikely(pfn == NOPFN_REFAULT))
-               return 0;
-
-       page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
-
-       /* Only go through if we didn't race with anybody else... */
-       if (pte_none(*page_table)) {
-               entry = pfn_pte(pfn, vma->vm_page_prot);
-               if (write_access)
-                       entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-               set_pte_at(mm, address, page_table, entry);
-       }
-       pte_unmap_unlock(page_table, ptl);
-       return 0;
-}
-
 /*
  * Fault of a previously existing named mapping. Repopulate the pte
  * from the encoded file_pte if possible. This enables swappable
@@ -2437,21 +2887,20 @@ static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
  */
 static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long address, pte_t *page_table, pmd_t *pmd,
-               int write_access, pte_t orig_pte)
+               unsigned int flags, pte_t orig_pte)
 {
-       unsigned int flags = FAULT_FLAG_NONLINEAR |
-                               (write_access ? FAULT_FLAG_WRITE : 0);
        pgoff_t pgoff;
 
+       flags |= FAULT_FLAG_NONLINEAR;
+
        if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
                return 0;
 
-       if (unlikely(!(vma->vm_flags & VM_NONLINEAR) ||
-                       !(vma->vm_flags & VM_CAN_NONLINEAR))) {
+       if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
                /*
                 * Page table corrupted: show pte and kill process.
                 */
-               print_bad_pte(vma, orig_pte, address);
+               print_bad_pte(vma, address, orig_pte, NULL);
                return VM_FAULT_OOM;
        }
 
@@ -2474,7 +2923,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  */
 static inline int handle_pte_fault(struct mm_struct *mm,
                struct vm_area_struct *vma, unsigned long address,
-               pte_t *pte, pmd_t *pmd, int write_access)
+               pte_t *pte, pmd_t *pmd, unsigned int flags)
 {
        pte_t entry;
        spinlock_t *ptl;
@@ -2485,33 +2934,30 @@ static inline int handle_pte_fault(struct mm_struct *mm,
                        if (vma->vm_ops) {
                                if (likely(vma->vm_ops->fault))
                                        return do_linear_fault(mm, vma, address,
-                                               pte, pmd, write_access, entry);
-                               if (unlikely(vma->vm_ops->nopfn))
-                                       return do_no_pfn(mm, vma, address, pte,
-                                                        pmd, write_access);
+                                               pte, pmd, flags, entry);
                        }
                        return do_anonymous_page(mm, vma, address,
-                                                pte, pmd, write_access);
+                                                pte, pmd, flags);
                }
                if (pte_file(entry))
                        return do_nonlinear_fault(mm, vma, address,
-                                       pte, pmd, write_access, entry);
+                                       pte, pmd, flags, entry);
                return do_swap_page(mm, vma, address,
-                                       pte, pmd, write_access, entry);
+                                       pte, pmd, flags, entry);
        }
 
        ptl = pte_lockptr(mm, pmd);
        spin_lock(ptl);
        if (unlikely(!pte_same(*pte, entry)))
                goto unlock;
-       if (write_access) {
+       if (flags & FAULT_FLAG_WRITE) {
                if (!pte_write(entry))
                        return do_wp_page(mm, vma, address,
                                        pte, pmd, ptl, entry);
                entry = pte_mkdirty(entry);
        }
        entry = pte_mkyoung(entry);
-       if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
+       if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
                update_mmu_cache(vma, address, entry);
        } else {
                /*
@@ -2520,7 +2966,7 @@ static inline int handle_pte_fault(struct mm_struct *mm,
                 * This still avoids useless tlb flushes for .text page faults
                 * with threads.
                 */
-               if (write_access)
+               if (flags & FAULT_FLAG_WRITE)
                        flush_tlb_page(vma, address);
        }
 unlock:
@@ -2532,7 +2978,7 @@ unlock:
  * By the time we get here, we already hold the mm semaphore
  */
 int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-               unsigned long address, int write_access)
+               unsigned long address, unsigned int flags)
 {
        pgd_t *pgd;
        pud_t *pud;
@@ -2544,7 +2990,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        count_vm_event(PGFAULT);
 
        if (unlikely(is_vm_hugetlb_page(vma)))
-               return hugetlb_fault(mm, vma, address, write_access);
+               return hugetlb_fault(mm, vma, address, flags);
 
        pgd = pgd_offset(mm, address);
        pud = pud_alloc(mm, pgd, address);
@@ -2557,7 +3003,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (!pte)
                return VM_FAULT_OOM;
 
-       return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
+       return handle_pte_fault(mm, vma, address, pte, pmd, flags);
 }
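
Since handle_mm_fault() now takes a flags word, architecture fault handlers are expected to build it themselves (FAULT_FLAG_WRITE for write faults) and to decode the new VM_FAULT_HWPOISON bit along with the other error bits. A hedged sketch of such a caller, assuming VM_FAULT_HWPOISON is part of the VM_FAULT_ERROR mask as elsewhere in this series (the labels and surrounding code are illustrative, not taken from any particular architecture):

	unsigned int flags = write ? FAULT_FLAG_WRITE : 0;
	int fault;

	fault = handle_mm_fault(mm, vma, address, flags);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON))
			goto do_sigbus;	/* poison is reported as SIGBUS to the task */
		BUG();
	}
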
 
 #ifndef __PAGETABLE_PUD_FOLDED
@@ -2571,6 +3017,8 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
        if (!new)
                return -ENOMEM;
 
+       smp_wmb(); /* See comment in __pte_alloc */
+
        spin_lock(&mm->page_table_lock);
        if (pgd_present(*pgd))          /* Another has populated it */
                pud_free(mm, new);
@@ -2592,6 +3040,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
        if (!new)
                return -ENOMEM;
 
+       smp_wmb(); /* See comment in __pte_alloc */
+
        spin_lock(&mm->page_table_lock);
 #ifndef __ARCH_HAS_4LEVEL_HACK
        if (pud_present(*pud))          /* Another has populated it */
@@ -2616,7 +3066,7 @@ int make_pages_present(unsigned long addr, unsigned long end)
 
        vma = find_vma(current->mm, addr);
        if (!vma)
-               return -1;
+               return -ENOMEM;
        write = (vma->vm_flags & VM_WRITE) != 0;
        BUG_ON(addr >= end);
        BUG_ON(end > vma->vm_end);
@@ -2625,7 +3075,7 @@ int make_pages_present(unsigned long addr, unsigned long end)
                        len, write, 0, NULL, NULL);
        if (ret < 0)
                return ret;
-       return ret == len ? 0 : -1;
+       return ret == len ? 0 : -EFAULT;
 }
 
 #if !defined(__HAVE_ARCH_GATE_AREA)
@@ -2672,6 +3122,123 @@ int in_gate_area_no_task(unsigned long addr)
 
 #endif /* __HAVE_ARCH_GATE_AREA */
 
+static int follow_pte(struct mm_struct *mm, unsigned long address,
+               pte_t **ptepp, spinlock_t **ptlp)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *ptep;
+
+       pgd = pgd_offset(mm, address);
+       if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+               goto out;
+
+       pud = pud_offset(pgd, address);
+       if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+               goto out;
+
+       pmd = pmd_offset(pud, address);
+       if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+               goto out;
+
+       /* We cannot handle huge page PFN maps. Luckily they don't exist. */
+       if (pmd_huge(*pmd))
+               goto out;
+
+       ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
+       if (!ptep)
+               goto out;
+       if (!pte_present(*ptep))
+               goto unlock;
+       *ptepp = ptep;
+       return 0;
+unlock:
+       pte_unmap_unlock(ptep, *ptlp);
+out:
+       return -EINVAL;
+}
+
+/**
+ * follow_pfn - look up PFN at a user virtual address
+ * @vma: memory mapping
+ * @address: user virtual address
+ * @pfn: location to store found PFN
+ *
+ * Only IO mappings and raw PFN mappings are allowed.
+ *
+ * Returns zero and the pfn at @pfn on success, -ve otherwise.
+ */
+int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+       unsigned long *pfn)
+{
+       int ret = -EINVAL;
+       spinlock_t *ptl;
+       pte_t *ptep;
+
+       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+               return ret;
+
+       ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
+       if (ret)
+               return ret;
+       *pfn = pte_pfn(*ptep);
+       pte_unmap_unlock(ptep, ptl);
+       return 0;
+}
+EXPORT_SYMBOL(follow_pfn);
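
A hypothetical follow_pfn() caller, for a driver that needs the physical frame behind a raw PFN mapping while holding mmap_sem for read (mm and uaddr are assumed to be set up by the caller; this fragment is illustrative only):

	unsigned long pfn;
	int ret = -EINVAL;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, uaddr);
	if (vma && vma->vm_start <= uaddr && (vma->vm_flags & VM_PFNMAP))
		ret = follow_pfn(vma, uaddr, &pfn);
	up_read(&mm->mmap_sem);
	/* on success, pfn names the physical frame backing uaddr */
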
+
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+int follow_phys(struct vm_area_struct *vma,
+               unsigned long address, unsigned int flags,
+               unsigned long *prot, resource_size_t *phys)
+{
+       int ret = -EINVAL;
+       pte_t *ptep, pte;
+       spinlock_t *ptl;
+
+       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+               goto out;
+
+       if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
+               goto out;
+       pte = *ptep;
+
+       if ((flags & FOLL_WRITE) && !pte_write(pte))
+               goto unlock;
+
+       *prot = pgprot_val(pte_pgprot(pte));
+       *phys = (resource_size_t)pte_pfn(pte) << PAGE_SHIFT;
+
+       ret = 0;
+unlock:
+       pte_unmap_unlock(ptep, ptl);
+out:
+       return ret;
+}
+
+int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+                       void *buf, int len, int write)
+{
+       resource_size_t phys_addr;
+       unsigned long prot = 0;
+       void __iomem *maddr;
+       int offset = addr & (PAGE_SIZE-1);
+
+       if (follow_phys(vma, addr, write, &prot, &phys_addr))
+               return -EINVAL;
+
+       maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
+       if (write)
+               memcpy_toio(maddr + offset, buf, len);
+       else
+               memcpy_fromio(buf, maddr + offset, len);
+       iounmap(maddr);
+
+       return len;
+}
+#endif
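
generic_access_phys() is meant to be plugged into the vm_ops->access hook so that access_process_vm() below (and thus ptrace and /proc/<pid>/mem) can read and write VM_IO/VM_PFNMAP mappings. A driver exposing an ioremapped region could plausibly wire it up as follows (my_mmap and the ops name are hypothetical; remap_pfn_range() itself marks the vma VM_IO | VM_PFNMAP):

static struct vm_operations_struct my_phys_vm_ops = {
	.access = generic_access_phys,
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &my_phys_vm_ops;
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
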
+
 /*
  * Access another process' address space.
  * Source/target buffer must be kernel space,
@@ -2681,7 +3248,6 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
 {
        struct mm_struct *mm;
        struct vm_area_struct *vma;
-       struct page *page;
        void *old_buf = buf;
 
        mm = get_task_mm(tsk);
@@ -2693,28 +3259,44 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
        while (len) {
                int bytes, ret, offset;
                void *maddr;
+               struct page *page = NULL;
 
                ret = get_user_pages(tsk, mm, addr, 1,
                                write, 1, &page, &vma);
-               if (ret <= 0)
-                       break;
-
-               bytes = len;
-               offset = addr & (PAGE_SIZE-1);
-               if (bytes > PAGE_SIZE-offset)
-                       bytes = PAGE_SIZE-offset;
-
-               maddr = kmap(page);
-               if (write) {
-                       copy_to_user_page(vma, page, addr,
-                                         maddr + offset, buf, bytes);
-                       set_page_dirty_lock(page);
+               if (ret <= 0) {
+                       /*
+                        * Check if this is a VM_IO | VM_PFNMAP VMA, which
+                        * we can access using slightly different code.
+                        */
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+                       vma = find_vma(mm, addr);
+                       if (!vma)
+                               break;
+                       if (vma->vm_ops && vma->vm_ops->access)
+                               ret = vma->vm_ops->access(vma, addr, buf,
+                                                         len, write);
+                       if (ret <= 0)
+#endif
+                               break;
+                       bytes = ret;
                } else {
-                       copy_from_user_page(vma, page, addr,
-                                           buf, maddr + offset, bytes);
+                       bytes = len;
+                       offset = addr & (PAGE_SIZE-1);
+                       if (bytes > PAGE_SIZE-offset)
+                               bytes = PAGE_SIZE-offset;
+
+                       maddr = kmap(page);
+                       if (write) {
+                               copy_to_user_page(vma, page, addr,
+                                                 maddr + offset, buf, bytes);
+                               set_page_dirty_lock(page);
+                       } else {
+                               copy_from_user_page(vma, page, addr,
+                                                   buf, maddr + offset, bytes);
+                       }
+                       kunmap(page);
+                       page_cache_release(page);
                }
-               kunmap(page);
-               page_cache_release(page);
                len -= bytes;
                buf += bytes;
                addr += bytes;
@@ -2762,3 +3344,27 @@ void print_vma_addr(char *prefix, unsigned long ip)
        }
        up_read(&current->mm->mmap_sem);
 }
+
+#ifdef CONFIG_PROVE_LOCKING
+void might_fault(void)
+{
+       /*
+        * Some code (nfs/sunrpc) uses socket ops on kernel memory while
+        * holding the mmap_sem. This is safe because kernel memory doesn't
+        * get paged out, so we will never actually fault; without this check
+        * the annotations below would generate false positives.
+        */
+       if (segment_eq(get_fs(), KERNEL_DS))
+               return;
+
+       might_sleep();
+       /*
+        * It would be nicer to annotate only those paths that are not under
+        * pagefault_disable; however, that requires a larger audit and
+        * providing helpers like get_user_atomic.
+        */
+       if (!in_atomic() && current->mm)
+               might_lock_read(&current->mm->mmap_sem);
+}
+EXPORT_SYMBOL(might_fault);
+#endif
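
might_fault() is meant to be called from the user-copy primitives so lockdep can catch code that touches user memory while holding locks that would deadlock against mmap_sem. A sketch of the kind of wrapper that carries the annotation (my_copy_from_user is made up for illustration; the real annotations live in the uaccess helpers):

static inline unsigned long
my_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();	/* may sleep and may take mmap_sem to service the fault */
	return copy_from_user(to, from, n);
}
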