mm: gup persist for write permission
diff --git a/mm/memory.c b/mm/memory.c
index 8735032..f594bb6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -51,6 +51,7 @@
 #include <linux/init.h>
 #include <linux/writeback.h>
 #include <linux/memcontrol.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
@@ -61,6 +62,8 @@
 #include <linux/swapops.h>
 #include <linux/elf.h>
 
+#include "internal.h"
+
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 /* use the per-pgdat data instead for discontigmem - mbligh */
 unsigned long max_mapnr;
@@ -211,7 +214,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
  *
  * Must be called with pagetable lock held.
  */
-void free_pgd_range(struct mmu_gather **tlb,
+void free_pgd_range(struct mmu_gather *tlb,
                        unsigned long addr, unsigned long end,
                        unsigned long floor, unsigned long ceiling)
 {
@@ -262,16 +265,16 @@ void free_pgd_range(struct mmu_gather **tlb,
                return;
 
        start = addr;
-       pgd = pgd_offset((*tlb)->mm, addr);
+       pgd = pgd_offset(tlb->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
-               free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
+               free_pud_range(tlb, pgd, addr, next, floor, ceiling);
        } while (pgd++, addr = next, addr != end);
 }
 
-void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
+void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
                unsigned long floor, unsigned long ceiling)
 {
        while (vma) {
@@ -372,7 +375,8 @@ static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
  *
  * The calling function must still handle the error.
  */
-void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
+static void print_bad_pte(struct vm_area_struct *vma, pte_t pte,
+                         unsigned long vaddr)
 {
        printk(KERN_ERR "Bad pte = %08llx, process = %s, "
                        "vm_flags = %lx, vaddr = %lx\n",
@@ -649,6 +653,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        unsigned long next;
        unsigned long addr = vma->vm_start;
        unsigned long end = vma->vm_end;
+       int ret;
 
        /*
         * Don't copy ptes where a page fault will fill them correctly.
@@ -664,17 +669,43 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        if (is_vm_hugetlb_page(vma))
                return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 
+       if (unlikely(is_pfn_mapping(vma))) {
+               /*
+                * We do not free on error cases below as remove_vma
+                * gets called on error from higher level routine
+                */
+               ret = track_pfn_vma_copy(vma);
+               if (ret)
+                       return ret;
+       }
+
+       /*
+        * We need to invalidate the secondary MMU mappings only when
+        * there could be a permission downgrade on the ptes of the
+        * parent mm. And a permission downgrade will only happen if
+        * is_cow_mapping() returns true.
+        */
+       if (is_cow_mapping(vma->vm_flags))
+               mmu_notifier_invalidate_range_start(src_mm, addr, end);
+
+       ret = 0;
        dst_pgd = pgd_offset(dst_mm, addr);
        src_pgd = pgd_offset(src_mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(src_pgd))
                        continue;
-               if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
-                                               vma, addr, next))
-                       return -ENOMEM;
+               if (unlikely(copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
+                                           vma, addr, next))) {
+                       ret = -ENOMEM;
+                       break;
+               }
        } while (dst_pgd++, src_pgd++, addr = next, addr != end);
-       return 0;
+
+       if (is_cow_mapping(vma->vm_flags))
+               mmu_notifier_invalidate_range_end(src_mm,
+                                                 vma->vm_start, end);
+       return ret;
 }
 
 static unsigned long zap_pte_range(struct mmu_gather *tlb,
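The invalidate_range_start/end pair added above brackets the whole copy so that any secondary MMU attached to the parent mm can drop its mappings before the parent's ptes are write-protected for COW. A minimal sketch of the consumer side these calls assume, using the 2.6.28-era mmu_notifier API; the names my_mn and my_invalidate_range_start are illustrative, not part of this patch:

/* Hypothetical secondary-MMU user: the hooks above guarantee its shadow
 * mappings are gone before the parent ptes are downgraded for COW. */
static void my_invalidate_range_start(struct mmu_notifier *mn,
				      struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
	/* tear down any shadow ptes covering [start, end) */
}

static const struct mmu_notifier_ops my_mmu_ops = {
	.invalidate_range_start	= my_invalidate_range_start,
};

static struct mmu_notifier my_mn = { .ops = &my_mmu_ops };
/* registered once per mm with mmu_notifier_register(&my_mn, mm) */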
@@ -736,8 +767,9 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                        else {
                                if (pte_dirty(ptent))
                                        set_page_dirty(page);
-                               if (pte_young(ptent))
-                                       SetPageReferenced(page);
+                               if (pte_young(ptent) &&
+                                   likely(!VM_SequentialReadHint(vma)))
+                                       mark_page_accessed(page);
                                file_rss--;
                        }
                        page_remove_rmap(page, vma);
@@ -878,7 +910,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
        unsigned long start = start_addr;
        spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
        int fullmm = (*tlbp)->fullmm;
+       struct mm_struct *mm = vma->vm_mm;
 
+       mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
        for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
                unsigned long end;
 
@@ -892,6 +926,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
                if (vma->vm_flags & VM_ACCOUNT)
                        *nr_accounted += (end - start) >> PAGE_SHIFT;
 
+               if (unlikely(is_pfn_mapping(vma)))
+                       untrack_pfn_vma(vma, 0, 0);
+
                while (start != end) {
                        if (!tlb_start_valid) {
                                tlb_start = start;
@@ -899,9 +936,23 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
                        }
 
                        if (unlikely(is_vm_hugetlb_page(vma))) {
-                               unmap_hugepage_range(vma, start, end);
-                               zap_work -= (end - start) /
-                                               (HPAGE_SIZE / PAGE_SIZE);
+                               /*
+                                * It is undesirable to test vma->vm_file as it
+                                * should be non-null for valid hugetlb area.
+                                * However, vm_file will be NULL in the error
+                                * cleanup path of do_mmap_pgoff. When
+                                * hugetlbfs ->mmap method fails,
+                                * do_mmap_pgoff() nullifies vma->vm_file
+                                * before calling this function to clean up.
+                                * Since no pte has actually been setup, it is
+                                * safe to do nothing in this case.
+                                */
+                               if (vma->vm_file) {
+                                       unmap_hugepage_range(vma, start, end, NULL);
+                                       zap_work -= (end - start) /
+                                       pages_per_huge_page(hstate_vma(vma));
+                               }
+
                                start = end;
                        } else
                                start = unmap_page_range(*tlbp, vma,
@@ -929,6 +980,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
                }
        }
 out:
+       mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
        return start;   /* which is now the end (or restart) address */
 }
 
@@ -956,6 +1008,29 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
        return end;
 }
 
+/**
+ * zap_vma_ptes - remove ptes mapping the vma
+ * @vma: vm_area_struct holding ptes to be zapped
+ * @address: starting address of pages to zap
+ * @size: number of bytes to zap
+ *
+ * This function only unmaps ptes assigned to VM_PFNMAP vmas.
+ *
+ * The entire address range must be fully contained within the vma.
+ *
+ * Returns 0 if successful.
+ */
+int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+               unsigned long size)
+{
+       if (address < vma->vm_start || address + size > vma->vm_end ||
+                       !(vma->vm_flags & VM_PFNMAP))
+               return -1;
+       zap_page_range(vma, address, size, NULL);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(zap_vma_ptes);
+
 /*
  * Do a quick page-table lookup for a single page.
  */
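The new zap_vma_ptes() export is aimed at drivers that manage VM_PFNMAP vmas and need to tear their ptes down without unmapping the vma itself. A hedged usage sketch, not from this patch; my_unmap_user_window is an assumed driver helper:

/* Hypothetical driver teardown: remove the ptes it previously inserted
 * into its VM_PFNMAP vma, e.g. before reusing the backing pfns. */
static void my_unmap_user_window(struct vm_area_struct *vma)
{
	int err;

	err = zap_vma_ptes(vma, vma->vm_start,
			   vma->vm_end - vma->vm_start);
	if (err)
		printk(KERN_WARNING "zap_vma_ptes failed: not a PFNMAP vma?\n");
}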
@@ -982,19 +1057,24 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                goto no_page_table;
 
        pud = pud_offset(pgd, address);
-       if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+       if (pud_none(*pud))
+               goto no_page_table;
+       if (pud_huge(*pud)) {
+               BUG_ON(flags & FOLL_GET);
+               page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
+               goto out;
+       }
+       if (unlikely(pud_bad(*pud)))
                goto no_page_table;
-       
+
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                goto no_page_table;
-
        if (pmd_huge(*pmd)) {
                BUG_ON(flags & FOLL_GET);
                page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
                goto out;
        }
-
        if (unlikely(pmd_bad(*pmd)))
                goto no_page_table;
 
@@ -1063,12 +1143,17 @@ static inline int use_zero_page(struct vm_area_struct *vma)
        return !vma->vm_ops || !vma->vm_ops->fault;
 }
 
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-               unsigned long start, int len, int write, int force,
+
+
+int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+                    unsigned long start, int len, int flags,
                struct page **pages, struct vm_area_struct **vmas)
 {
        int i;
-       unsigned int vm_flags;
+       unsigned int vm_flags = 0;
+       int write = !!(flags & GUP_FLAGS_WRITE);
+       int force = !!(flags & GUP_FLAGS_FORCE);
+       int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
 
        if (len <= 0)
                return 0;
@@ -1092,7 +1177,9 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        pud_t *pud;
                        pmd_t *pmd;
                        pte_t *pte;
-                       if (write) /* user gate pages are read-only */
+
+                       /* user gate pages are read-only */
+                       if (!ignore && write)
                                return i ? : -EFAULT;
                        if (pg > TASK_SIZE)
                                pgd = pgd_offset_k(pg);
@@ -1124,8 +1211,9 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        continue;
                }
 
-               if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
-                               || !(vm_flags & vma->vm_flags))
+               if (!vma ||
+                   (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
+                   (!ignore && !(vm_flags & vma->vm_flags)))
                        return i ? : -EFAULT;
 
                if (is_vm_hugetlb_page(vma)) {
@@ -1176,9 +1264,15 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                 * do_wp_page has broken COW when necessary,
                                 * even if maybe_mkwrite decided not to set
                                 * pte_write. We can thus safely do subsequent
-                                * page lookups as if they were reads.
+                                * page lookups as if they were reads. But only
+                                * do so when looping for pte_write is futile:
+                                * in some cases userspace may also be wanting
+                                * to write to the gotten user page, which a
+                                * read fault here might prevent (a readonly
+                                * page might get reCOWed by userspace write).
                                 */
-                               if (ret & VM_FAULT_WRITE)
+                               if ((ret & VM_FAULT_WRITE) &&
+                                   !(vma->vm_flags & VM_WRITE))
                                        foll_flags &= ~FOLL_WRITE;
 
                                cond_resched();
@@ -1200,6 +1294,23 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
        } while (len);
        return i;
 }
+
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+               unsigned long start, int len, int write, int force,
+               struct page **pages, struct vm_area_struct **vmas)
+{
+       int flags = 0;
+
+       if (write)
+               flags |= GUP_FLAGS_WRITE;
+       if (force)
+               flags |= GUP_FLAGS_FORCE;
+
+       return __get_user_pages(tsk, mm,
+                               start, len, flags,
+                               pages, vmas);
+}
+
 EXPORT_SYMBOL(get_user_pages);
 
 pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
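The wrapper keeps the old get_user_pages() interface and simply translates write/force into GUP_FLAGS_* before calling __get_user_pages(). For reference, a typical caller looks like the sketch below; my_pin_user_buffer is illustrative, not part of this patch:

/* Sketch: pin nr_pages user pages for write starting at uaddr.
 * Returns the number of pages pinned, or a negative errno. */
static int my_pin_user_buffer(unsigned long uaddr, int nr_pages,
			      struct page **pages)
{
	int ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
			     nr_pages, 1 /* write */, 0 /* force */,
			     pages, NULL);
	up_read(&current->mm->mmap_sem);
	return ret;	/* caller releases each page with put_page() */
}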
@@ -1230,18 +1341,14 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
        pte_t *pte;
        spinlock_t *ptl;
 
-       retval = mem_cgroup_charge(page, mm, GFP_KERNEL);
-       if (retval)
-               goto out;
-
        retval = -EINVAL;
        if (PageAnon(page))
-               goto out_uncharge;
+               goto out;
        retval = -ENOMEM;
        flush_dcache_page(page);
        pte = get_locked_pte(mm, addr, &ptl);
        if (!pte)
-               goto out_uncharge;
+               goto out;
        retval = -EBUSY;
        if (!pte_none(*pte))
                goto out_unlock;
@@ -1257,8 +1364,6 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
        return retval;
 out_unlock:
        pte_unmap_unlock(pte, ptl);
-out_uncharge:
-       mem_cgroup_uncharge_page(page);
 out:
        return retval;
 }
@@ -1345,6 +1450,7 @@ out:
 int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn)
 {
+       int ret;
        /*
         * Technically, architectures with pte_special can avoid all these
         * restrictions (same for remap_pfn_range).  However we would like
@@ -1359,7 +1465,15 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return -EFAULT;
-       return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+       if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE))
+               return -EINVAL;
+
+       ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+
+       if (ret)
+               untrack_pfn_vma(vma, pfn, PAGE_SIZE);
+
+       return ret;
 }
 EXPORT_SYMBOL(vm_insert_pfn);
 
@@ -1490,14 +1604,17 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
         * behaviour that some programs depend on. We mark the "original"
         * un-COW'ed pages by matching them up with "vma->vm_pgoff".
         */
-       if (is_cow_mapping(vma->vm_flags)) {
-               if (addr != vma->vm_start || end != vma->vm_end)
-                       return -EINVAL;
+       if (addr == vma->vm_start && end == vma->vm_end)
                vma->vm_pgoff = pfn;
-       }
+       else if (is_cow_mapping(vma->vm_flags))
+               return -EINVAL;
 
        vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
+       err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size));
+       if (err)
+               return -EINVAL;
+
        BUG_ON(addr >= end);
        pfn -= addr >> PAGE_SHIFT;
        pgd = pgd_offset(mm, addr);
@@ -1509,6 +1626,10 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
+
+       if (err)
+               untrack_pfn_vma(vma, pfn, PAGE_ALIGN(size));
+
        return err;
 }
 EXPORT_SYMBOL(remap_pfn_range);
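track_pfn_vma_new()/untrack_pfn_vma() hook into the usual driver pattern of calling remap_pfn_range() from a file_operations mmap method. A minimal sketch of that pattern, with my_phys_base standing in for a device's physical base address (illustrative only):

/* Hypothetical character-device mmap: map a physical window to userspace. */
static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	return remap_pfn_range(vma, vma->vm_start,
			       my_phys_base >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}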
@@ -1530,6 +1651,8 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 
        BUG_ON(pmd_huge(*pmd));
 
+       arch_enter_lazy_mmu_mode();
+
        token = pmd_pgtable(*pmd);
 
        do {
@@ -1538,6 +1661,8 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
                        break;
        } while (pte++, addr += PAGE_SIZE, addr != end);
 
+       arch_leave_lazy_mmu_mode();
+
        if (mm != &init_mm)
                pte_unmap_unlock(pte-1, ptl);
        return err;
@@ -1551,6 +1676,8 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
        unsigned long next;
        int err;
 
+       BUG_ON(pud_huge(*pud));
+
        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
                return -ENOMEM;
@@ -1592,10 +1719,11 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
 {
        pgd_t *pgd;
        unsigned long next;
-       unsigned long end = addr + size;
+       unsigned long start = addr, end = addr + size;
        int err;
 
        BUG_ON(addr >= end);
+       mmu_notifier_invalidate_range_start(mm, start, end);
        pgd = pgd_offset(mm, addr);
        do {
                next = pgd_addr_end(addr, end);
@@ -1603,6 +1731,7 @@ int apply_to_page_range(struct mm_struct *mm, unsigned long addr,
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
+       mmu_notifier_invalidate_range_end(mm, start, end);
        return err;
 }
 EXPORT_SYMBOL_GPL(apply_to_page_range);
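apply_to_page_range() walks (and if necessary allocates) page tables for the range and invokes a pte_fn_t on every pte, now bracketed by the mmu_notifier calls and lazy-MMU mode added above. A hedged example callback; my_count_present is illustrative, not from this patch:

/* Count present ptes in a range via apply_to_page_range(). */
static int my_count_present(pte_t *pte, pgtable_t token,
			    unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (pte_present(*pte))
		(*count)++;
	return 0;
}

/* Usage: apply_to_page_range(mm, start, len, my_count_present, &count); */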
@@ -1719,7 +1848,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
         * not dirty accountable.
         */
        if (PageAnon(old_page)) {
-               if (!TestSetPageLocked(old_page)) {
+               if (trylock_page(old_page)) {
                        reuse = can_share_swap_page(old_page);
                        unlock_page(old_page);
                }
@@ -1788,6 +1917,15 @@ gotten:
        new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
        if (!new_page)
                goto oom;
+       /*
+        * Don't let another task, with possibly unlocked vma,
+        * keep the mlocked page.
+        */
+       if (vma->vm_flags & VM_LOCKED) {
+               lock_page(old_page);    /* for LRU manipulation */
+               clear_page_mlock(old_page);
+               unlock_page(old_page);
+       }
        cow_user_page(new_page, old_page, address, vma);
        __SetPageUptodate(new_page);
 
@@ -1815,12 +1953,10 @@ gotten:
                 * seen in the presence of one thread doing SMC and another
                 * thread doing COW.
                 */
-               ptep_clear_flush(vma, address, page_table);
+               ptep_clear_flush_notify(vma, address, page_table);
+               page_add_new_anon_rmap(new_page, vma, address);
                set_pte_at(mm, address, page_table, entry);
                update_mmu_cache(vma, address, entry);
-               lru_cache_add_active(new_page);
-               page_add_new_anon_rmap(new_page, vma, address);
-
                if (old_page) {
                        /*
                         * Only after switching the pte to the new page may
@@ -2137,7 +2273,7 @@ int vmtruncate(struct inode * inode, loff_t offset)
                unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
        }
 
-       if (inode->i_op && inode->i_op->truncate)
+       if (inode->i_op->truncate)
                inode->i_op->truncate(inode);
        return 0;
 
@@ -2157,7 +2293,7 @@ int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end)
         * a way to truncate a range of blocks (punch a hole) -
         * we should return failure right now.
         */
-       if (!inode->i_op || !inode->i_op->truncate_range)
+       if (!inode->i_op->truncate_range)
                return -ENOSYS;
 
        mutex_lock(&inode->i_mutex);
@@ -2218,16 +2354,17 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                count_vm_event(PGMAJFAULT);
        }
 
+       mark_page_accessed(page);
+
+       lock_page(page);
+       delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
+
        if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
-               delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
                ret = VM_FAULT_OOM;
+               unlock_page(page);
                goto out;
        }
 
-       mark_page_accessed(page);
-       lock_page(page);
-       delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
-
        /*
         * Back out if somebody else already faulted in this pte.
         */
@@ -2254,7 +2391,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        page_add_anon_rmap(page, vma, address);
 
        swap_free(entry);
-       if (vm_swap_full())
+       if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
                remove_exclusive_swap_page(page);
        unlock_page(page);
 
@@ -2312,7 +2449,6 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        if (!pte_none(*page_table))
                goto release;
        inc_mm_counter(mm, anon_rss);
-       lru_cache_add_active(page);
        page_add_new_anon_rmap(page, vma, address);
        set_pte_at(mm, address, page_table, entry);
 
@@ -2353,6 +2489,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        struct page *page;
        pte_t entry;
        int anon = 0;
+       int charged = 0;
        struct page *dirty_page = NULL;
        struct vm_fault vmf;
        int ret;
@@ -2393,6 +2530,18 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                ret = VM_FAULT_OOM;
                                goto out;
                        }
+                       if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
+                               ret = VM_FAULT_OOM;
+                               page_cache_release(page);
+                               goto out;
+                       }
+                       charged = 1;
+                       /*
+                        * Don't let another task, with possibly unlocked vma,
+                        * keep the mlocked page.
+                        */
+                       if (vma->vm_flags & VM_LOCKED)
+                               clear_page_mlock(vmf.page);
                        copy_user_highpage(page, vmf.page, address, vma);
                        __SetPageUptodate(page);
                } else {
@@ -2427,11 +2576,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
        }
 
-       if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
-               ret = VM_FAULT_OOM;
-               goto out;
-       }
-
        page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 
        /*
@@ -2450,11 +2594,9 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                entry = mk_pte(page, vma->vm_page_prot);
                if (flags & FAULT_FLAG_WRITE)
                        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-               set_pte_at(mm, address, page_table, entry);
                if (anon) {
-                        inc_mm_counter(mm, anon_rss);
-                        lru_cache_add_active(page);
-                        page_add_new_anon_rmap(page, vma, address);
+                       inc_mm_counter(mm, anon_rss);
+                       page_add_new_anon_rmap(page, vma, address);
                } else {
                        inc_mm_counter(mm, file_rss);
                        page_add_file_rmap(page);
@@ -2463,11 +2605,13 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                get_page(dirty_page);
                        }
                }
+               set_pte_at(mm, address, page_table, entry);
 
                /* no need to invalidate: a not-present page won't be cached */
                update_mmu_cache(vma, address, entry);
        } else {
-               mem_cgroup_uncharge_page(page);
+               if (charged)
+                       mem_cgroup_uncharge_page(page);
                if (anon)
                        page_cache_release(page);
                else
@@ -2695,7 +2839,7 @@ int make_pages_present(unsigned long addr, unsigned long end)
 
        vma = find_vma(current->mm, addr);
        if (!vma)
-               return -1;
+               return -ENOMEM;
        write = (vma->vm_flags & VM_WRITE) != 0;
        BUG_ON(addr >= end);
        BUG_ON(end > vma->vm_end);
@@ -2704,7 +2848,7 @@ int make_pages_present(unsigned long addr, unsigned long end)
                        len, write, 0, NULL, NULL);
        if (ret < 0)
                return ret;
-       return ret == len ? 0 : -1;
+       return ret == len ? 0 : -EFAULT;
 }
 
 #if !defined(__HAVE_ARCH_GATE_AREA)
@@ -2752,9 +2896,9 @@ int in_gate_area_no_task(unsigned long addr)
 #endif /* __HAVE_ARCH_GATE_AREA */
 
 #ifdef CONFIG_HAVE_IOREMAP_PROT
-static resource_size_t follow_phys(struct vm_area_struct *vma,
-                       unsigned long address, unsigned int flags,
-                       unsigned long *prot)
+int follow_phys(struct vm_area_struct *vma,
+               unsigned long address, unsigned int flags,
+               unsigned long *prot, resource_size_t *phys)
 {
        pgd_t *pgd;
        pud_t *pud;
@@ -2763,24 +2907,26 @@ static resource_size_t follow_phys(struct vm_area_struct *vma,
        spinlock_t *ptl;
        resource_size_t phys_addr = 0;
        struct mm_struct *mm = vma->vm_mm;
+       int ret = -EINVAL;
 
-       VM_BUG_ON(!(vma->vm_flags & (VM_IO | VM_PFNMAP)));
+       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+               goto out;
 
        pgd = pgd_offset(mm, address);
        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-               goto no_page_table;
+               goto out;
 
        pud = pud_offset(pgd, address);
        if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-               goto no_page_table;
+               goto out;
 
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-               goto no_page_table;
+               goto out;
 
        /* We cannot handle huge page PFN maps. Luckily they don't exist. */
        if (pmd_huge(*pmd))
-               goto no_page_table;
+               goto out;
 
        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (!ptep)
@@ -2795,13 +2941,13 @@ static resource_size_t follow_phys(struct vm_area_struct *vma,
        phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */
 
        *prot = pgprot_val(pte_pgprot(pte));
+       *phys = phys_addr;
+       ret = 0;
 
 unlock:
        pte_unmap_unlock(ptep, ptl);
 out:
-       return phys_addr;
-no_page_table:
-       return 0;
+       return ret;
 }
 
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
@@ -2812,12 +2958,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
        void *maddr;
        int offset = addr & (PAGE_SIZE-1);
 
-       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
-               return -EINVAL;
-
-       phys_addr = follow_phys(vma, addr, write, &prot);
-
-       if (!phys_addr)
+       if (follow_phys(vma, addr, write, &prot, &phys_addr))
                return -EINVAL;
 
        maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
@@ -2936,3 +3077,18 @@ void print_vma_addr(char *prefix, unsigned long ip)
        }
        up_read(&current->mm->mmap_sem);
 }
+
+#ifdef CONFIG_PROVE_LOCKING
+void might_fault(void)
+{
+       might_sleep();
+       /*
+        * it would be nicer only to annotate paths which are not under
+        * pagefault_disable, however that requires a larger audit and
+        * providing helpers like get_user_atomic.
+        */
+       if (!in_atomic() && current->mm)
+               might_lock_read(&current->mm->mmap_sem);
+}
+EXPORT_SYMBOL(might_fault);
+#endif
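might_fault() is intended as an annotation in user-copy paths, placed before an access that may sleep or take mmap_sem, so lockdep can catch lock-ordering problems early. A hedged sketch of such a call site; my_copy_to_user_checked is illustrative, not part of this patch:

/* Illustrative wrapper: annotate a user-space access for lockdep. */
static inline unsigned long
my_copy_to_user_checked(void __user *to, const void *from, unsigned long n)
{
	might_fault();			/* may sleep / take mmap_sem */
	return copy_to_user(to, from, n);
}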