Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
diff --git a/mm/memory.c b/mm/memory.c
index 7b9db65..4126dd1 100644
@@ -52,6 +52,9 @@
 #include <linux/writeback.h>
 #include <linux/memcontrol.h>
 #include <linux/mmu_notifier.h>
+#include <linux/kallsyms.h>
+#include <linux/swapops.h>
+#include <linux/elf.h>
 
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
@@ -59,9 +62,6 @@
 #include <asm/tlbflush.h>
 #include <asm/pgtable.h>
 
-#include <linux/swapops.h>
-#include <linux/elf.h>
-
 #include "internal.h"
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -375,15 +375,65 @@ static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
  *
  * The calling function must still handle the error.
  */
-static void print_bad_pte(struct vm_area_struct *vma, pte_t pte,
-                         unsigned long vaddr)
-{
-       printk(KERN_ERR "Bad pte = %08llx, process = %s, "
-                       "vm_flags = %lx, vaddr = %lx\n",
-               (long long)pte_val(pte),
-               (vma->vm_mm == current->mm ? current->comm : "???"),
-               vma->vm_flags, vaddr);
+static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
+                         pte_t pte, struct page *page)
+{
+       pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
+       pud_t *pud = pud_offset(pgd, addr);
+       pmd_t *pmd = pmd_offset(pud, addr);
+       struct address_space *mapping;
+       pgoff_t index;
+       static unsigned long resume;
+       static unsigned long nr_shown;
+       static unsigned long nr_unshown;
+
+       /*
+        * Allow a burst of 60 reports, then keep quiet for that minute;
+        * or allow a steady drip of one report per second.
+        */
+       if (nr_shown == 60) {
+               if (time_before(jiffies, resume)) {
+                       nr_unshown++;
+                       return;
+               }
+               if (nr_unshown) {
+                       printk(KERN_ALERT
+                               "BUG: Bad page map: %lu messages suppressed\n",
+                               nr_unshown);
+                       nr_unshown = 0;
+               }
+               nr_shown = 0;
+       }
+       if (nr_shown++ == 0)
+               resume = jiffies + 60 * HZ;
+
+       mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
+       index = linear_page_index(vma, addr);
+
+       printk(KERN_ALERT
+               "BUG: Bad page map in process %s  pte:%08llx pmd:%08llx\n",
+               current->comm,
+               (long long)pte_val(pte), (long long)pmd_val(*pmd));
+       if (page) {
+               printk(KERN_ALERT
+               "page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
+               page, (void *)page->flags, page_count(page),
+               page_mapcount(page), page->mapping, page->index);
+       }
+       printk(KERN_ALERT
+               "addr:%p vm_flags:%08lx anon_vma:%p mapping:%p index:%lx\n",
+               (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
+       /*
+        * Choose text because data symbols depend on CONFIG_KALLSYMS_ALL=y
+        */
+       if (vma->vm_ops)
+               print_symbol(KERN_ALERT "vma->vm_ops->fault: %s\n",
+                               (unsigned long)vma->vm_ops->fault);
+       if (vma->vm_file && vma->vm_file->f_op)
+               print_symbol(KERN_ALERT "vma->vm_file->f_op->mmap: %s\n",
+                               (unsigned long)vma->vm_file->f_op->mmap);
        dump_stack();
+       add_taint(TAINT_BAD_PAGE);
 }
 
 static inline int is_cow_mapping(unsigned int flags)
@@ -441,21 +491,18 @@ static inline int is_cow_mapping(unsigned int flags)
 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                                pte_t pte)
 {
-       unsigned long pfn;
+       unsigned long pfn = pte_pfn(pte);
 
        if (HAVE_PTE_SPECIAL) {
-               if (likely(!pte_special(pte))) {
-                       VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
-                       return pte_page(pte);
-               }
-               VM_BUG_ON(!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
+               if (likely(!pte_special(pte)))
+                       goto check_pfn;
+               if (!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)))
+                       print_bad_pte(vma, addr, pte, NULL);
                return NULL;
        }
 
        /* !HAVE_PTE_SPECIAL case follows: */
 
-       pfn = pte_pfn(pte);
-
        if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
                if (vma->vm_flags & VM_MIXEDMAP) {
                        if (!pfn_valid(pfn))
@@ -471,11 +518,14 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
                }
        }
 
-       VM_BUG_ON(!pfn_valid(pfn));
+check_pfn:
+       if (unlikely(pfn > highest_memmap_pfn)) {
+               print_bad_pte(vma, addr, pte, NULL);
+               return NULL;
+       }
 
        /*
         * NOTE! We still have PageReserved() pages in the page tables.
-        *
         * eg. VDSO mappings can cause them to exist.
         */
 out:
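
For context: callers already have to treat a NULL return from vm_normal_page() as "no struct page behind this pte"; with the check_pfn change a corrupt pfn is reported via print_bad_pte() and then handled the same way. A minimal caller sketch (illustrative only, not taken from this diff):

	struct page *page = vm_normal_page(vma, addr, ptent);

	if (!page)
		return;	/* raw PFN/special mapping, or a bad pte already reported */
	/* only now is it safe to dereference the struct page */
	if (pte_dirty(ptent))
		set_page_dirty(page);
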
@@ -767,11 +817,14 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                        else {
                                if (pte_dirty(ptent))
                                        set_page_dirty(page);
-                               if (pte_young(ptent))
-                                       SetPageReferenced(page);
+                               if (pte_young(ptent) &&
+                                   likely(!VM_SequentialReadHint(vma)))
+                                       mark_page_accessed(page);
                                file_rss--;
                        }
-                       page_remove_rmap(page, vma);
+                       page_remove_rmap(page);
+                       if (unlikely(page_mapcount(page) < 0))
+                               print_bad_pte(vma, addr, ptent, page);
                        tlb_remove_page(tlb, page);
                        continue;
                }
@@ -781,8 +834,12 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                 */
                if (unlikely(details))
                        continue;
-               if (!pte_file(ptent))
-                       free_swap_and_cache(pte_to_swp_entry(ptent));
+               if (pte_file(ptent)) {
+                       if (unlikely(!(vma->vm_flags & VM_NONLINEAR)))
+                               print_bad_pte(vma, addr, ptent, NULL);
+               } else if (unlikely(
+                          !free_swap_and_cache(pte_to_swp_entry(ptent))))
+                       print_bad_pte(vma, addr, ptent, NULL);
                pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
        } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
 
@@ -1094,6 +1151,11 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                if ((flags & FOLL_WRITE) &&
                    !pte_dirty(pte) && !PageDirty(page))
                        set_page_dirty(page);
+               /*
+                * pte_mkyoung() would be more correct here, but atomic care
+                * is needed to avoid losing the dirty bit: it is easier to use
+                * mark_page_accessed().
+                */
                mark_page_accessed(page);
        }
 unlock:
@@ -1153,6 +1215,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
        int write = !!(flags & GUP_FLAGS_WRITE);
        int force = !!(flags & GUP_FLAGS_FORCE);
        int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
+       int ignore_sigkill = !!(flags & GUP_FLAGS_IGNORE_SIGKILL);
 
        if (len <= 0)
                return 0;
@@ -1231,12 +1294,15 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        struct page *page;
 
                        /*
-                        * If tsk is ooming, cut off its access to large memory
-                        * allocations. It has a pending SIGKILL, but it can't
-                        * be processed until returning to user space.
+                        * If we have a pending SIGKILL, don't keep faulting
+                        * pages and potentially allocating memory, unless
+                        * current is handling munlock--e.g., on exit. In
+                        * that case, we are not allocating memory.  Rather,
+                        * we're only unlocking already resident/mapped pages.
                         */
-                       if (unlikely(test_tsk_thread_flag(tsk, TIF_MEMDIE)))
-                               return i ? i : -ENOMEM;
+                       if (unlikely(!ignore_sigkill &&
+                                       fatal_signal_pending(current)))
+                               return i ? i : -ERESTARTSYS;
 
                        if (write)
                                foll_flags |= FOLL_WRITE;
@@ -1263,9 +1329,15 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                 * do_wp_page has broken COW when necessary,
                                 * even if maybe_mkwrite decided not to set
                                 * pte_write. We can thus safely do subsequent
-                                * page lookups as if they were reads.
+                                * page lookups as if they were reads. But only
+                                * do so when looping for pte_write is futile:
+                                * in some cases userspace may also be wanting
+                                * to write to the gotten user page, which a
+                                * read fault here might prevent (a readonly
+                                * page might get reCOWed by userspace write).
                                 */
-                               if (ret & VM_FAULT_WRITE)
+                               if ((ret & VM_FAULT_WRITE) &&
+                                   !(vma->vm_flags & VM_WRITE))
                                        foll_flags &= ~FOLL_WRITE;
 
                                cond_resched();
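
Worth noting for callers: __get_user_pages() can now return -ERESTARTSYS when a fatal signal is pending, where an OOM-killed task previously got -ENOMEM. A hypothetical caller (variable names invented for the sketch) simply propagates the error so the task can unwind and exit:

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, start, npages,
			     1 /* write */, 0 /* force */, pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (ret < 0)
		return ret;	/* may now be -ERESTARTSYS as well as -EFAULT/-ENOMEM */
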
@@ -1444,6 +1516,7 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                        unsigned long pfn)
 {
        int ret;
+       pgprot_t pgprot = vma->vm_page_prot;
        /*
         * Technically, architectures with pte_special can avoid all these
         * restrictions (same for remap_pfn_range).  However we would like
@@ -1458,10 +1531,10 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
 
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return -EFAULT;
-       if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE))
+       if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
                return -EINVAL;
 
-       ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+       ret = insert_pfn(vma, addr, pfn, pgprot);
 
        if (ret)
                untrack_pfn_vma(vma, pfn, PAGE_SIZE);
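
The pgprot is copied into a local here because track_pfn_vma_new() may now adjust it (the x86 PAT code can change the memory type) before insert_pfn() uses it. For context, a typical consumer of vm_insert_pfn() is a driver ->fault handler along these lines; mydev_fault() and MYDEV_PHYS_BASE are invented for the sketch:

	static int mydev_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		unsigned long off = vmf->pgoff << PAGE_SHIFT;
		unsigned long pfn = (MYDEV_PHYS_BASE + off) >> PAGE_SHIFT;

		/* the vma must have been marked VM_PFNMAP at mmap time */
		if (vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn))
			return VM_FAULT_SIGBUS;
		return VM_FAULT_NOPAGE;
	}
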
@@ -1597,16 +1670,24 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
         * behaviour that some programs depend on. We mark the "original"
         * un-COW'ed pages by matching them up with "vma->vm_pgoff".
         */
-       if (addr == vma->vm_start && end == vma->vm_end)
+       if (addr == vma->vm_start && end == vma->vm_end) {
                vma->vm_pgoff = pfn;
-       else if (is_cow_mapping(vma->vm_flags))
+               vma->vm_flags |= VM_PFN_AT_MMAP;
+       } else if (is_cow_mapping(vma->vm_flags))
                return -EINVAL;
 
        vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
 
-       err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size));
-       if (err)
+       err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
+       if (err) {
+               /*
+                * To indicate that track_pfn-related cleanup is not
+                * needed by the higher-level routine that calls unmap_vmas()
+                */
+               vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
+               vma->vm_flags &= ~VM_PFN_AT_MMAP;
                return -EINVAL;
+       }
 
        BUG_ON(addr >= end);
        pfn -= addr >> PAGE_SHIFT;
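
VM_PFN_AT_MMAP is only set on the whole-vma path, i.e. when a driver remaps its entire range from ->mmap, which remains the common pattern. A hypothetical example (mydev_mmap() and MYDEV_PHYS_BASE are invented):

	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long size = vma->vm_end - vma->vm_start;

		/* covers the whole vma, so remap_pfn_range() sets
		 * vma->vm_pgoff and VM_PFN_AT_MMAP itself */
		return remap_pfn_range(vma, vma->vm_start,
				       MYDEV_PHYS_BASE >> PAGE_SHIFT,
				       size, vma->vm_page_prot);
	}
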
@@ -1644,6 +1725,8 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 
        BUG_ON(pmd_huge(*pmd));
 
+       arch_enter_lazy_mmu_mode();
+
        token = pmd_pgtable(*pmd);
 
        do {
@@ -1652,6 +1735,8 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
                        break;
        } while (pte++, addr += PAGE_SIZE, addr != end);
 
+       arch_leave_lazy_mmu_mode();
+
        if (mm != &init_mm)
                pte_unmap_unlock(pte-1, ptl);
        return err;
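
These pte-level walks are driven through apply_to_page_range(), whose callback now also runs inside the lazy MMU section added above. A small hypothetical user (count_present() and count_range() are invented) looks like:

	static int count_present(pte_t *pte, pgtable_t token,
				 unsigned long addr, void *data)
	{
		if (pte_present(*pte))
			++*(unsigned long *)data;
		return 0;
	}

	static unsigned long count_range(struct mm_struct *mm,
					 unsigned long start, unsigned long size)
	{
		unsigned long present = 0;

		if (apply_to_page_range(mm, start, size, count_present, &present))
			return 0;
		return present;
	}
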
@@ -1837,10 +1922,21 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
         * not dirty accountable.
         */
        if (PageAnon(old_page)) {
-               if (trylock_page(old_page)) {
-                       reuse = can_share_swap_page(old_page);
-                       unlock_page(old_page);
+               if (!trylock_page(old_page)) {
+                       page_cache_get(old_page);
+                       pte_unmap_unlock(page_table, ptl);
+                       lock_page(old_page);
+                       page_table = pte_offset_map_lock(mm, pmd, address,
+                                                        &ptl);
+                       if (!pte_same(*page_table, orig_pte)) {
+                               unlock_page(old_page);
+                               page_cache_release(old_page);
+                               goto unlock;
+                       }
+                       page_cache_release(old_page);
                }
+               reuse = reuse_swap_page(old_page);
+               unlock_page(old_page);
        } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
                                        (VM_WRITE|VM_SHARED))) {
                /*
@@ -1849,6 +1945,15 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                 * get_user_pages(.write=1, .force=1).
                 */
                if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
+                       struct vm_fault vmf;
+                       int tmp;
+
+                       vmf.virtual_address = (void __user *)(address &
+                                                               PAGE_MASK);
+                       vmf.pgoff = old_page->index;
+                       vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
+                       vmf.page = old_page;
+
                        /*
                         * Notify the address space that the page is about to
                         * become writable so that it can prohibit this or wait
@@ -1860,8 +1965,21 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                        page_cache_get(old_page);
                        pte_unmap_unlock(page_table, ptl);
 
-                       if (vma->vm_ops->page_mkwrite(vma, old_page) < 0)
+                       tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
+                       if (unlikely(tmp &
+                                       (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
+                               ret = tmp;
                                goto unwritable_page;
+                       }
+                       if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
+                               lock_page(old_page);
+                               if (!old_page->mapping) {
+                                       ret = 0; /* retry the fault */
+                                       unlock_page(old_page);
+                                       goto unwritable_page;
+                               }
+                       } else
+                               VM_BUG_ON(!PageLocked(old_page));
 
                        /*
                         * Since we dropped the lock we need to revalidate
@@ -1871,9 +1989,11 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                         */
                        page_table = pte_offset_map_lock(mm, pmd, address,
                                                         &ptl);
-                       page_cache_release(old_page);
-                       if (!pte_same(*page_table, orig_pte))
+                       if (!pte_same(*page_table, orig_pte)) {
+                               unlock_page(old_page);
+                               page_cache_release(old_page);
                                goto unlock;
+                       }
 
                        page_mkwrite = 1;
                }
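
With this hunk ->page_mkwrite takes a struct vm_fault and may return VM_FAULT_ERROR bits, VM_FAULT_NOPAGE, or VM_FAULT_LOCKED (the page is handed back still locked). A hypothetical implementation under the new convention (mydev_page_mkwrite() is invented; the reservation step is a placeholder):

	static int mydev_page_mkwrite(struct vm_area_struct *vma,
				      struct vm_fault *vmf)
	{
		struct page *page = vmf->page;

		lock_page(page);
		if (!page->mapping) {
			/* raced with truncate: the caller will retry the fault */
			unlock_page(page);
			return 0;
		}
		/* e.g. reserve blocks or mark fs metadata dirty here */
		return VM_FAULT_LOCKED;	/* page stays locked for do_wp_page */
	}
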
@@ -1910,7 +2030,7 @@ gotten:
         * Don't let another task, with possibly unlocked vma,
         * keep the mlocked page.
         */
-       if (vma->vm_flags & VM_LOCKED) {
+       if ((vma->vm_flags & VM_LOCKED) && old_page) {
                lock_page(old_page);    /* for LRU manipulation */
                clear_page_mlock(old_page);
                unlock_page(old_page);
@@ -1918,7 +2038,7 @@ gotten:
        cow_user_page(new_page, old_page, address, vma);
        __SetPageUptodate(new_page);
 
-       if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
+       if (mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))
                goto oom_free_new;
 
        /*
@@ -1943,11 +2063,7 @@ gotten:
                 * thread doing COW.
                 */
                ptep_clear_flush_notify(vma, address, page_table);
-               SetPageSwapBacked(new_page);
-               lru_cache_add_active_or_unevictable(new_page, vma);
                page_add_new_anon_rmap(new_page, vma, address);
-
-//TODO:  is this safe?  do_anonymous_page() does it this way.
                set_pte_at(mm, address, page_table, entry);
                update_mmu_cache(vma, address, entry);
                if (old_page) {
@@ -1973,7 +2089,7 @@ gotten:
                         * mapcount is visible. So transitively, TLBs to
                         * old page will be flushed before it can be reused.
                         */
-                       page_remove_rmap(old_page, vma);
+                       page_remove_rmap(old_page);
                }
 
                /* Free the old page.. */
@@ -1989,9 +2105,6 @@ gotten:
 unlock:
        pte_unmap_unlock(page_table, ptl);
        if (dirty_page) {
-               if (vma->vm_file)
-                       file_update_time(vma->vm_file);
-
                /*
                 * Yes, Virginia, this is actually required to prevent a race
                 * with clear_page_dirty_for_io() from clearing the page dirty
@@ -2000,21 +2113,46 @@ unlock:
                 *
                 * do_no_page is protected similarly.
                 */
-               wait_on_page_locked(dirty_page);
-               set_page_dirty_balance(dirty_page, page_mkwrite);
+               if (!page_mkwrite) {
+                       wait_on_page_locked(dirty_page);
+                       set_page_dirty_balance(dirty_page, page_mkwrite);
+               }
                put_page(dirty_page);
+               if (page_mkwrite) {
+                       struct address_space *mapping = dirty_page->mapping;
+
+                       set_page_dirty(dirty_page);
+                       unlock_page(dirty_page);
+                       page_cache_release(dirty_page);
+                       if (mapping) {
+                               /*
+                                * Some device drivers do not set page.mapping
+                                * but still dirty their pages
+                                */
+                               balance_dirty_pages_ratelimited(mapping);
+                       }
+               }
+
+               /* file_update_time outside page_lock */
+               if (vma->vm_file)
+                       file_update_time(vma->vm_file);
        }
        return ret;
 oom_free_new:
        page_cache_release(new_page);
 oom:
-       if (old_page)
+       if (old_page) {
+               if (page_mkwrite) {
+                       unlock_page(old_page);
+                       page_cache_release(old_page);
+               }
                page_cache_release(old_page);
+       }
        return VM_FAULT_OOM;
 
 unwritable_page:
        page_cache_release(old_page);
-       return VM_FAULT_SIGBUS;
+       return ret;
 }
 
 /*
@@ -2314,6 +2452,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        struct page *page;
        swp_entry_t entry;
        pte_t pte;
+       struct mem_cgroup *ptr = NULL;
        int ret = 0;
 
        if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
@@ -2347,15 +2486,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                count_vm_event(PGMAJFAULT);
        }
 
-       mark_page_accessed(page);
-
        lock_page(page);
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
-       if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
+       if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr)) {
                ret = VM_FAULT_OOM;
-               unlock_page(page);
-               goto out;
+               goto out_page;
        }
 
        /*
@@ -2370,22 +2506,35 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto out_nomap;
        }
 
-       /* The page isn't present yet, go ahead with the fault. */
+       /*
+        * The page isn't present yet, go ahead with the fault.
+        *
+        * Be careful about the sequence of operations here.
+        * To get its accounting right, reuse_swap_page() must be called
+        * while the page is counted on swap but not yet in mapcount i.e.
+        * before page_add_anon_rmap() and swap_free(); try_to_free_swap()
+        * must be called after the swap_free(), or it will never succeed.
+        * Because delete_from_swap_cache() may be called by reuse_swap_page(),
+        * mem_cgroup_commit_charge_swapin() may not be able to find swp_entry
+        * in page->private. In this case, a record in swap_cgroup is silently
+        * discarded at swap_free().
+        */
 
        inc_mm_counter(mm, anon_rss);
        pte = mk_pte(page, vma->vm_page_prot);
-       if (write_access && can_share_swap_page(page)) {
+       if (write_access && reuse_swap_page(page)) {
                pte = maybe_mkwrite(pte_mkdirty(pte), vma);
                write_access = 0;
        }
-
        flush_icache_page(vma, page);
        set_pte_at(mm, address, page_table, pte);
        page_add_anon_rmap(page, vma, address);
+       /* It's better to call commit-charge after rmap is established */
+       mem_cgroup_commit_charge_swapin(page, ptr);
 
        swap_free(entry);
        if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
-               remove_exclusive_swap_page(page);
+               try_to_free_swap(page);
        unlock_page(page);
 
        if (write_access) {
@@ -2402,8 +2551,9 @@ unlock:
 out:
        return ret;
 out_nomap:
-       mem_cgroup_uncharge_page(page);
+       mem_cgroup_cancel_charge_swapin(ptr);
        pte_unmap_unlock(page_table, ptl);
+out_page:
        unlock_page(page);
        page_cache_release(page);
        return ret;
@@ -2432,7 +2582,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto oom;
        __SetPageUptodate(page);
 
-       if (mem_cgroup_charge(page, mm, GFP_KERNEL))
+       if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
                goto oom_free_page;
 
        entry = mk_pte(page, vma->vm_page_prot);
@@ -2442,8 +2592,6 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        if (!pte_none(*page_table))
                goto release;
        inc_mm_counter(mm, anon_rss);
-       SetPageSwapBacked(page);
-       lru_cache_add_active_or_unevictable(page, vma);
        page_add_new_anon_rmap(page, vma, address);
        set_pte_at(mm, address, page_table, entry);
 
@@ -2525,7 +2673,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                ret = VM_FAULT_OOM;
                                goto out;
                        }
-                       if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
+                       if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
                                ret = VM_FAULT_OOM;
                                page_cache_release(page);
                                goto out;
@@ -2546,25 +2694,25 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                         * to become writable
                         */
                        if (vma->vm_ops->page_mkwrite) {
+                               int tmp;
+
                                unlock_page(page);
-                               if (vma->vm_ops->page_mkwrite(vma, page) < 0) {
-                                       ret = VM_FAULT_SIGBUS;
-                                       anon = 1; /* no anon but release vmf.page */
-                                       goto out_unlocked;
-                               }
-                               lock_page(page);
-                               /*
-                                * XXX: this is not quite right (racy vs
-                                * invalidate) to unlock and relock the page
-                                * like this, however a better fix requires
-                                * reworking page_mkwrite locking API, which
-                                * is better done later.
-                                */
-                               if (!page->mapping) {
-                                       ret = 0;
-                                       anon = 1; /* no anon but release vmf.page */
-                                       goto out;
+                               vmf.flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
+                               tmp = vma->vm_ops->page_mkwrite(vma, &vmf);
+                               if (unlikely(tmp &
+                                         (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) {
+                                       ret = tmp;
+                                       goto unwritable_page;
                                }
+                               if (unlikely(!(tmp & VM_FAULT_LOCKED))) {
+                                       lock_page(page);
+                                       if (!page->mapping) {
+                                               ret = 0; /* retry the fault */
+                                               unlock_page(page);
+                                               goto unwritable_page;
+                                       }
+                               } else
+                                       VM_BUG_ON(!PageLocked(page));
                                page_mkwrite = 1;
                        }
                }
@@ -2591,8 +2739,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
                if (anon) {
                        inc_mm_counter(mm, anon_rss);
-                       SetPageSwapBacked(page);
-                       lru_cache_add_active_or_unevictable(page, vma);
                        page_add_new_anon_rmap(page, vma, address);
                } else {
                        inc_mm_counter(mm, file_rss);
@@ -2602,7 +2748,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                                get_page(dirty_page);
                        }
                }
-//TODO:  is this safe?  do_anonymous_page() does it this way.
                set_pte_at(mm, address, page_table, entry);
 
                /* no need to invalidate: a not-present page won't be cached */
@@ -2619,19 +2764,35 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        pte_unmap_unlock(page_table, ptl);
 
 out:
-       unlock_page(vmf.page);
-out_unlocked:
-       if (anon)
-               page_cache_release(vmf.page);
-       else if (dirty_page) {
-               if (vma->vm_file)
-                       file_update_time(vma->vm_file);
+       if (dirty_page) {
+               struct address_space *mapping = page->mapping;
 
-               set_page_dirty_balance(dirty_page, page_mkwrite);
+               if (set_page_dirty(dirty_page))
+                       page_mkwrite = 1;
+               unlock_page(dirty_page);
                put_page(dirty_page);
+               if (page_mkwrite && mapping) {
+                       /*
+                        * Some device drivers do not set page.mapping but still
+                        * dirty their pages
+                        */
+                       balance_dirty_pages_ratelimited(mapping);
+               }
+
+               /* file_update_time outside page_lock */
+               if (vma->vm_file)
+                       file_update_time(vma->vm_file);
+       } else {
+               unlock_page(vmf.page);
+               if (anon)
+                       page_cache_release(vmf.page);
        }
 
        return ret;
+
+unwritable_page:
+       page_cache_release(page);
+       return ret;
 }
 
 static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -2666,12 +2827,11 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
                return 0;
 
-       if (unlikely(!(vma->vm_flags & VM_NONLINEAR) ||
-                       !(vma->vm_flags & VM_CAN_NONLINEAR))) {
+       if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
                /*
                 * Page table corrupted: show pte and kill process.
                 */
-               print_bad_pte(vma, orig_pte, address);
+               print_bad_pte(vma, address, orig_pte, NULL);
                return VM_FAULT_OOM;
        }
 
@@ -2953,7 +3113,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 {
        resource_size_t phys_addr;
        unsigned long prot = 0;
-       void *maddr;
+       void __iomem *maddr;
        int offset = addr & (PAGE_SIZE-1);
 
        if (follow_phys(vma, addr, write, &prot, &phys_addr))
@@ -3079,6 +3239,15 @@ void print_vma_addr(char *prefix, unsigned long ip)
 #ifdef CONFIG_PROVE_LOCKING
 void might_fault(void)
 {
+       /*
+        * Some code (nfs/sunrpc) uses socket ops on kernel memory while
+        * holding the mmap_sem, this is safe because kernel memory doesn't
+        * get paged out, therefore we'll never actually fault, and the
+        * below annotations will generate false positives.
+        */
+       if (segment_eq(get_fs(), KERNEL_DS))
+               return;
+
        might_sleep();
        /*
         * it would be nicer only to annotate paths which are not under