hugetlb: modular state for hugetlb page size
diff --git a/mm/memory.c b/mm/memory.c
index 9d073fa..c1c1d6d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -50,6 +50,7 @@
 #include <linux/delayacct.h>
 #include <linux/init.h>
 #include <linux/writeback.h>
+#include <linux/memcontrol.h>
 
 #include <asm/pgalloc.h>
 #include <asm/uaccess.h>
@@ -60,6 +61,8 @@
 #include <linux/swapops.h>
 #include <linux/elf.h>
 
+#include "internal.h"
+
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 /* use the per-pgdat data instead for discontigmem - mbligh */
 unsigned long max_mapnr;
@@ -133,11 +136,9 @@ void pmd_clear_bad(pmd_t *pmd)
  */
 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
 {
-       struct page *page = pmd_page(*pmd);
+       pgtable_t token = pmd_pgtable(*pmd);
        pmd_clear(pmd);
-       pte_lock_deinit(page);
-       pte_free_tlb(tlb, page);
-       dec_zone_page_state(page, NR_PAGETABLE);
+       pte_free_tlb(tlb, token);
        tlb->mm->nr_ptes--;
 }
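
For orientation: pgtable_t is the arch-chosen token type for a pte page. With the generic definitions it is simply struct page * and pmd_pgtable() reduces to pmd_page(), so this hunk is behaviour-neutral there; the pte_lock_deinit()/NR_PAGETABLE bookkeeping that disappears here is expected to move into the arch pte_free_tlb() path (via pgtable_page_dtor()). A hedged sketch of the typical definitions, not copied from any particular arch header:

    /* Typical (non-s390) shape of the token; arch headers are authoritative. */
    typedef struct page *pgtable_t;
    #define pmd_pgtable(pmd) pmd_page(pmd)
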
 
@@ -212,7 +213,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
  *
  * Must be called with pagetable lock held.
  */
-void free_pgd_range(struct mmu_gather **tlb,
+void free_pgd_range(struct mmu_gather *tlb,
                        unsigned long addr, unsigned long end,
                        unsigned long floor, unsigned long ceiling)
 {
@@ -263,16 +264,16 @@ void free_pgd_range(struct mmu_gather **tlb,
                return;
 
        start = addr;
-       pgd = pgd_offset((*tlb)->mm, addr);
+       pgd = pgd_offset(tlb->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd))
                        continue;
-               free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
+               free_pud_range(tlb, pgd, addr, next, floor, ceiling);
        } while (pgd++, addr = next, addr != end);
 }
 
-void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
+void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
                unsigned long floor, unsigned long ceiling)
 {
        while (vma) {
@@ -308,21 +309,34 @@ void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
 
 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 {
-       struct page *new = pte_alloc_one(mm, address);
+       pgtable_t new = pte_alloc_one(mm, address);
        if (!new)
                return -ENOMEM;
 
-       pte_lock_init(new);
+       /*
+        * Ensure all pte setup (eg. pte page lock and page clearing) are
+        * visible before the pte is made visible to other CPUs by being
+        * put into page tables.
+        *
+        * The other side of the story is the pointer chasing in the page
+        * table walking code (when walking the page table without locking;
+        * ie. most of the time). Fortunately, these data accesses consist
+        * of a chain of data-dependent loads, meaning most CPUs (alpha
+        * being the notable exception) will already guarantee loads are
+        * seen in-order. See the alpha page table accessors for the
+        * smp_read_barrier_depends() barriers in page table walking code.
+        */
+       smp_wmb(); /* Could be smp_wmb__xxx(before|after)_spin_lock */
+
        spin_lock(&mm->page_table_lock);
-       if (pmd_present(*pmd)) {        /* Another has populated it */
-               pte_lock_deinit(new);
-               pte_free(mm, new);
-       } else {
+       if (!pmd_present(*pmd)) {       /* Has another populated it ? */
                mm->nr_ptes++;
-               inc_zone_page_state(new, NR_PAGETABLE);
                pmd_populate(mm, pmd, new);
+               new = NULL;
        }
        spin_unlock(&mm->page_table_lock);
+       if (new)
+               pte_free(mm, new);
        return 0;
 }
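
The smp_wmb() pairs with the dependent loads performed by lockless page-table walkers, as the new comment explains. Stripped of the pte details, the allocate/publish-or-discard shape this hunk converts __pte_alloc() to looks roughly like the following sketch (invented names such as alloc_node and struct table are illustrative, not kernel API):

    /* Illustrative only: allocate outside the lock, publish under it,
     * and free the allocation if another CPU won the race. */
    static int install_node(struct table *t, int slot)
    {
            struct node *new = alloc_node();    /* may sleep, no lock held */

            if (!new)
                    return -ENOMEM;

            smp_wmb();      /* order node initialisation before publication */

            spin_lock(&t->lock);
            if (!t->slots[slot]) {              /* did we lose the race? */
                    t->slots[slot] = new;       /* publish */
                    new = NULL;                 /* ownership transferred */
            }
            spin_unlock(&t->lock);

            if (new)                            /* lost the race: free it */
                    free_node(new);
            return 0;
    }
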
 
@@ -332,12 +346,16 @@ int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
        if (!new)
                return -ENOMEM;
 
+       smp_wmb(); /* See comment in __pte_alloc */
+
        spin_lock(&init_mm.page_table_lock);
-       if (pmd_present(*pmd))          /* Another has populated it */
-               pte_free_kernel(&init_mm, new);
-       else
+       if (!pmd_present(*pmd)) {       /* Has another populated it ? */
                pmd_populate_kernel(&init_mm, pmd, new);
+               new = NULL;
+       }
        spin_unlock(&init_mm.page_table_lock);
+       if (new)
+               pte_free_kernel(&init_mm, new);
        return 0;
 }
 
@@ -372,57 +390,93 @@ static inline int is_cow_mapping(unsigned int flags)
 }
 
 /*
- * This function gets the "struct page" associated with a pte.
+ * vm_normal_page -- This function gets the "struct page" associated with a pte.
+ *
+ * "Special" mappings do not wish to be associated with a "struct page" (either
+ * it doesn't exist, or it exists but they don't want to touch it). In this
+ * case, NULL is returned here. "Normal" mappings do have a struct page.
  *
- * NOTE! Some mappings do not have "struct pages". A raw PFN mapping
- * will have each page table entry just pointing to a raw page frame
- * number, and as far as the VM layer is concerned, those do not have
- * pages associated with them - even if the PFN might point to memory
- * that otherwise is perfectly fine and has a "struct page".
+ * There are 2 broad cases. Firstly, an architecture may define a pte_special()
+ * pte bit, in which case this function is trivial. Secondly, an architecture
+ * may not have a spare pte bit, which requires a more complicated scheme,
+ * described below.
  *
- * The way we recognize those mappings is through the rules set up
- * by "remap_pfn_range()": the vma will have the VM_PFNMAP bit set,
- * and the vm_pgoff will point to the first PFN mapped: thus every
- * page that is a raw mapping will always honor the rule
+ * A raw VM_PFNMAP mapping (ie. one that is not COWed) is always considered a
+ * special mapping (even if there are underlying and valid "struct pages").
+ * COWed pages of a VM_PFNMAP are always normal.
+ *
+ * The way we recognize COWed pages within VM_PFNMAP mappings is through the
+ * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
+ * set, and the vm_pgoff will point to the first PFN mapped: thus every special
+ * mapping will always honor the rule
  *
  *     pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
  *
- * and if that isn't true, the page has been COW'ed (in which case it
- * _does_ have a "struct page" associated with it even if it is in a
- * VM_PFNMAP range).
+ * And for normal mappings this is false.
+ *
+ * This restricts such mappings to be a linear translation from virtual address
+ * to pfn. To get around this restriction, we allow arbitrary mappings so long
+ * as the vma is not a COW mapping; in that case, we know that all ptes are
+ * special (because none can have been COWed).
+ *
+ *
+ * In order to support COW of arbitrary special mappings, we have VM_MIXEDMAP.
+ *
+ * VM_MIXEDMAP mappings can likewise contain memory with or without "struct
+ * page" backing, however the difference is that _all_ pages with a struct
+ * page (that is, those where pfn_valid is true) are refcounted and considered
+ * normal pages by the VM. The disadvantage is that pages are refcounted
+ * (which can be slower and simply not an option for some PFNMAP users). The
+ * advantage is that we don't have to follow the strict linearity rule of
+ * PFNMAP mappings in order to support COWable mappings.
+ *
  */
-struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+#ifdef __HAVE_ARCH_PTE_SPECIAL
+# define HAVE_PTE_SPECIAL 1
+#else
+# define HAVE_PTE_SPECIAL 0
+#endif
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+                               pte_t pte)
 {
-       unsigned long pfn = pte_pfn(pte);
+       unsigned long pfn;
 
-       if (unlikely(vma->vm_flags & VM_PFNMAP)) {
-               unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
-               if (pfn == vma->vm_pgoff + off)
-                       return NULL;
-               if (!is_cow_mapping(vma->vm_flags))
-                       return NULL;
+       if (HAVE_PTE_SPECIAL) {
+               if (likely(!pte_special(pte))) {
+                       VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+                       return pte_page(pte);
+               }
+               VM_BUG_ON(!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
+               return NULL;
        }
 
-#ifdef CONFIG_DEBUG_VM
-       /*
-        * Add some anal sanity checks for now. Eventually,
-        * we should just do "return pfn_to_page(pfn)", but
-        * in the meantime we check that we get a valid pfn,
-        * and that the resulting page looks ok.
-        */
-       if (unlikely(!pfn_valid(pfn))) {
-               print_bad_pte(vma, pte, addr);
-               return NULL;
+       /* !HAVE_PTE_SPECIAL case follows: */
+
+       pfn = pte_pfn(pte);
+
+       if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
+               if (vma->vm_flags & VM_MIXEDMAP) {
+                       if (!pfn_valid(pfn))
+                               return NULL;
+                       goto out;
+               } else {
+                       unsigned long off;
+                       off = (addr - vma->vm_start) >> PAGE_SHIFT;
+                       if (pfn == vma->vm_pgoff + off)
+                               return NULL;
+                       if (!is_cow_mapping(vma->vm_flags))
+                               return NULL;
+               }
        }
-#endif
+
+       VM_BUG_ON(!pfn_valid(pfn));
 
        /*
-        * NOTE! We still have PageReserved() pages in the page 
-        * tables. 
+        * NOTE! We still have PageReserved() pages in the page tables.
         *
-        * The PAGE_ZERO() pages and various VDSO mappings can
-        * cause them to exist.
+        * eg. VDSO mappings can cause them to exist.
         */
+out:
        return pfn_to_page(pfn);
 }
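
For architectures without pte_special(), the COW test above is exactly the linearity rule spelled out in the comment. A standalone sketch that mirrors (rather than extends) that logic, reusing this file's is_cow_mapping() helper:

    /* Illustrative: a pte in a remap_pfn_range()-style VM_PFNMAP vma is a
     * COWed, refcounted page iff it no longer sits at its linear offset. */
    static int pfnmap_pte_is_cowed(struct vm_area_struct *vma,
                                   unsigned long addr, unsigned long pfn)
    {
            unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;

            if (pfn == vma->vm_pgoff + off)
                    return 0;               /* still the original mapping */
            return is_cow_mapping(vma->vm_flags);
    }
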
 
@@ -847,9 +901,9 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
                        }
 
                        if (unlikely(is_vm_hugetlb_page(vma))) {
-                               unmap_hugepage_range(vma, start, end);
+                               unmap_hugepage_range(vma, start, end, NULL);
                                zap_work -= (end - start) /
-                                               (HPAGE_SIZE / PAGE_SIZE);
+                                       pages_per_huge_page(hstate_vma(vma));
                                start = end;
                        } else
                                start = unmap_page_range(*tlbp, vma,
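
The zap-work accounting now scales with the vma's own huge page size instead of the global HPAGE_SIZE. Roughly, the hstate helpers this series introduces behave as sketched below (hedged; the real definitions live in include/linux/hugetlb.h):

    /* Hedged sketch: each hugetlbfs mount (and thus each hugetlb vma)
     * carries an hstate describing its page size. */
    struct hstate {
            unsigned int order;     /* huge page size == PAGE_SIZE << order */
            /* ... per-size free lists and counters ... */
    };

    static inline unsigned long pages_per_huge_page(struct hstate *h)
    {
            return 1UL << h->order;
    }
    /* hstate_vma(vma) looks up the hstate backing vma's hugetlbfs mount. */
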
@@ -934,7 +988,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                goto no_page_table;
        
        pmd = pmd_offset(pud, address);
-       if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+       if (pmd_none(*pmd))
                goto no_page_table;
 
        if (pmd_huge(*pmd)) {
@@ -943,18 +997,19 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
                goto out;
        }
 
+       if (unlikely(pmd_bad(*pmd)))
+               goto no_page_table;
+
        ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
-       if (!ptep)
-               goto out;
 
        pte = *ptep;
        if (!pte_present(pte))
-               goto unlock;
+               goto no_page;
        if ((flags & FOLL_WRITE) && !pte_write(pte))
                goto unlock;
        page = vm_normal_page(vma, address, pte);
        if (unlikely(!page))
-               goto unlock;
+               goto bad_page;
 
        if (flags & FOLL_GET)
                get_page(page);
@@ -969,6 +1024,15 @@ unlock:
 out:
        return page;
 
+bad_page:
+       pte_unmap_unlock(ptep, ptl);
+       return ERR_PTR(-EFAULT);
+
+no_page:
+       pte_unmap_unlock(ptep, ptl);
+       if (!pte_none(pte))
+               return page;
+       /* Fall through to ZERO_PAGE handling */
 no_page_table:
        /*
         * When core dumping an enormous anonymous area that nobody
@@ -983,6 +1047,24 @@ no_page_table:
        return page;
 }
 
+/* Can we do the FOLL_ANON optimization? */
+static inline int use_zero_page(struct vm_area_struct *vma)
+{
+       /*
+        * We don't want to optimize FOLL_ANON for make_pages_present()
+        * when it tries to page in a VM_LOCKED region. As to VM_SHARED,
+        * we want to get the page from the page tables to make sure
+        * that we serialize and update with any other user of that
+        * mapping.
+        */
+       if (vma->vm_flags & (VM_LOCKED | VM_SHARED))
+               return 0;
+       /*
+        * And if we have a fault routine, it's not an anonymous region.
+        */
+       return !vma->vm_ops || !vma->vm_ops->fault;
+}
+
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                unsigned long start, int len, int write, int force,
                struct page **pages, struct vm_area_struct **vmas)
@@ -990,6 +1072,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
        int i;
        unsigned int vm_flags;
 
+       if (len <= 0)
+               return 0;
        /* 
         * Require read or write permissions.
         * If 'force' is set, we only require the "MAY" flags.
@@ -1055,9 +1139,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                foll_flags = FOLL_TOUCH;
                if (pages)
                        foll_flags |= FOLL_GET;
-               if (!write && !(vma->vm_flags & VM_LOCKED) &&
-                   (!vma->vm_ops || (!vma->vm_ops->nopage &&
-                                       !vma->vm_ops->fault)))
+               if (!write && use_zero_page(vma))
                        foll_flags |= FOLL_ANON;
 
                do {
@@ -1069,7 +1151,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                         * be processed until returning to user space.
                         */
                        if (unlikely(test_tsk_thread_flag(tsk, TIF_MEMDIE)))
-                               return -ENOMEM;
+                               return i ? i : -ENOMEM;
 
                        if (write)
                                foll_flags |= FOLL_WRITE;
@@ -1103,6 +1185,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 
                                cond_resched();
                        }
+                       if (IS_ERR(page))
+                               return i ? i : PTR_ERR(page);
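
With this change get_user_pages() propagates follow_page() errors: it returns the number of pages already pinned when that is non-zero, and a negative errno only when nothing was pinned. A hedged caller-side sketch (not from this patch) showing how that has to be handled:

    /* Sketch: pin nr pages for writing and cope with partial success. */
    static int pin_user_buffer(struct task_struct *tsk, struct mm_struct *mm,
                               unsigned long start, int nr, struct page **pages)
    {
            int got;

            down_read(&mm->mmap_sem);
            got = get_user_pages(tsk, mm, start, nr, 1 /* write */,
                                 0 /* force */, pages, NULL);
            up_read(&mm->mmap_sem);

            if (got < 0)            /* nothing was pinned */
                    return got;
            if (got < nr) {         /* partial pin: drop what we got */
                    while (got--)
                            page_cache_release(pages[got]);
                    return -EFAULT;
            }
            return 0;
    }
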
                        if (pages) {
                                pages[i] = page;
 
@@ -1140,20 +1224,26 @@ pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
  * old drivers should use this, and they needed to mark their
  * pages reserved for the old functions anyway.
  */
-static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *page, pgprot_t prot)
+static int insert_page(struct vm_area_struct *vma, unsigned long addr,
+                       struct page *page, pgprot_t prot)
 {
+       struct mm_struct *mm = vma->vm_mm;
        int retval;
        pte_t *pte;
-       spinlock_t *ptl;  
+       spinlock_t *ptl;
+
+       retval = mem_cgroup_charge(page, mm, GFP_KERNEL);
+       if (retval)
+               goto out;
 
        retval = -EINVAL;
        if (PageAnon(page))
-               goto out;
+               goto out_uncharge;
        retval = -ENOMEM;
        flush_dcache_page(page);
        pte = get_locked_pte(mm, addr, &ptl);
        if (!pte)
-               goto out;
+               goto out_uncharge;
        retval = -EBUSY;
        if (!pte_none(*pte))
                goto out_unlock;
@@ -1165,8 +1255,12 @@ static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *pa
        set_pte_at(mm, addr, pte, mk_pte(page, prot));
 
        retval = 0;
+       pte_unmap_unlock(pte, ptl);
+       return retval;
 out_unlock:
        pte_unmap_unlock(pte, ptl);
+out_uncharge:
+       mem_cgroup_uncharge_page(page);
 out:
        return retval;
 }
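
insert_page() now charges the page to the memory controller up front and uncharges it on every failure path; callers of the public wrapper vm_insert_page() are unaffected. A hypothetical driver-side use at mmap() time (names with a mydrv_ prefix are invented):

    /* Hypothetical driver: map one zeroed kernel page into the user vma.
     * Dropping the driver's own page reference on ->release is omitted. */
    static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
    {
            struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

            if (!page)
                    return -ENOMEM;
            /* Refuses PageAnon pages; takes its own reference on success. */
            return vm_insert_page(vma, vma->vm_start, page);
    }
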
@@ -1193,40 +1287,26 @@ out:
  *
  * The page does not need to be reserved.
  */
-int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *page)
+int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
+                       struct page *page)
 {
        if (addr < vma->vm_start || addr >= vma->vm_end)
                return -EFAULT;
        if (!page_count(page))
                return -EINVAL;
        vma->vm_flags |= VM_INSERTPAGE;
-       return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
+       return insert_page(vma, addr, page, vma->vm_page_prot);
 }
 EXPORT_SYMBOL(vm_insert_page);
 
-/**
- * vm_insert_pfn - insert single pfn into user vma
- * @vma: user vma to map to
- * @addr: target user address of this page
- * @pfn: source kernel pfn
- *
- * Similar to vm_inert_page, this allows drivers to insert individual pages
- * they've allocated into a user vma. Same comments apply.
- *
- * This function should only be called from a vm_ops->fault handler, and
- * in that case the handler should return NULL.
- */
-int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
-               unsigned long pfn)
+static int insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+                       unsigned long pfn, pgprot_t prot)
 {
        struct mm_struct *mm = vma->vm_mm;
        int retval;
        pte_t *pte, entry;
        spinlock_t *ptl;
 
-       BUG_ON(!(vma->vm_flags & VM_PFNMAP));
-       BUG_ON(is_cow_mapping(vma->vm_flags));
-
        retval = -ENOMEM;
        pte = get_locked_pte(mm, addr, &ptl);
        if (!pte)
@@ -1236,19 +1316,79 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                goto out_unlock;
 
        /* Ok, finally just insert the thing.. */
-       entry = pfn_pte(pfn, vma->vm_page_prot);
+       entry = pte_mkspecial(pfn_pte(pfn, prot));
        set_pte_at(mm, addr, pte, entry);
-       update_mmu_cache(vma, addr, entry);
+       update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */
 
        retval = 0;
 out_unlock:
        pte_unmap_unlock(pte, ptl);
-
 out:
        return retval;
 }
+
+/**
+ * vm_insert_pfn - insert single pfn into user vma
+ * @vma: user vma to map to
+ * @addr: target user address of this page
+ * @pfn: source kernel pfn
+ *
+ * Similar to vm_insert_page, this allows drivers to insert individual pages
+ * they've allocated into a user vma. Same comments apply.
+ *
+ * This function should only be called from a vm_ops->fault handler, and
+ * in that case the handler should return NULL.
+ *
+ * vma cannot be a COW mapping.
+ *
+ * As this is called only for pages that do not currently exist, we
+ * do not need to flush old virtual caches or the TLB.
+ */
+int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+                       unsigned long pfn)
+{
+       /*
+        * Technically, architectures with pte_special can avoid all these
+        * restrictions (same for remap_pfn_range).  However we would like
+        * consistency in testing and feature parity among all, so we should
+        * try to keep these invariants in place for everybody.
+        */
+       BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
+       BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
+                                               (VM_PFNMAP|VM_MIXEDMAP));
+       BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
+       BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
+
+       if (addr < vma->vm_start || addr >= vma->vm_end)
+               return -EFAULT;
+       return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+}
 EXPORT_SYMBOL(vm_insert_pfn);
 
+int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+                       unsigned long pfn)
+{
+       BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
+
+       if (addr < vma->vm_start || addr >= vma->vm_end)
+               return -EFAULT;
+
+       /*
+        * If we don't have pte special, then we have to use the pfn_valid()
+        * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*
+        * refcount the page if pfn_valid is true (hence insert_page rather
+        * than insert_pfn).
+        */
+       if (!HAVE_PTE_SPECIAL && pfn_valid(pfn)) {
+               struct page *page;
+
+               page = pfn_to_page(pfn);
+               return insert_page(vma, addr, page, vma->vm_page_prot);
+       }
+       return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_insert_mixed);
+
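
A hypothetical VM_MIXEDMAP driver fault handler built on the new helper might look like the sketch below; mydrv_addr_to_pfn() is invented, and the VM_FAULT_NOPAGE return follows the ->fault convention for handlers that install the pte themselves:

    static int mydrv_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
            unsigned long addr = (unsigned long)vmf->virtual_address;
            int err;

            /* vm_insert_mixed() refcounts the page when pfn_valid() and no
             * pte_special() bit is available, else installs a raw pfn pte. */
            err = vm_insert_mixed(vma, addr, mydrv_addr_to_pfn(vma, addr));
            if (err == -ENOMEM)
                    return VM_FAULT_OOM;
            if (err < 0 && err != -EBUSY)
                    return VM_FAULT_SIGBUS;
            return VM_FAULT_NOPAGE;     /* pte installed, nothing to map */
    }
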
 /*
  * maps a range of physical memory into the requested pages. the old
  * mappings are removed. any references to nonexistent pages results
@@ -1267,7 +1407,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
        arch_enter_lazy_mmu_mode();
        do {
                BUG_ON(!pte_none(*pte));
-               set_pte_at(mm, addr, pte, pfn_pte(pfn, prot));
+               set_pte_at(mm, addr, pte, pte_mkspecial(pfn_pte(pfn, prot)));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
        arch_leave_lazy_mmu_mode();
@@ -1381,7 +1521,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 {
        pte_t *pte;
        int err;
-       struct page *pmd_page;
+       pgtable_t token;
        spinlock_t *uninitialized_var(ptl);
 
        pte = (mm == &init_mm) ?
@@ -1392,10 +1532,10 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 
        BUG_ON(pmd_huge(*pmd));
 
-       pmd_page = pmd_page(*pmd);
+       token = pmd_pgtable(*pmd);
 
        do {
-               err = fn(pte, pmd_page, addr, data);
+               err = fn(pte, token, addr, data);
                if (err)
                        break;
        } while (pte++, addr += PAGE_SIZE, addr != end);
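
The apply_to_page_range() callback type changes accordingly: its second argument is now the opaque pgtable_t token rather than the pte page's struct page. An illustrative callback with the new signature:

    /* Illustrative pte_fn_t: count present ptes in a range via
     * apply_to_page_range(mm, start, size, count_present_pte, &count). */
    static int count_present_pte(pte_t *pte, pgtable_t token,
                                 unsigned long addr, void *data)
    {
            if (pte_present(*pte))
                    (*(unsigned long *)data)++;
            return 0;       /* non-zero would abort the walk */
    }
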
@@ -1562,8 +1702,19 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        struct page *dirty_page = NULL;
 
        old_page = vm_normal_page(vma, address, orig_pte);
-       if (!old_page)
+       if (!old_page) {
+               /*
+                * VM_MIXEDMAP !pfn_valid() case
+                *
+                * We should not cow pages in a shared writeable mapping.
+                * Just mark the pages writable as we can't do any dirty
+                * accounting on raw pfn maps.
+                */
+               if ((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
+                                    (VM_WRITE|VM_SHARED))
+                       goto reuse;
                goto gotten;
+       }
 
        /*
         * Take out anonymous pages first, anonymous shared vmas are
@@ -1616,6 +1767,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        }
 
        if (reuse) {
+reuse:
                flush_cache_page(vma, address, pte_pfn(orig_pte));
                entry = pte_mkyoung(orig_pte);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
@@ -1641,13 +1793,15 @@ gotten:
        cow_user_page(new_page, old_page, address, vma);
        __SetPageUptodate(new_page);
 
+       if (mem_cgroup_charge(new_page, mm, GFP_KERNEL))
+               goto oom_free_new;
+
        /*
         * Re-check the pte - we dropped the lock
         */
        page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (likely(pte_same(*page_table, orig_pte))) {
                if (old_page) {
-                       page_remove_rmap(old_page, vma);
                        if (!PageAnon(old_page)) {
                                dec_mm_counter(mm, file_rss);
                                inc_mm_counter(mm, anon_rss);
@@ -1669,10 +1823,38 @@ gotten:
                lru_cache_add_active(new_page);
                page_add_new_anon_rmap(new_page, vma, address);
 
+               if (old_page) {
+                       /*
+                        * Only after switching the pte to the new page may
+                        * we remove the mapcount here. Otherwise another
+                        * process may come and find the rmap count decremented
+                        * before the pte is switched to the new page, and
+                        * "reuse" the old page writing into it while our pte
+                        * here still points into it and can be read by other
+                        * threads.
+                        *
+                        * The critical issue is to order this
+                        * page_remove_rmap with the ptp_clear_flush above.
+                        * Those stores are ordered by (if nothing else,)
+                        * the barrier present in the atomic_add_negative
+                        * in page_remove_rmap.
+                        *
+                        * Then the TLB flush in ptep_clear_flush ensures that
+                        * no process can access the old page before the
+                        * decremented mapcount is visible. And the old page
+                        * cannot be reused until after the decremented
+                        * mapcount is visible. So transitively, TLBs to
+                        * old page will be flushed before it can be reused.
+                        */
+                       page_remove_rmap(old_page, vma);
+               }
+
                /* Free the old page.. */
                new_page = old_page;
                ret |= VM_FAULT_WRITE;
-       }
+       } else
+               mem_cgroup_uncharge_page(new_page);
+
        if (new_page)
                page_cache_release(new_page);
        if (old_page)
@@ -1696,6 +1878,8 @@ unlock:
                put_page(dirty_page);
        }
        return ret;
+oom_free_new:
+       page_cache_release(new_page);
 oom:
        if (old_page)
                page_cache_release(old_page);
@@ -2036,6 +2220,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                count_vm_event(PGMAJFAULT);
        }
 
+       if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
+               delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
+               ret = VM_FAULT_OOM;
+               goto out;
+       }
+
        mark_page_accessed(page);
        lock_page(page);
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
@@ -2071,10 +2261,9 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        unlock_page(page);
 
        if (write_access) {
-               /* XXX: We could OR the do_wp_page code with this one? */
-               if (do_wp_page(mm, vma, address,
-                               page_table, pmd, ptl, pte) & VM_FAULT_OOM)
-                       ret = VM_FAULT_OOM;
+               ret |= do_wp_page(mm, vma, address, page_table, pmd, ptl, pte);
+               if (ret & VM_FAULT_ERROR)
+                       ret &= VM_FAULT_ERROR;
                goto out;
        }
 
@@ -2085,6 +2274,7 @@ unlock:
 out:
        return ret;
 out_nomap:
+       mem_cgroup_uncharge_page(page);
        pte_unmap_unlock(page_table, ptl);
        unlock_page(page);
        page_cache_release(page);
@@ -2114,6 +2304,9 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto oom;
        __SetPageUptodate(page);
 
+       if (mem_cgroup_charge(page, mm, GFP_KERNEL))
+               goto oom_free_page;
+
        entry = mk_pte(page, vma->vm_page_prot);
        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 
@@ -2131,8 +2324,11 @@ unlock:
        pte_unmap_unlock(page_table, ptl);
        return 0;
 release:
+       mem_cgroup_uncharge_page(page);
        page_cache_release(page);
        goto unlock;
+oom_free_page:
+       page_cache_release(page);
 oom:
        return VM_FAULT_OOM;
 }
@@ -2169,22 +2365,9 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        vmf.flags = flags;
        vmf.page = NULL;
 
-       BUG_ON(vma->vm_flags & VM_PFNMAP);
-
-       if (likely(vma->vm_ops->fault)) {
-               ret = vma->vm_ops->fault(vma, &vmf);
-               if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
-                       return ret;
-       } else {
-               /* Legacy ->nopage path */
-               ret = 0;
-               vmf.page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
-               /* no page was available -- either SIGBUS or OOM */
-               if (unlikely(vmf.page == NOPAGE_SIGBUS))
-                       return VM_FAULT_SIGBUS;
-               else if (unlikely(vmf.page == NOPAGE_OOM))
-                       return VM_FAULT_OOM;
-       }
+       ret = vma->vm_ops->fault(vma, &vmf);
+       if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
+               return ret;
 
        /*
         * For consistency in subsequent calls, make the faulted page always
@@ -2246,6 +2429,11 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
        }
 
+       if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
+               ret = VM_FAULT_OOM;
+               goto out;
+       }
+
        page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 
        /*
@@ -2281,6 +2469,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                /* no need to invalidate: a not-present page won't be cached */
                update_mmu_cache(vma, address, entry);
        } else {
+               mem_cgroup_uncharge_page(page);
                if (anon)
                        page_cache_release(page);
                else
@@ -2317,56 +2506,6 @@ static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
 }
 
-
-/*
- * do_no_pfn() tries to create a new page mapping for a page without
- * a struct_page backing it
- *
- * As this is called only for pages that do not currently exist, we
- * do not need to flush old virtual caches or the TLB.
- *
- * We enter with non-exclusive mmap_sem (to exclude vma changes,
- * but allow concurrent faults), and pte mapped but not yet locked.
- * We return with mmap_sem still held, but pte unmapped and unlocked.
- *
- * It is expected that the ->nopfn handler always returns the same pfn
- * for a given virtual mapping.
- *
- * Mark this `noinline' to prevent it from bloating the main pagefault code.
- */
-static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
-                    unsigned long address, pte_t *page_table, pmd_t *pmd,
-                    int write_access)
-{
-       spinlock_t *ptl;
-       pte_t entry;
-       unsigned long pfn;
-
-       pte_unmap(page_table);
-       BUG_ON(!(vma->vm_flags & VM_PFNMAP));
-       BUG_ON(is_cow_mapping(vma->vm_flags));
-
-       pfn = vma->vm_ops->nopfn(vma, address & PAGE_MASK);
-       if (unlikely(pfn == NOPFN_OOM))
-               return VM_FAULT_OOM;
-       else if (unlikely(pfn == NOPFN_SIGBUS))
-               return VM_FAULT_SIGBUS;
-       else if (unlikely(pfn == NOPFN_REFAULT))
-               return 0;
-
-       page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
-
-       /* Only go through if we didn't race with anybody else... */
-       if (pte_none(*page_table)) {
-               entry = pfn_pte(pfn, vma->vm_page_prot);
-               if (write_access)
-                       entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-               set_pte_at(mm, address, page_table, entry);
-       }
-       pte_unmap_unlock(page_table, ptl);
-       return 0;
-}
-
 /*
  * Fault of a previously existing named mapping. Repopulate the pte
  * from the encoded file_pte if possible. This enables swappable
@@ -2424,12 +2563,9 @@ static inline int handle_pte_fault(struct mm_struct *mm,
        if (!pte_present(entry)) {
                if (pte_none(entry)) {
                        if (vma->vm_ops) {
-                               if (vma->vm_ops->fault || vma->vm_ops->nopage)
+                               if (likely(vma->vm_ops->fault))
                                        return do_linear_fault(mm, vma, address,
                                                pte, pmd, write_access, entry);
-                               if (unlikely(vma->vm_ops->nopfn))
-                                       return do_no_pfn(mm, vma, address, pte,
-                                                        pmd, write_access);
                        }
                        return do_anonymous_page(mm, vma, address,
                                                 pte, pmd, write_access);
@@ -2512,6 +2648,8 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
        if (!new)
                return -ENOMEM;
 
+       smp_wmb(); /* See comment in __pte_alloc */
+
        spin_lock(&mm->page_table_lock);
        if (pgd_present(*pgd))          /* Another has populated it */
                pud_free(mm, new);
@@ -2533,6 +2671,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
        if (!new)
                return -ENOMEM;
 
+       smp_wmb(); /* See comment in __pte_alloc */
+
        spin_lock(&mm->page_table_lock);
 #ifndef __ARCH_HAS_4LEVEL_HACK
        if (pud_present(*pud))          /* Another has populated it */
@@ -2613,6 +2753,86 @@ int in_gate_area_no_task(unsigned long addr)
 
 #endif /* __HAVE_ARCH_GATE_AREA */
 
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+static resource_size_t follow_phys(struct vm_area_struct *vma,
+                       unsigned long address, unsigned int flags,
+                       unsigned long *prot)
+{
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *ptep, pte;
+       spinlock_t *ptl;
+       resource_size_t phys_addr = 0;
+       struct mm_struct *mm = vma->vm_mm;
+
+       VM_BUG_ON(!(vma->vm_flags & (VM_IO | VM_PFNMAP)));
+
+       pgd = pgd_offset(mm, address);
+       if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+               goto no_page_table;
+
+       pud = pud_offset(pgd, address);
+       if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+               goto no_page_table;
+
+       pmd = pmd_offset(pud, address);
+       if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+               goto no_page_table;
+
+       /* We cannot handle huge page PFN maps. Luckily they don't exist. */
+       if (pmd_huge(*pmd))
+               goto no_page_table;
+
+       ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
+       if (!ptep)
+               goto out;
+
+       pte = *ptep;
+       if (!pte_present(pte))
+               goto unlock;
+       if ((flags & FOLL_WRITE) && !pte_write(pte))
+               goto unlock;
+       phys_addr = pte_pfn(pte);
+       phys_addr <<= PAGE_SHIFT; /* Shift here to avoid overflow on PAE */
+
+       *prot = pgprot_val(pte_pgprot(pte));
+
+unlock:
+       pte_unmap_unlock(ptep, ptl);
+out:
+       return phys_addr;
+no_page_table:
+       return 0;
+}
+
+int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+                       void *buf, int len, int write)
+{
+       resource_size_t phys_addr;
+       unsigned long prot = 0;
+       void *maddr;
+       int offset = addr & (PAGE_SIZE-1);
+
+       if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+               return -EINVAL;
+
+       phys_addr = follow_phys(vma, addr, write, &prot);
+
+       if (!phys_addr)
+               return -EINVAL;
+
+       maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
+       if (write)
+               memcpy_toio(maddr + offset, buf, len);
+       else
+               memcpy_fromio(buf, maddr + offset, len);
+       iounmap(maddr);
+
+       return len;
+}
+#endif
+
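
generic_access_phys() gives ptrace-style access to VM_IO/VM_PFNMAP mappings on architectures with CONFIG_HAVE_IOREMAP_PROT. A hedged sketch of how an MMIO-mapping driver could wire it up through the new vm_ops->access hook (mydrv_base_pfn is invented):

    static struct vm_operations_struct mydrv_mmio_vm_ops = {
            .access = generic_access_phys,  /* lets ptrace peek at our MMIO */
    };

    static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
    {
            vma->vm_ops = &mydrv_mmio_vm_ops;
            /* remap_pfn_range() marks the vma VM_IO | VM_PFNMAP itself. */
            return remap_pfn_range(vma, vma->vm_start, mydrv_base_pfn,
                                   vma->vm_end - vma->vm_start,
                                   pgprot_noncached(vma->vm_page_prot));
    }
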
 /*
  * Access another process' address space.
  * Source/target buffer must be kernel space,
@@ -2622,7 +2842,6 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
 {
        struct mm_struct *mm;
        struct vm_area_struct *vma;
-       struct page *page;
        void *old_buf = buf;
 
        mm = get_task_mm(tsk);
@@ -2634,28 +2853,44 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
        while (len) {
                int bytes, ret, offset;
                void *maddr;
+               struct page *page = NULL;
 
                ret = get_user_pages(tsk, mm, addr, 1,
                                write, 1, &page, &vma);
-               if (ret <= 0)
-                       break;
-
-               bytes = len;
-               offset = addr & (PAGE_SIZE-1);
-               if (bytes > PAGE_SIZE-offset)
-                       bytes = PAGE_SIZE-offset;
-
-               maddr = kmap(page);
-               if (write) {
-                       copy_to_user_page(vma, page, addr,
-                                         maddr + offset, buf, bytes);
-                       set_page_dirty_lock(page);
+               if (ret <= 0) {
+                       /*
+                        * Check if this is a VM_IO | VM_PFNMAP VMA, which
+                        * we can access using slightly different code.
+                        */
+#ifdef CONFIG_HAVE_IOREMAP_PROT
+                       vma = find_vma(mm, addr);
+                       if (!vma)
+                               break;
+                       if (vma->vm_ops && vma->vm_ops->access)
+                               ret = vma->vm_ops->access(vma, addr, buf,
+                                                         len, write);
+                       if (ret <= 0)
+#endif
+                               break;
+                       bytes = ret;
                } else {
-                       copy_from_user_page(vma, page, addr,
-                                           buf, maddr + offset, bytes);
+                       bytes = len;
+                       offset = addr & (PAGE_SIZE-1);
+                       if (bytes > PAGE_SIZE-offset)
+                               bytes = PAGE_SIZE-offset;
+
+                       maddr = kmap(page);
+                       if (write) {
+                               copy_to_user_page(vma, page, addr,
+                                                 maddr + offset, buf, bytes);
+                               set_page_dirty_lock(page);
+                       } else {
+                               copy_from_user_page(vma, page, addr,
+                                                   buf, maddr + offset, bytes);
+                       }
+                       kunmap(page);
+                       page_cache_release(page);
                }
-               kunmap(page);
-               page_cache_release(page);
                len -= bytes;
                buf += bytes;
                addr += bytes;
@@ -2674,6 +2909,13 @@ void print_vma_addr(char *prefix, unsigned long ip)
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
 
+       /*
+        * Do not print if we are in atomic
+        * contexts (in exception stacks, etc.):
+        */
+       if (preempt_count())
+               return;
+
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, ip);
        if (vma && vma->vm_file) {
@@ -2682,7 +2924,7 @@ void print_vma_addr(char *prefix, unsigned long ip)
                if (buf) {
                        char *p, *s;
 
-                       p = d_path(f->f_dentry, f->f_vfsmnt, buf, PAGE_SIZE);
+                       p = d_path(&f->f_path, buf, PAGE_SIZE);
                        if (IS_ERR(p))
                                p = "?";
                        s = strrchr(p, '/');