[PATCH] unpaged: VM_UNPAGED
diff --git a/mm/memory.c b/mm/memory.c
index 4bdd118..ece0496 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -114,6 +114,7 @@ static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
 {
        struct page *page = pmd_page(*pmd);
        pmd_clear(pmd);
+       pte_lock_deinit(page);
        pte_free_tlb(tlb, page);
        dec_page_state(nr_page_table_pages);
        tlb->mm->nr_ptes--;
@@ -260,6 +261,12 @@ void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
                struct vm_area_struct *next = vma->vm_next;
                unsigned long addr = vma->vm_start;
 
+               /*
+                * Hide vma from rmap and vmtruncate before freeing pgtables
+                */
+               anon_vma_unlink(vma);
+               unlink_file_vma(vma);
+
                if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) {
                        hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
                                floor, next? next->vm_start: ceiling);
@@ -272,6 +279,8 @@ void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
                                                        HPAGE_SIZE)) {
                                vma = next;
                                next = vma->vm_next;
+                               anon_vma_unlink(vma);
+                               unlink_file_vma(vma);
                        }
                        free_pgd_range(tlb, addr, vma->vm_end,
                                floor, next? next->vm_start: ceiling);
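
The two unlink calls are what "hide vma from rmap and vmtruncate" means in
practice. A hypothetical combined helper (illustration only, not part of this
patch; both callees already exist and are used above) would amount to:

	static void hide_vma_from_rmap(struct vm_area_struct *vma)
	{
		anon_vma_unlink(vma);	/* off the anon_vma list: rmap can no longer reach it */
		unlink_file_vma(vma);	/* off the file's i_mmap tree: vmtruncate can't either */
	}
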
@@ -282,21 +291,21 @@ void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
 
 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
 {
-       struct page *new;
-
-       spin_unlock(&mm->page_table_lock);
-       new = pte_alloc_one(mm, address);
-       spin_lock(&mm->page_table_lock);
+       struct page *new = pte_alloc_one(mm, address);
        if (!new)
                return -ENOMEM;
 
-       if (pmd_present(*pmd))          /* Another has populated it */
+       pte_lock_init(new);
+       spin_lock(&mm->page_table_lock);
+       if (pmd_present(*pmd)) {        /* Another has populated it */
+               pte_lock_deinit(new);
                pte_free(new);
-       else {
+       } else {
                mm->nr_ptes++;
                inc_page_state(nr_page_table_pages);
                pmd_populate(mm, pmd, new);
        }
+       spin_unlock(&mm->page_table_lock);
        return 0;
 }
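
For orientation, the converted pte walkers below all follow the same locking
pattern, built on the pte_alloc_map_lock()/pte_offset_map_lock() and
pte_unmap_unlock() helpers this patch relies on. A purely illustrative sketch
(modelled on zeromap_pte_range() further down, not code from the patch):

	static int example_map_one_zero_pte(struct mm_struct *mm, pmd_t *pmd,
					    unsigned long addr, pgprot_t prot)
	{
		spinlock_t *ptl;
		pte_t *pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);

		if (!pte)
			return -ENOMEM;
		if (pte_none(*pte))	/* *pte cannot change while ptl is held */
			set_pte_at(mm, addr, pte,
				   pte_wrprotect(mk_pte(ZERO_PAGE(addr), prot)));
		pte_unmap_unlock(pte, ptl);
		return 0;
	}
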
 
@@ -325,7 +334,7 @@ static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
 
 /*
  * This function is called to print an error when a pte in a
- * !VM_RESERVED region is found pointing to an invalid pfn (which
+ * !VM_UNPAGED region is found pointing to an invalid pfn (which
  * is an error.
  *
  * The calling function must still handle the error.
@@ -344,9 +353,6 @@ void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
  * copy one vm_area from one task to the other. Assumes the page tables
  * already present in the new task to be cleared in the whole range
  * covered by this vma.
- *
- * dst->page_table_lock is held on entry and exit,
- * but may be dropped within p[mg]d_alloc() and pte_alloc_map().
  */
 
 static inline void
@@ -366,22 +372,24 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                        /* make sure dst_mm is on swapoff's mmlist. */
                        if (unlikely(list_empty(&dst_mm->mmlist))) {
                                spin_lock(&mmlist_lock);
-                               list_add(&dst_mm->mmlist, &src_mm->mmlist);
+                               if (list_empty(&dst_mm->mmlist))
+                                       list_add(&dst_mm->mmlist,
+                                                &src_mm->mmlist);
                                spin_unlock(&mmlist_lock);
                        }
                }
                goto out_set_pte;
        }
 
-       /* If the region is VM_RESERVED, the mapping is not
+       /* If the region is VM_UNPAGED, the mapping is not
         * mapped via rmap - duplicate the pte as is.
         */
-       if (vm_flags & VM_RESERVED)
+       if (vm_flags & VM_UNPAGED)
                goto out_set_pte;
 
        pfn = pte_pfn(pte);
        /* If the pte points outside of valid memory but
-        * the region is not VM_RESERVED, we have a problem.
+        * the region is not VM_UNPAGED, we have a problem.
         */
        if (unlikely(!pfn_valid(pfn))) {
                print_bad_pte(vma, pte, addr);
@@ -419,17 +427,19 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                unsigned long addr, unsigned long end)
 {
        pte_t *src_pte, *dst_pte;
+       spinlock_t *src_ptl, *dst_ptl;
        int progress = 0;
        int rss[2];
 
 again:
        rss[1] = rss[0] = 0;
-       dst_pte = pte_alloc_map(dst_mm, dst_pmd, addr);
+       dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
        if (!dst_pte)
                return -ENOMEM;
        src_pte = pte_offset_map_nested(src_pmd, addr);
+       src_ptl = pte_lockptr(src_mm, src_pmd);
+       spin_lock(src_ptl);
 
-       spin_lock(&src_mm->page_table_lock);
        do {
                /*
                 * We are holding two locks at this point - either of them
@@ -438,8 +448,8 @@ again:
                if (progress >= 32) {
                        progress = 0;
                        if (need_resched() ||
-                           need_lockbreak(&src_mm->page_table_lock) ||
-                           need_lockbreak(&dst_mm->page_table_lock))
+                           need_lockbreak(src_ptl) ||
+                           need_lockbreak(dst_ptl))
                                break;
                }
                if (pte_none(*src_pte)) {
@@ -449,12 +459,12 @@ again:
                copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
                progress += 8;
        } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
-       spin_unlock(&src_mm->page_table_lock);
 
+       spin_unlock(src_ptl);
        pte_unmap_nested(src_pte - 1);
-       pte_unmap(dst_pte - 1);
        add_mm_rss(dst_mm, rss[0], rss[1]);
-       cond_resched_lock(&dst_mm->page_table_lock);
+       pte_unmap_unlock(dst_pte - 1, dst_ptl);
+       cond_resched();
        if (addr != end)
                goto again;
        return 0;
@@ -518,7 +528,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
         * readonly mappings. The tradeoff is that copy_page_range is more
         * efficient than faulting.
         */
-       if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_RESERVED))) {
+       if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_UNPAGED))) {
                if (!vma->anon_vma)
                        return 0;
        }
@@ -539,24 +549,30 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        return 0;
 }
 
-static void zap_pte_range(struct mmu_gather *tlb,
+static unsigned long zap_pte_range(struct mmu_gather *tlb,
                                struct vm_area_struct *vma, pmd_t *pmd,
                                unsigned long addr, unsigned long end,
-                               struct zap_details *details)
+                               long *zap_work, struct zap_details *details)
 {
        struct mm_struct *mm = tlb->mm;
        pte_t *pte;
+       spinlock_t *ptl;
        int file_rss = 0;
        int anon_rss = 0;
 
-       pte = pte_offset_map(pmd, addr);
+       pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
        do {
                pte_t ptent = *pte;
-               if (pte_none(ptent))
+               if (pte_none(ptent)) {
+                       (*zap_work)--;
                        continue;
+               }
                if (pte_present(ptent)) {
                        struct page *page = NULL;
-                       if (!(vma->vm_flags & VM_RESERVED)) {
+
+                       (*zap_work) -= PAGE_SIZE;
+
+                       if (!(vma->vm_flags & VM_UNPAGED)) {
                                unsigned long pfn = pte_pfn(ptent);
                                if (unlikely(!pfn_valid(pfn)))
                                        print_bad_pte(vma, ptent, addr);
@@ -613,16 +629,18 @@ static void zap_pte_range(struct mmu_gather *tlb,
                if (!pte_file(ptent))
                        free_swap_and_cache(pte_to_swp_entry(ptent));
                pte_clear_full(mm, addr, pte, tlb->fullmm);
-       } while (pte++, addr += PAGE_SIZE, addr != end);
+       } while (pte++, addr += PAGE_SIZE, (addr != end && *zap_work > 0));
 
        add_mm_rss(mm, file_rss, anon_rss);
-       pte_unmap(pte - 1);
+       pte_unmap_unlock(pte - 1, ptl);
+
+       return addr;
 }
 
-static inline void zap_pmd_range(struct mmu_gather *tlb,
+static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
                                struct vm_area_struct *vma, pud_t *pud,
                                unsigned long addr, unsigned long end,
-                               struct zap_details *details)
+                               long *zap_work, struct zap_details *details)
 {
        pmd_t *pmd;
        unsigned long next;
@@ -630,16 +648,21 @@ static inline void zap_pmd_range(struct mmu_gather *tlb,
        pmd = pmd_offset(pud, addr);
        do {
                next = pmd_addr_end(addr, end);
-               if (pmd_none_or_clear_bad(pmd))
+               if (pmd_none_or_clear_bad(pmd)) {
+                       (*zap_work)--;
                        continue;
-               zap_pte_range(tlb, vma, pmd, addr, next, details);
-       } while (pmd++, addr = next, addr != end);
+               }
+               next = zap_pte_range(tlb, vma, pmd, addr, next,
+                                               zap_work, details);
+       } while (pmd++, addr = next, (addr != end && *zap_work > 0));
+
+       return addr;
 }
 
-static inline void zap_pud_range(struct mmu_gather *tlb,
+static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
                                struct vm_area_struct *vma, pgd_t *pgd,
                                unsigned long addr, unsigned long end,
-                               struct zap_details *details)
+                               long *zap_work, struct zap_details *details)
 {
        pud_t *pud;
        unsigned long next;
@@ -647,15 +670,21 @@ static inline void zap_pud_range(struct mmu_gather *tlb,
        pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
-               if (pud_none_or_clear_bad(pud))
+               if (pud_none_or_clear_bad(pud)) {
+                       (*zap_work)--;
                        continue;
-               zap_pmd_range(tlb, vma, pud, addr, next, details);
-       } while (pud++, addr = next, addr != end);
+               }
+               next = zap_pmd_range(tlb, vma, pud, addr, next,
+                                               zap_work, details);
+       } while (pud++, addr = next, (addr != end && *zap_work > 0));
+
+       return addr;
 }
 
-static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+static unsigned long unmap_page_range(struct mmu_gather *tlb,
+                               struct vm_area_struct *vma,
                                unsigned long addr, unsigned long end,
-                               struct zap_details *details)
+                               long *zap_work, struct zap_details *details)
 {
        pgd_t *pgd;
        unsigned long next;
@@ -668,11 +697,16 @@ static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
        pgd = pgd_offset(vma->vm_mm, addr);
        do {
                next = pgd_addr_end(addr, end);
-               if (pgd_none_or_clear_bad(pgd))
+               if (pgd_none_or_clear_bad(pgd)) {
+                       (*zap_work)--;
                        continue;
-               zap_pud_range(tlb, vma, pgd, addr, next, details);
-       } while (pgd++, addr = next, addr != end);
+               }
+               next = zap_pud_range(tlb, vma, pgd, addr, next,
+                                               zap_work, details);
+       } while (pgd++, addr = next, (addr != end && *zap_work > 0));
        tlb_end_vma(tlb, vma);
+
+       return addr;
 }
 
 #ifdef CONFIG_PREEMPT
@@ -685,7 +719,6 @@ static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 /**
  * unmap_vmas - unmap a range of memory covered by a list of vma's
  * @tlbp: address of the caller's struct mmu_gather
- * @mm: the controlling mm_struct
  * @vma: the starting vma
  * @start_addr: virtual address at which to start unmapping
  * @end_addr: virtual address at which to end unmapping
@@ -694,10 +727,10 @@ static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
  *
  * Returns the end address of the unmapping (restart addr if interrupted).
  *
- * Unmap all pages in the vma list.  Called under page_table_lock.
+ * Unmap all pages in the vma list.
  *
- * We aim to not hold page_table_lock for too long (for scheduling latency
- * reasons).  So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
+ * We aim to not hold locks for too long (for scheduling latency reasons).
+ * So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
  * return the ending mmu_gather to the caller.
  *
  * Only addresses between `start' and `end' will be unmapped.
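
The old zap_bytes byte count becomes a zap_work budget: a mapped pte charges
PAGE_SIZE, an empty entry charges 1, and each level of the walk bails out once
the budget is spent so unmap_vmas() can flush, reschedule and resume. A
hypothetical helper (not in the patch) making that convention explicit:

	static inline int zap_budget_spent(long *zap_work, int pte_was_mapped)
	{
		*zap_work -= pte_was_mapped ? PAGE_SIZE : 1;
		return *zap_work <= 0;
	}
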
@@ -709,12 +742,12 @@ static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
  * drops the lock and schedules.
  */
-unsigned long unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
+unsigned long unmap_vmas(struct mmu_gather **tlbp,
                struct vm_area_struct *vma, unsigned long start_addr,
                unsigned long end_addr, unsigned long *nr_accounted,
                struct zap_details *details)
 {
-       unsigned long zap_bytes = ZAP_BLOCK_SIZE;
+       long zap_work = ZAP_BLOCK_SIZE;
        unsigned long tlb_start = 0;    /* For tlb_finish_mmu */
        int tlb_start_valid = 0;
        unsigned long start = start_addr;
@@ -735,45 +768,39 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
                        *nr_accounted += (end - start) >> PAGE_SHIFT;
 
                while (start != end) {
-                       unsigned long block;
-
                        if (!tlb_start_valid) {
                                tlb_start = start;
                                tlb_start_valid = 1;
                        }
 
-                       if (is_vm_hugetlb_page(vma)) {
-                               block = end - start;
+                       if (unlikely(is_vm_hugetlb_page(vma))) {
                                unmap_hugepage_range(vma, start, end);
-                       } else {
-                               block = min(zap_bytes, end - start);
-                               unmap_page_range(*tlbp, vma, start,
-                                               start + block, details);
+                               zap_work -= (end - start) /
+                                               (HPAGE_SIZE / PAGE_SIZE);
+                               start = end;
+                       } else
+                               start = unmap_page_range(*tlbp, vma,
+                                               start, end, &zap_work, details);
+
+                       if (zap_work > 0) {
+                               BUG_ON(start != end);
+                               break;
                        }
 
-                       start += block;
-                       zap_bytes -= block;
-                       if ((long)zap_bytes > 0)
-                               continue;
-
                        tlb_finish_mmu(*tlbp, tlb_start, start);
 
                        if (need_resched() ||
-                               need_lockbreak(&mm->page_table_lock) ||
                                (i_mmap_lock && need_lockbreak(i_mmap_lock))) {
                                if (i_mmap_lock) {
-                                       /* must reset count of rss freed */
-                                       *tlbp = tlb_gather_mmu(mm, fullmm);
+                                       *tlbp = NULL;
                                        goto out;
                                }
-                               spin_unlock(&mm->page_table_lock);
                                cond_resched();
-                               spin_lock(&mm->page_table_lock);
                        }
 
-                       *tlbp = tlb_gather_mmu(mm, fullmm);
+                       *tlbp = tlb_gather_mmu(vma->vm_mm, fullmm);
                        tlb_start_valid = 0;
-                       zap_bytes = ZAP_BLOCK_SIZE;
+                       zap_work = ZAP_BLOCK_SIZE;
                }
        }
 out:
@@ -795,124 +822,93 @@ unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
        unsigned long end = address + size;
        unsigned long nr_accounted = 0;
 
-       if (is_vm_hugetlb_page(vma)) {
-               zap_hugepage_range(vma, address, size);
-               return end;
-       }
-
        lru_add_drain();
-       spin_lock(&mm->page_table_lock);
        tlb = tlb_gather_mmu(mm, 0);
        update_hiwater_rss(mm);
-       end = unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
-       tlb_finish_mmu(tlb, address, end);
-       spin_unlock(&mm->page_table_lock);
+       end = unmap_vmas(&tlb, vma, address, end, &nr_accounted, details);
+       if (tlb)
+               tlb_finish_mmu(tlb, address, end);
        return end;
 }
 
 /*
  * Do a quick page-table lookup for a single page.
- * mm->page_table_lock must be held.
  */
-static struct page *__follow_page(struct mm_struct *mm, unsigned long address,
-                       int read, int write, int accessed)
+struct page *follow_page(struct mm_struct *mm, unsigned long address,
+                       unsigned int flags)
 {
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *ptep, pte;
+       spinlock_t *ptl;
        unsigned long pfn;
        struct page *page;
 
-       page = follow_huge_addr(mm, address, write);
-       if (! IS_ERR(page))
-               return page;
+       page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
+       if (!IS_ERR(page)) {
+               BUG_ON(flags & FOLL_GET);
+               goto out;
+       }
 
+       page = NULL;
        pgd = pgd_offset(mm, address);
        if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-               goto out;
+               goto no_page_table;
 
        pud = pud_offset(pgd, address);
        if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-               goto out;
+               goto no_page_table;
        
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
+               goto no_page_table;
+
+       if (pmd_huge(*pmd)) {
+               BUG_ON(flags & FOLL_GET);
+               page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
                goto out;
-       if (pmd_huge(*pmd))
-               return follow_huge_pmd(mm, address, pmd, write);
+       }
 
-       ptep = pte_offset_map(pmd, address);
+       ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (!ptep)
                goto out;
 
        pte = *ptep;
-       pte_unmap(ptep);
-       if (pte_present(pte)) {
-               if (write && !pte_write(pte))
-                       goto out;
-               if (read && !pte_read(pte))
-                       goto out;
-               pfn = pte_pfn(pte);
-               if (pfn_valid(pfn)) {
-                       page = pfn_to_page(pfn);
-                       if (accessed) {
-                               if (write && !pte_dirty(pte) &&!PageDirty(page))
-                                       set_page_dirty(page);
-                               mark_page_accessed(page);
-                       }
-                       return page;
-               }
-       }
+       if (!pte_present(pte))
+               goto unlock;
+       if ((flags & FOLL_WRITE) && !pte_write(pte))
+               goto unlock;
+       pfn = pte_pfn(pte);
+       if (!pfn_valid(pfn))
+               goto unlock;
 
+       page = pfn_to_page(pfn);
+       if (flags & FOLL_GET)
+               get_page(page);
+       if (flags & FOLL_TOUCH) {
+               if ((flags & FOLL_WRITE) &&
+                   !pte_dirty(pte) && !PageDirty(page))
+                       set_page_dirty(page);
+               mark_page_accessed(page);
+       }
+unlock:
+       pte_unmap_unlock(ptep, ptl);
 out:
-       return NULL;
-}
-
-inline struct page *
-follow_page(struct mm_struct *mm, unsigned long address, int write)
-{
-       return __follow_page(mm, address, 0, write, 1);
-}
-
-/*
- * check_user_page_readable() can be called frm niterrupt context by oprofile,
- * so we need to avoid taking any non-irq-safe locks
- */
-int check_user_page_readable(struct mm_struct *mm, unsigned long address)
-{
-       return __follow_page(mm, address, 1, 0, 0) != NULL;
-}
-EXPORT_SYMBOL(check_user_page_readable);
-
-static inline int
-untouched_anonymous_page(struct mm_struct* mm, struct vm_area_struct *vma,
-                        unsigned long address)
-{
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-
-       /* Check if the vma is for an anonymous mapping. */
-       if (vma->vm_ops && vma->vm_ops->nopage)
-               return 0;
-
-       /* Check if page directory entry exists. */
-       pgd = pgd_offset(mm, address);
-       if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-               return 1;
-
-       pud = pud_offset(pgd, address);
-       if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-               return 1;
-
-       /* Check if page middle directory entry exists. */
-       pmd = pmd_offset(pud, address);
-       if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-               return 1;
+       return page;
 
-       /* There is a pte slot for 'address' in 'mm'. */
-       return 0;
+no_page_table:
+       /*
+        * When core dumping an enormous anonymous area that nobody
+        * has touched so far, we don't want to allocate page tables.
+        */
+       if (flags & FOLL_ANON) {
+               page = ZERO_PAGE(address);
+               if (flags & FOLL_GET)
+                       get_page(page);
+               BUG_ON(flags & FOLL_WRITE);
+       }
+       return page;
 }
 
 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
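
The rewritten follow_page() above takes FOLL_* flags and does its own pte
locking, so callers no longer need page_table_lock. A hedged usage sketch
(the wrapper function is hypothetical; hugetlb areas are handled separately,
as get_user_pages() does below):

	/* Caller must hold down_read(&mm->mmap_sem), as get_user_pages() does. */
	static int example_addr_has_writable_page(struct mm_struct *mm,
						  unsigned long addr)
	{
		struct page *page;

		page = follow_page(mm, addr, FOLL_GET | FOLL_TOUCH | FOLL_WRITE);
		if (!page)
			return 0;
		put_page(page);		/* drop the reference taken by FOLL_GET */
		return 1;
	}
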
@@ -920,18 +916,19 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                struct page **pages, struct vm_area_struct **vmas)
 {
        int i;
-       unsigned int flags;
+       unsigned int vm_flags;
 
        /* 
         * Require read or write permissions.
         * If 'force' is set, we only require the "MAY" flags.
         */
-       flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
-       flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+       vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+       vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
        i = 0;
 
        do {
-               struct vm_area_struct * vma;
+               struct vm_area_struct *vma;
+               unsigned int foll_flags;
 
                vma = find_extend_vma(mm, start);
                if (!vma && in_gate_area(tsk, start)) {
@@ -971,8 +968,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        continue;
                }
 
-               if (!vma || (vma->vm_flags & (VM_IO | VM_RESERVED))
-                               || !(flags & vma->vm_flags))
+               if (!vma || (vma->vm_flags & VM_IO)
+                               || !(vm_flags & vma->vm_flags))
                        return i ? : -EFAULT;
 
                if (is_vm_hugetlb_page(vma)) {
@@ -980,29 +977,25 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                                &start, &len, i);
                        continue;
                }
-               spin_lock(&mm->page_table_lock);
+
+               foll_flags = FOLL_TOUCH;
+               if (pages)
+                       foll_flags |= FOLL_GET;
+               if (!write && !(vma->vm_flags & VM_LOCKED) &&
+                   (!vma->vm_ops || !vma->vm_ops->nopage))
+                       foll_flags |= FOLL_ANON;
+
                do {
-                       int write_access = write;
                        struct page *page;
 
-                       cond_resched_lock(&mm->page_table_lock);
-                       while (!(page = follow_page(mm, start, write_access))) {
-                               int ret;
-
-                               /*
-                                * Shortcut for anonymous pages. We don't want
-                                * to force the creation of pages tables for
-                                * insanely big anonymously mapped areas that
-                                * nobody touched so far. This is important
-                                * for doing a core dump for these mappings.
-                                */
-                               if (!write && untouched_anonymous_page(mm,vma,start)) {
-                                       page = ZERO_PAGE(start);
-                                       break;
-                               }
-                               spin_unlock(&mm->page_table_lock);
-                               ret = __handle_mm_fault(mm, vma, start, write_access);
+                       if (write)
+                               foll_flags |= FOLL_WRITE;
 
+                       cond_resched();
+                       while (!(page = follow_page(mm, start, foll_flags))) {
+                               int ret;
+                               ret = __handle_mm_fault(mm, vma, start,
+                                               foll_flags & FOLL_WRITE);
                                /*
                                 * The VM_FAULT_WRITE bit tells us that do_wp_page has
                                 * broken COW when necessary, even if maybe_mkwrite
@@ -1010,7 +1003,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                 * subsequent page lookups as if they were reads.
                                 */
                                if (ret & VM_FAULT_WRITE)
-                                       write_access = 0;
+                                       foll_flags &= ~FOLL_WRITE;
                                
                                switch (ret & ~VM_FAULT_WRITE) {
                                case VM_FAULT_MINOR:
@@ -1026,12 +1019,10 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                default:
                                        BUG();
                                }
-                               spin_lock(&mm->page_table_lock);
                        }
                        if (pages) {
                                pages[i] = page;
                                flush_dcache_page(page);
-                               page_cache_get(page);
                        }
                        if (vmas)
                                vmas[i] = vma;
@@ -1039,7 +1030,6 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                        start += PAGE_SIZE;
                        len--;
                } while (len && start < vma->vm_end);
-               spin_unlock(&mm->page_table_lock);
        } while (len);
        return i;
 }
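
The page reference handed back to get_user_pages() callers now comes from
follow_page(FOLL_GET) instead of an explicit page_cache_get(); the caller-side
contract is unchanged. Roughly (illustrative caller, not part of this patch):

	static struct page *example_pin_user_page(unsigned long addr)
	{
		struct page *page;
		int ret;

		down_read(&current->mm->mmap_sem);
		ret = get_user_pages(current, current->mm, addr & PAGE_MASK,
				     1, 1, 0, &page, NULL);
		up_read(&current->mm->mmap_sem);
		if (ret != 1)
			return NULL;
		return page;	/* caller eventually drops it with put_page() */
	}
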
@@ -1049,8 +1039,9 @@ static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
                        unsigned long addr, unsigned long end, pgprot_t prot)
 {
        pte_t *pte;
+       spinlock_t *ptl;
 
-       pte = pte_alloc_map(mm, pmd, addr);
+       pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
        if (!pte)
                return -ENOMEM;
        do {
@@ -1062,7 +1053,7 @@ static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
                BUG_ON(!pte_none(*pte));
                set_pte_at(mm, addr, pte, zero_pte);
        } while (pte++, addr += PAGE_SIZE, addr != end);
-       pte_unmap(pte - 1);
+       pte_unmap_unlock(pte - 1, ptl);
        return 0;
 }
 
@@ -1112,14 +1103,12 @@ int zeromap_page_range(struct vm_area_struct *vma,
        BUG_ON(addr >= end);
        pgd = pgd_offset(mm, addr);
        flush_cache_range(vma, addr, end);
-       spin_lock(&mm->page_table_lock);
        do {
                next = pgd_addr_end(addr, end);
                err = zeromap_pud_range(mm, pgd, addr, next, prot);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
-       spin_unlock(&mm->page_table_lock);
        return err;
 }
 
@@ -1133,8 +1122,9 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
                        unsigned long pfn, pgprot_t prot)
 {
        pte_t *pte;
+       spinlock_t *ptl;
 
-       pte = pte_alloc_map(mm, pmd, addr);
+       pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
        if (!pte)
                return -ENOMEM;
        do {
@@ -1142,7 +1132,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
                set_pte_at(mm, addr, pte, pfn_pte(pfn, prot));
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
-       pte_unmap(pte - 1);
+       pte_unmap_unlock(pte - 1, ptl);
        return 0;
 }
 
@@ -1201,16 +1191,21 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
         * rest of the world about it:
         *   VM_IO tells people not to look at these pages
         *      (accesses can have side effects).
-        *   VM_RESERVED tells the core MM not to "manage" these pages
-         *     (e.g. refcount, mapcount, try to swap them out).
+        *   VM_RESERVED is specified all over the place, because
+        *      in 2.4 it kept swapout's vma scan off this vma; but
+        *      in 2.6 the LRU scan won't even find its pages, so this
+        *      flag means no more than count its pages in reserved_vm,
+        *      and omit it from core dump, even when VM_IO is turned off.
+        *   VM_UNPAGED tells the core MM not to "manage" these pages
+        *      (e.g. refcount, mapcount, try to swap them out): in
+        *      particular, zap_pte_range does not try to free them.
         */
-       vma->vm_flags |= VM_IO | VM_RESERVED;
+       vma->vm_flags |= VM_IO | VM_RESERVED | VM_UNPAGED;
 
        BUG_ON(addr >= end);
        pfn -= addr >> PAGE_SHIFT;
        pgd = pgd_offset(mm, addr);
        flush_cache_range(vma, addr, end);
-       spin_lock(&mm->page_table_lock);
        do {
                next = pgd_addr_end(addr, end);
                err = remap_pud_range(mm, pgd, addr, next,
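
Since remap_pfn_range() itself now sets VM_IO | VM_RESERVED | VM_UNPAGED on
the vma, a driver's ->mmap() only supplies the pfn range and protection. A
typical caller, purely for illustration (EXAMPLE_PHYS_BASE is a hypothetical
device address, not anything defined by this patch):

	static int example_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long pfn = EXAMPLE_PHYS_BASE >> PAGE_SHIFT;

		return remap_pfn_range(vma, vma->vm_start, pfn,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot);
	}
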
@@ -1218,12 +1213,36 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);
-       spin_unlock(&mm->page_table_lock);
        return err;
 }
 EXPORT_SYMBOL(remap_pfn_range);
 
 /*
+ * handle_pte_fault chooses page fault handler according to an entry
+ * which was read non-atomically.  Before making any commitment, on
+ * those architectures or configurations (e.g. i386 with PAE) which
+ * might give a mix of unmatched parts, do_swap_page and do_file_page
+ * must check under lock before unmapping the pte and proceeding
+ * (but do_wp_page is only called after already making such a check;
+ * and do_anonymous_page and do_no_page can safely check later on).
+ */
+static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
+                               pte_t *page_table, pte_t orig_pte)
+{
+       int same = 1;
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
+       if (sizeof(pte_t) > sizeof(unsigned long)) {
+               spinlock_t *ptl = pte_lockptr(mm, pmd);
+               spin_lock(ptl);
+               same = pte_same(*page_table, orig_pte);
+               spin_unlock(ptl);
+       }
+#endif
+       pte_unmap(page_table);
+       return same;
+}
+
+/*
  * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
  * servicing faults for write access.  In the normal case, do always want
  * pte_mkwrite.  But get_user_pages can cause write faults for mappings
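
The pte_unmap_same() helper added above is the early bail-out used by
do_swap_page() and do_file_page() below: on configurations where a pte cannot
be read atomically (i386 with PAE being the usual example), a handler entered
on a stale orig_pte simply returns and lets the fault be retried. In sketch
form (illustrative skeleton only):

	static int example_fault(struct mm_struct *mm, pmd_t *pmd,
				 pte_t *page_table, pte_t orig_pte)
	{
		if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
			return VM_FAULT_MINOR;	/* raced with a concurrent fault */

		/*
		 * Do the slow work with no pte lock held, then re-take the lock
		 * with pte_offset_map_lock() and recheck pte_same() before
		 * committing, exactly as do_swap_page() does below.
		 */
		return VM_FAULT_MINOR;
	}
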
@@ -1250,19 +1269,20 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
  * change only once the write actually happens. This avoids a few races,
  * and potentially makes it more efficient.
  *
- * We hold the mm semaphore and the page_table_lock on entry and exit
- * with the page_table_lock released.
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * but allow concurrent faults), with pte both mapped and locked.
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
 static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long address, pte_t *page_table, pmd_t *pmd,
-               pte_t orig_pte)
+               spinlock_t *ptl, pte_t orig_pte)
 {
        struct page *old_page, *new_page;
        unsigned long pfn = pte_pfn(orig_pte);
        pte_t entry;
        int ret = VM_FAULT_MINOR;
 
-       BUG_ON(vma->vm_flags & VM_RESERVED);
+       BUG_ON(vma->vm_flags & VM_UNPAGED);
 
        if (unlikely(!pfn_valid(pfn))) {
                /*
@@ -1293,8 +1313,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
         * Ok, we need to copy. Oh, well..
         */
        page_cache_get(old_page);
-       pte_unmap(page_table);
-       spin_unlock(&mm->page_table_lock);
+       pte_unmap_unlock(page_table, ptl);
 
        if (unlikely(anon_vma_prepare(vma)))
                goto oom;
@@ -1312,8 +1331,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        /*
         * Re-check the pte - we dropped the lock
         */
-       spin_lock(&mm->page_table_lock);
-       page_table = pte_offset_map(pmd, address);
+       page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (likely(pte_same(*page_table, orig_pte))) {
                page_remove_rmap(old_page);
                if (!PageAnon(old_page)) {
@@ -1326,7 +1344,6 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                ptep_establish(vma, address, page_table, entry);
                update_mmu_cache(vma, address, entry);
                lazy_mmu_prot_update(entry);
-
                lru_cache_add_active(new_page);
                page_add_anon_rmap(new_page, vma, address);
 
@@ -1337,8 +1354,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
        page_cache_release(new_page);
        page_cache_release(old_page);
 unlock:
-       pte_unmap(page_table);
-       spin_unlock(&mm->page_table_lock);
+       pte_unmap_unlock(page_table, ptl);
        return ret;
 oom:
        page_cache_release(old_page);
@@ -1410,13 +1426,6 @@ again:
 
        restart_addr = zap_page_range(vma, start_addr,
                                        end_addr - start_addr, details);
-
-       /*
-        * We cannot rely on the break test in unmap_vmas:
-        * on the one hand, we don't want to restart our loop
-        * just because that broke out for the page_table_lock;
-        * on the other hand, it does no test when vma is small.
-        */
        need_break = need_resched() ||
                        need_lockbreak(details->i_mmap_lock);
 
@@ -1665,20 +1674,22 @@ void swapin_readahead(swp_entry_t entry, unsigned long addr,struct vm_area_struc
 }
 
 /*
- * We hold the mm semaphore and the page_table_lock on entry and
- * should release the pagetable lock on exit..
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * but allow concurrent faults), and pte mapped but not yet locked.
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
 static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long address, pte_t *page_table, pmd_t *pmd,
                int write_access, pte_t orig_pte)
 {
+       spinlock_t *ptl;
        struct page *page;
        swp_entry_t entry;
        pte_t pte;
        int ret = VM_FAULT_MINOR;
 
-       pte_unmap(page_table);
-       spin_unlock(&mm->page_table_lock);
+       if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
+               goto out;
 
        entry = pte_to_swp_entry(orig_pte);
        page = lookup_swap_cache(entry);
@@ -1687,11 +1698,10 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                page = read_swap_cache_async(entry, vma, address);
                if (!page) {
                        /*
-                        * Back out if somebody else faulted in this pte while
-                        * we released the page table lock.
+                        * Back out if somebody else faulted in this pte
+                        * while we released the pte lock.
                         */
-                       spin_lock(&mm->page_table_lock);
-                       page_table = pte_offset_map(pmd, address);
+                       page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
                        if (likely(pte_same(*page_table, orig_pte)))
                                ret = VM_FAULT_OOM;
                        goto unlock;
@@ -1707,11 +1717,9 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        lock_page(page);
 
        /*
-        * Back out if somebody else faulted in this pte while we
-        * released the page table lock.
+        * Back out if somebody else already faulted in this pte.
         */
-       spin_lock(&mm->page_table_lock);
-       page_table = pte_offset_map(pmd, address);
+       page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
        if (unlikely(!pte_same(*page_table, orig_pte)))
                goto out_nomap;
 
@@ -1740,7 +1748,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 
        if (write_access) {
                if (do_wp_page(mm, vma, address,
-                               page_table, pmd, pte) == VM_FAULT_OOM)
+                               page_table, pmd, ptl, pte) == VM_FAULT_OOM)
                        ret = VM_FAULT_OOM;
                goto out;
        }
@@ -1749,37 +1757,32 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        update_mmu_cache(vma, address, pte);
        lazy_mmu_prot_update(pte);
 unlock:
-       pte_unmap(page_table);
-       spin_unlock(&mm->page_table_lock);
+       pte_unmap_unlock(page_table, ptl);
 out:
        return ret;
 out_nomap:
-       pte_unmap(page_table);
-       spin_unlock(&mm->page_table_lock);
+       pte_unmap_unlock(page_table, ptl);
        unlock_page(page);
        page_cache_release(page);
        return ret;
 }
 
 /*
- * We are called with the MM semaphore and page_table_lock
- * spinlock held to protect against concurrent faults in
- * multithreaded programs. 
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * but allow concurrent faults), and pte mapped but not yet locked.
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
 static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long address, pte_t *page_table, pmd_t *pmd,
                int write_access)
 {
-       struct page *page = ZERO_PAGE(addr);
+       struct page *page;
+       spinlock_t *ptl;
        pte_t entry;
 
-       /* Mapping of ZERO_PAGE - vm_page_prot is readonly */
-       entry = mk_pte(page, vma->vm_page_prot);
-
        if (write_access) {
                /* Allocate our own private page. */
                pte_unmap(page_table);
-               spin_unlock(&mm->page_table_lock);
 
                if (unlikely(anon_vma_prepare(vma)))
                        goto oom;
@@ -1787,23 +1790,28 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                if (!page)
                        goto oom;
 
-               spin_lock(&mm->page_table_lock);
-               page_table = pte_offset_map(pmd, address);
-
-               if (!pte_none(*page_table)) {
-                       page_cache_release(page);
-                       goto unlock;
-               }
-               inc_mm_counter(mm, anon_rss);
                entry = mk_pte(page, vma->vm_page_prot);
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+
+               page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
+               if (!pte_none(*page_table))
+                       goto release;
+               inc_mm_counter(mm, anon_rss);
                lru_cache_add_active(page);
                SetPageReferenced(page);
                page_add_anon_rmap(page, vma, address);
        } else {
+               /* Map the ZERO_PAGE - vm_page_prot is readonly */
+               page = ZERO_PAGE(address);
+               page_cache_get(page);
+               entry = mk_pte(page, vma->vm_page_prot);
+
+               ptl = pte_lockptr(mm, pmd);
+               spin_lock(ptl);
+               if (!pte_none(*page_table))
+                       goto release;
                inc_mm_counter(mm, file_rss);
                page_add_file_rmap(page);
-               page_cache_get(page);
        }
 
        set_pte_at(mm, address, page_table, entry);
@@ -1812,9 +1820,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
        update_mmu_cache(vma, address, entry);
        lazy_mmu_prot_update(entry);
 unlock:
-       pte_unmap(page_table);
-       spin_unlock(&mm->page_table_lock);
+       pte_unmap_unlock(page_table, ptl);
        return VM_FAULT_MINOR;
+release:
+       page_cache_release(page);
+       goto unlock;
 oom:
        return VM_FAULT_OOM;
 }
@@ -1828,13 +1838,15 @@ oom:
  * As this is called only for pages that do not currently exist, we
  * do not need to flush old virtual caches or the TLB.
  *
- * This is called with the MM semaphore held and the page table
- * spinlock held. Exit with the spinlock released.
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * but allow concurrent faults), and pte mapped but not yet locked.
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
 static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long address, pte_t *page_table, pmd_t *pmd,
                int write_access)
 {
+       spinlock_t *ptl;
        struct page *new_page;
        struct address_space *mapping = NULL;
        pte_t entry;
@@ -1843,7 +1855,6 @@ static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
        int anon = 0;
 
        pte_unmap(page_table);
-       spin_unlock(&mm->page_table_lock);
 
        if (vma->vm_file) {
                mapping = vma->vm_file->f_mapping;
@@ -1883,21 +1894,20 @@ retry:
                anon = 1;
        }
 
-       spin_lock(&mm->page_table_lock);
+       page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
        /*
         * For a file-backed vma, someone could have truncated or otherwise
         * invalidated this page.  If unmap_mapping_range got called,
         * retry getting the page.
         */
        if (mapping && unlikely(sequence != mapping->truncate_count)) {
-               spin_unlock(&mm->page_table_lock);
+               pte_unmap_unlock(page_table, ptl);
                page_cache_release(new_page);
                cond_resched();
                sequence = mapping->truncate_count;
                smp_rmb();
                goto retry;
        }
-       page_table = pte_offset_map(pmd, address);
 
        /*
         * This silly early PAGE_DIRTY setting removes a race
@@ -1920,7 +1930,7 @@ retry:
                        inc_mm_counter(mm, anon_rss);
                        lru_cache_add_active(new_page);
                        page_add_anon_rmap(new_page, vma, address);
-               } else if (!(vma->vm_flags & VM_RESERVED)) {
+               } else if (!(vma->vm_flags & VM_UNPAGED)) {
                        inc_mm_counter(mm, file_rss);
                        page_add_file_rmap(new_page);
                }
@@ -1934,8 +1944,7 @@ retry:
        update_mmu_cache(vma, address, entry);
        lazy_mmu_prot_update(entry);
 unlock:
-       pte_unmap(page_table);
-       spin_unlock(&mm->page_table_lock);
+       pte_unmap_unlock(page_table, ptl);
        return ret;
 oom:
        page_cache_release(new_page);
@@ -1946,6 +1955,10 @@ oom:
  * Fault of a previously existing named mapping. Repopulate the pte
  * from the encoded file_pte if possible. This enables swappable
  * nonlinear vmas.
+ *
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * but allow concurrent faults), and pte mapped but not yet locked.
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
 static int do_file_page(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long address, pte_t *page_table, pmd_t *pmd,
@@ -1954,8 +1967,8 @@ static int do_file_page(struct mm_struct *mm, struct vm_area_struct *vma,
        pgoff_t pgoff;
        int err;
 
-       pte_unmap(page_table);
-       spin_unlock(&mm->page_table_lock);
+       if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
+               return VM_FAULT_MINOR;
 
        if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
                /*
@@ -1985,25 +1998,19 @@ static int do_file_page(struct mm_struct *mm, struct vm_area_struct *vma,
  * with external mmu caches can use to update those (ie the Sparc or
  * PowerPC hashed page tables that act as extended TLBs).
  *
- * Note the "page_table_lock". It is to protect against kswapd removing
- * pages from under us. Note that kswapd only ever _removes_ pages, never
- * adds them. As such, once we have noticed that the page is not present,
- * we can drop the lock early.
- *
- * The adding of pages is protected by the MM semaphore (which we hold),
- * so we don't need to worry about a page being suddenly been added into
- * our VM.
- *
- * We enter with the pagetable spinlock held, we are supposed to
- * release it when done.
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * but allow concurrent faults), and pte mapped but not yet locked.
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
 static inline int handle_pte_fault(struct mm_struct *mm,
                struct vm_area_struct *vma, unsigned long address,
                pte_t *pte, pmd_t *pmd, int write_access)
 {
        pte_t entry;
+       pte_t old_entry;
+       spinlock_t *ptl;
 
-       entry = *pte;
+       old_entry = entry = *pte;
        if (!pte_present(entry)) {
                if (pte_none(entry)) {
                        if (!vma->vm_ops || !vma->vm_ops->nopage)
@@ -2019,17 +2026,33 @@ static inline int handle_pte_fault(struct mm_struct *mm,
                                        pte, pmd, write_access, entry);
        }
 
+       ptl = pte_lockptr(mm, pmd);
+       spin_lock(ptl);
+       if (unlikely(!pte_same(*pte, entry)))
+               goto unlock;
        if (write_access) {
                if (!pte_write(entry))
-                       return do_wp_page(mm, vma, address, pte, pmd, entry);
+                       return do_wp_page(mm, vma, address,
+                                       pte, pmd, ptl, entry);
                entry = pte_mkdirty(entry);
        }
        entry = pte_mkyoung(entry);
-       ptep_set_access_flags(vma, address, pte, entry, write_access);
-       update_mmu_cache(vma, address, entry);
-       lazy_mmu_prot_update(entry);
-       pte_unmap(pte);
-       spin_unlock(&mm->page_table_lock);
+       if (!pte_same(old_entry, entry)) {
+               ptep_set_access_flags(vma, address, pte, entry, write_access);
+               update_mmu_cache(vma, address, entry);
+               lazy_mmu_prot_update(entry);
+       } else {
+               /*
+                * This is needed only for protection faults but the arch code
+                * is not yet telling us if this is a protection fault or not.
+                * This still avoids useless tlb flushes for .text page faults
+                * with threads.
+                */
+               if (write_access)
+                       flush_tlb_page(vma, address);
+       }
+unlock:
+       pte_unmap_unlock(pte, ptl);
        return VM_FAULT_MINOR;
 }
 
@@ -2051,30 +2074,18 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        if (unlikely(is_vm_hugetlb_page(vma)))
                return hugetlb_fault(mm, vma, address, write_access);
 
-       /*
-        * We need the page table lock to synchronize with kswapd
-        * and the SMP-safe atomic PTE updates.
-        */
        pgd = pgd_offset(mm, address);
-       spin_lock(&mm->page_table_lock);
-
        pud = pud_alloc(mm, pgd, address);
        if (!pud)
-               goto oom;
-
+               return VM_FAULT_OOM;
        pmd = pmd_alloc(mm, pud, address);
        if (!pmd)
-               goto oom;
-
+               return VM_FAULT_OOM;
        pte = pte_alloc_map(mm, pmd, address);
        if (!pte)
-               goto oom;
-       
-       return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
+               return VM_FAULT_OOM;
 
- oom:
-       spin_unlock(&mm->page_table_lock);
-       return VM_FAULT_OOM;
+       return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
 }
 
 #ifndef __PAGETABLE_PUD_FOLDED
@@ -2084,24 +2095,16 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  */
 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
 {
-       pud_t *new;
-
-       if (mm != &init_mm)             /* Temporary bridging hack */
-               spin_unlock(&mm->page_table_lock);
-       new = pud_alloc_one(mm, address);
-       if (!new) {
-               if (mm != &init_mm)     /* Temporary bridging hack */
-                       spin_lock(&mm->page_table_lock);
+       pud_t *new = pud_alloc_one(mm, address);
+       if (!new)
                return -ENOMEM;
-       }
 
        spin_lock(&mm->page_table_lock);
        if (pgd_present(*pgd))          /* Another has populated it */
                pud_free(new);
        else
                pgd_populate(mm, pgd, new);
-       if (mm == &init_mm)             /* Temporary bridging hack */
-               spin_unlock(&mm->page_table_lock);
+       spin_unlock(&mm->page_table_lock);
        return 0;
 }
 #endif /* __PAGETABLE_PUD_FOLDED */
@@ -2113,16 +2116,9 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
  */
 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 {
-       pmd_t *new;
-
-       if (mm != &init_mm)             /* Temporary bridging hack */
-               spin_unlock(&mm->page_table_lock);
-       new = pmd_alloc_one(mm, address);
-       if (!new) {
-               if (mm != &init_mm)     /* Temporary bridging hack */
-                       spin_lock(&mm->page_table_lock);
+       pmd_t *new = pmd_alloc_one(mm, address);
+       if (!new)
                return -ENOMEM;
-       }
 
        spin_lock(&mm->page_table_lock);
 #ifndef __ARCH_HAS_4LEVEL_HACK
@@ -2136,8 +2132,7 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
        else
                pgd_populate(mm, pud, new);
 #endif /* __ARCH_HAS_4LEVEL_HACK */
-       if (mm == &init_mm)             /* Temporary bridging hack */
-               spin_unlock(&mm->page_table_lock);
+       spin_unlock(&mm->page_table_lock);
        return 0;
 }
 #endif /* __PAGETABLE_PMD_FOLDED */
@@ -2214,7 +2209,7 @@ static int __init gate_vma_init(void)
        gate_vma.vm_start = FIXADDR_USER_START;
        gate_vma.vm_end = FIXADDR_USER_END;
        gate_vma.vm_page_prot = PAGE_READONLY;
-       gate_vma.vm_flags = VM_RESERVED;
+       gate_vma.vm_flags = 0;
        return 0;
 }
 __initcall(gate_vma_init);