MM: Pass a PTE pointer to update_mmu_cache() rather than the PTE itself
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index be4ab3d..c45f9bb 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
 #include <linux/init.h>
 #include <linux/pagemap.h>
 
+#include <asm/bugs.h>
 #include <asm/cacheflush.h>
+#include <asm/cachetype.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-static unsigned long shared_pte_mask = L_PTE_CACHEABLE;
+#include "mm.h"
+
+static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
 
 /*
  * We take the easy way out of this problem - we make the
  * PTE uncacheable.  However, we leave the write buffer on.
+ *
+ * Note that the pte lock held when calling update_mmu_cache must also
+ * guard the pte (somewhere else in the same mm) that we modify here.
+ * Therefore those configurations which might call adjust_pte (those
+ * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
  */
-static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
+static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
+       unsigned long pfn, pte_t *ptep)
 {
-       pgd_t *pgd;
-       pmd_t *pmd;
-       pte_t *pte, entry;
-       int ret = 0;
+       pte_t entry = *ptep;
+       int ret;
 
-       pgd = pgd_offset(vma->vm_mm, address);
-       if (pgd_none(*pgd))
-               goto no_pgd;
-       if (pgd_bad(*pgd))
-               goto bad_pgd;
-
-       pmd = pmd_offset(pgd, address);
-       if (pmd_none(*pmd))
-               goto no_pmd;
-       if (pmd_bad(*pmd))
-               goto bad_pmd;
-
-       pte = pte_offset_map(pmd, address);
-       entry = *pte;
+       /*
+        * If this page is present, it's actually being shared.
+        */
+       ret = pte_present(entry);
 
        /*
         * If this page isn't present, or is already setup to
         * fault (ie, is old), we can safely ignore any issues.
         */
-       if (pte_present(entry) && pte_val(entry) & shared_pte_mask) {
-               flush_cache_page(vma, address, pte_pfn(entry));
-               pte_val(entry) &= ~shared_pte_mask;
-               set_pte(pte, entry);
+       if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
+               flush_cache_page(vma, address, pfn);
+               outer_flush_range((pfn << PAGE_SHIFT),
+                                 (pfn << PAGE_SHIFT) + PAGE_SIZE);
+               pte_val(entry) &= ~L_PTE_MT_MASK;
+               pte_val(entry) |= shared_pte_mask;
+               set_pte_at(vma->vm_mm, address, ptep, entry);
                flush_tlb_page(vma, address);
-               ret = 1;
        }
-       pte_unmap(pte);
+
        return ret;
+}
+
+static int adjust_pte(struct vm_area_struct *vma, unsigned long address,
+       unsigned long pfn)
+{
+       spinlock_t *ptl;
+       pgd_t *pgd;
+       pmd_t *pmd;
+       pte_t *pte;
+       int ret;
 
-bad_pgd:
-       pgd_ERROR(*pgd);
-       pgd_clear(pgd);
-no_pgd:
-       return 0;
-
-bad_pmd:
-       pmd_ERROR(*pmd);
-       pmd_clear(pmd);
-no_pmd:
-       return 0;
+       pgd = pgd_offset(vma->vm_mm, address);
+       if (pgd_none_or_clear_bad(pgd))
+               return 0;
+
+       pmd = pmd_offset(pgd, address);
+       if (pmd_none_or_clear_bad(pmd))
+               return 0;
+
+       /*
+        * This is called while another page table is mapped, so we
+        * must use the nested version.  This also means we need to
+        * open-code the spin-locking.
+        */
+       ptl = pte_lockptr(vma->vm_mm, pmd);
+       pte = pte_offset_map_nested(pmd, address);
+       spin_lock(ptl);
+
+       ret = do_adjust_pte(vma, address, pfn, pte);
+
+       spin_unlock(ptl);
+       pte_unmap_nested(pte);
+
+       return ret;
 }
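
A note on the pattern above: the usual generic helper combines the map and the lock in one step, but it maps with the non-nested pte_offset_map(), so it cannot be used while the caller of update_mmu_cache() still has the faulting page table mapped; "nested" here refers to the second kmap_atomic slot (KM_PTE1 rather than KM_PTE0) used on highmem configurations. For comparison, pte_offset_map_lock() from include/linux/mm.h of this era (quoted from memory) is:

	#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
	({							\
		spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
		pte_t *__pte = pte_offset_map(pmd, address);	\
		*(ptlp) = __ptl;				\
		spin_lock(__ptl);				\
		__pte;						\
	})

adjust_pte() therefore open-codes the same pte_lockptr()/spin_lock() sequence around the nested mapping.
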
 
 static void
@@ -105,17 +127,15 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigned long addr, unsigned long pfn)
                if (!(mpnt->vm_flags & VM_MAYSHARE))
                        continue;
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
-               aliases += adjust_pte(mpnt, mpnt->vm_start + offset);
+               aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);
        }
        flush_dcache_mmap_unlock(mapping);
        if (aliases)
-               adjust_pte(vma, addr);
+               adjust_pte(vma, addr, pfn);
        else
                flush_cache_page(vma, addr, pfn);
 }
 
-void __flush_dcache_page(struct address_space *mapping, struct page *page);
-
 /*
  * Take care of architecture specific things when placing a new PTE into
  * a page table, or changing an existing PTE.  Basically, there are two
@@ -127,27 +147,36 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page);
  *  2. If we have multiple shared mappings of the same space in
  *     an object, we need to deal with the cache aliasing issues.
  *
- * Note that the page_table_lock will be held.
+ * Note that the pte lock will be held.
  */
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
+       pte_t *ptep)
 {
-       unsigned long pfn = pte_pfn(pte);
+       unsigned long pfn = pte_pfn(*ptep);
        struct address_space *mapping;
        struct page *page;
 
        if (!pfn_valid(pfn))
                return;
 
+       /*
+        * The zero page is never written to, so never has any dirty
+        * cache lines, and therefore never needs to be flushed.
+        */
        page = pfn_to_page(pfn);
+       if (page == ZERO_PAGE(0))
+               return;
+
        mapping = page_mapping(page);
+#ifndef CONFIG_SMP
+       if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+               __flush_dcache_page(mapping, page);
+#endif
        if (mapping) {
-               int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-
-               if (dirty)
-                       __flush_dcache_page(mapping, page);
-
                if (cache_is_vivt())
                        make_coherent(mapping, vma, addr, pfn);
+               else if (vma->vm_flags & VM_EXEC)
+                       __flush_icache_all();
        }
 }
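
The interface change itself is visible in the hunk above: update_mmu_cache() now receives a pte_t *ptep instead of a pte_t value, so the hook can read the current entry (and, on VIVT ARM, rewrite it via do_adjust_pte()) while the caller still holds the pte lock. Below is a sketch of what a typical generic-mm call site looks like after the conversion; the function name example_finish_fault and its argument names are illustrative, not a quote of mm/memory.c:

	/*
	 * Sketch: install a PTE and invoke the architecture hook.
	 * Previously the call was update_mmu_cache(vma, address, entry),
	 * passing the value; now the pointer that is already mapped and
	 * locked is handed over instead.
	 */
	static inline void example_finish_fault(struct vm_area_struct *vma,
			unsigned long address, pte_t *page_table, pte_t entry)
	{
		set_pte_at(vma->vm_mm, address, page_table, entry);
		update_mmu_cache(vma, address, page_table);
	}
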
 
@@ -184,9 +213,8 @@ void __init check_writebuffer_bugs(void)
        page = alloc_page(GFP_KERNEL);
        if (page) {
                unsigned long *p1, *p2;
-               pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG|
-                                        L_PTE_DIRTY|L_PTE_WRITE|
-                                        L_PTE_BUFFERABLE);
+               pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
+                                       L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);
 
                p1 = vmap(&page, 1, VM_IOREMAP, prot);
                p2 = vmap(&page, 1, VM_IOREMAP, prot);
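
For reference, __pgprot_modify() clears a field of an existing protection value and substitutes new bits; it is needed here because, after the L_PTE_MT_* rework, the memory type is a multi-bit field (masked by L_PTE_MT_MASK) rather than the independent L_PTE_CACHEABLE and L_PTE_BUFFERABLE flags the removed lines used. Its definition in arch/arm/include/asm/pgtable.h is, modulo formatting:

	#define __pgprot_modify(prot, mask, bits) \
		__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

So the replacement builds PAGE_KERNEL with its memory-type field set to L_PTE_MT_BUFFERABLE: an uncached but write-buffered mapping, which is exactly what the write-buffer test needs.
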
@@ -207,7 +235,7 @@ void __init check_writebuffer_bugs(void)
 
        if (v) {
                printk("failed, %s\n", reason);
-               shared_pte_mask |= L_PTE_BUFFERABLE;
+               shared_pte_mask = L_PTE_MT_UNCACHED;
        } else {
                printk("ok\n");
        }
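
For context, check_writebuffer() (unchanged by this patch, so absent from these hunks) is what decides v above: it writes through one of the two vmap() aliases, overwrites through the other, and reads the first back, with barriers between each step. If the read returns the stale value, the write buffer is not coherent across the aliases and shared_pte_mask is hardened to L_PTE_MT_UNCACHED. From memory, the helper looks approximately like this:

	static int __init check_writebuffer(unsigned long *p1, unsigned long *p2)
	{
		register unsigned long zero = 0, one = 1, val;

		local_irq_disable();
		mb();
		*p1 = one;	/* write via the first alias */
		mb();
		*p2 = zero;	/* overwrite via the second alias */
		mb();
		val = *p1;	/* a coherent system reads back zero */
		mb();
		local_irq_enable();

		return val != zero;	/* non-zero: aliases are not coherent */
	}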