[ARM] mm: convert fault-armv.c to the new L_PTE_MT_* memory type bits
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index cf95c5d..56ee153 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
 #include <linux/init.h>
 #include <linux/pagemap.h>
 
+#include <asm/bugs.h>
 #include <asm/cacheflush.h>
+#include <asm/cachetype.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
-static unsigned long shared_pte_mask = L_PTE_CACHEABLE;
+#include "mm.h"
+
+static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
 
 /*
  * We take the easy way out of this problem - we make the
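The shared_pte_mask change above is the heart of this update: the Linux PTE no longer carries independent L_PTE_CACHEABLE/L_PTE_BUFFERABLE attribute bits, but a packed L_PTE_MT_* memory-type field. Demoting a shared mapping therefore stops being a bit-clear and becomes a whole-field replacement, which is exactly the pattern the adjust_pte() hunk below follows. A minimal sketch of the difference (field position and values here are illustrative assumptions, not the real asm/pgtable.h definitions):

/* Old scheme: independent attribute bits; demote by clearing one.
 * New scheme: a multi-bit memory-type field; demote by rewriting the
 * whole field.  All values below are assumed for illustration. */
#define L_PTE_MT_MASK		(0x0f << 2)
#define L_PTE_MT_BUFFERABLE	(0x01 << 2)

static unsigned long demote_to_shared(unsigned long pteval)
{
	pteval &= ~L_PTE_MT_MASK;	/* drop the current memory type */
	pteval |= L_PTE_MT_BUFFERABLE;	/* substitute the safe shared type */
	return pteval;
}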
@@ -37,7 +41,7 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte, entry;
-       int ret = 0;
+       int ret;
 
        pgd = pgd_offset(vma->vm_mm, address);
        if (pgd_none(*pgd))
@@ -55,15 +59,23 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
        entry = *pte;
 
        /*
+        * If this page is present, it's actually being shared.
+        */
+       ret = pte_present(entry);
+
+       /*
         * If this page isn't present, or is already setup to
         * fault (ie, is old), we can safely ignore any issues.
         */
-       if (pte_present(entry) && pte_val(entry) & shared_pte_mask) {
-               flush_cache_page(vma, address, pte_pfn(entry));
-               pte_val(entry) &= ~shared_pte_mask;
+       if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
+               unsigned long pfn = pte_pfn(entry);
+               flush_cache_page(vma, address, pfn);
+               outer_flush_range((pfn << PAGE_SHIFT),
+                                 (pfn << PAGE_SHIFT) + PAGE_SIZE);
+               pte_val(entry) &= ~L_PTE_MT_MASK;
+               pte_val(entry) |= shared_pte_mask;
                set_pte_at(vma->vm_mm, address, pte, entry);
                flush_tlb_page(vma, address);
-               ret = 1;
        }
        pte_unmap(pte);
        return ret;
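Note the interface change hidden in this hunk: adjust_pte() used to return 1 only when it actually rewrote a PTE, but it now returns pte_present(entry), reporting every live mapping whether or not its memory type needed fixing, so the caller can count aliases reliably. A simplified sketch of how a caller such as make_coherent() can use the new return value (the real code walks the address_space's VMA tree and derives each VMA's virtual address for the page; the flat arrays here are an assumption for brevity):

/* Simplified sketch, not the kernel code: demote every other user
 * mapping of the page, counting live aliases via adjust_pte()'s new
 * return value, then demote the faulting mapping too if any exist. */
static void fix_aliases(struct vm_area_struct **vmas, unsigned long *addrs,
			int n, struct vm_area_struct *vma, unsigned long addr)
{
	int aliases = 0, i;

	for (i = 0; i < n; i++)
		if (vmas[i] != vma)
			aliases += adjust_pte(vmas[i], addrs[i]);
	if (aliases)
		adjust_pte(vma, addr);
}

The new outer_flush_range() call matters on systems with an L2 outer cache: flushing the inner cache alone would leave stale lines beyond the point of coherency.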
@@ -119,8 +131,6 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, unsigne
                flush_cache_page(vma, addr, pfn);
 }
 
-void __flush_dcache_page(struct address_space *mapping, struct page *page);
-
 /*
  * Take care of architecture specific things when placing a new PTE into
  * a page table, or changing an existing PTE.  Basically, there are two
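The extern declaration can go because fault-armv.c now pulls in the local mm.h header (added in the first hunk), the usual home for prototypes shared between the arch/arm/mm files. Presumably something along these lines (illustrative excerpt, not verified against the tree):

/* arch/arm/mm/mm.h -- assumed excerpt */
extern void __flush_dcache_page(struct address_space *mapping,
				struct page *page);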
@@ -143,16 +153,24 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
        if (!pfn_valid(pfn))
                return;
 
+       /*
+        * The zero page is never written to, so never has any dirty
+        * cache lines, and therefore never needs to be flushed.
+        */
        page = pfn_to_page(pfn);
+       if (page == ZERO_PAGE(0))
+               return;
+
        mapping = page_mapping(page);
+#ifndef CONFIG_SMP
+       if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+               __flush_dcache_page(mapping, page);
+#endif
        if (mapping) {
-               int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-
-               if (dirty)
-                       __flush_dcache_page(mapping, page);
-
                if (cache_is_vivt())
                        make_coherent(mapping, vma, addr, pfn);
+               else if (vma->vm_flags & VM_EXEC)
+                       __flush_icache_all();
        }
 }
 
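The reworked update_mmu_cache() is one half of ARM's lazy D-cache flushing protocol: flush_dcache_page() may defer a flush by setting PG_dcache_dirty while a page has no user mappings, and the deferred flush is completed here once the first user PTE is installed. That deferral is only safe on UP (on SMP another CPU may already be reading through an existing mapping), hence the new #ifndef CONFIG_SMP. A condensed sketch of the deferring half, approximating the arch/arm/mm/flush.c logic of this era rather than quoting it:

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	/* No user mapping can see the stale lines yet: defer the flush
	 * to update_mmu_cache() by marking the page dirty. */
	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
		__flush_dcache_page(mapping, page);
}

The other addition, __flush_icache_all() for VM_EXEC mappings on non-VIVT caches, ensures newly written instructions reach instruction fetch: a VIPT/PIPT D-cache flush alone does not invalidate the I-cache.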
@@ -189,9 +207,8 @@ void __init check_writebuffer_bugs(void)
        page = alloc_page(GFP_KERNEL);
        if (page) {
                unsigned long *p1, *p2;
-               pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG|
-                                        L_PTE_DIRTY|L_PTE_WRITE|
-                                        L_PTE_BUFFERABLE);
+               pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
+                                       L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);
 
                p1 = vmap(&page, 1, VM_IOREMAP, prot);
                p2 = vmap(&page, 1, VM_IOREMAP, prot);
@@ -212,7 +229,7 @@ void __init check_writebuffer_bugs(void)
 
        if (v) {
                printk("failed, %s\n", reason);
-               shared_pte_mask |= L_PTE_BUFFERABLE;
+               shared_pte_mask = L_PTE_MT_UNCACHED;
        } else {
                printk("ok\n");
        }
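For context, v holds the result of the writebuffer coherency probe: the page is mapped twice via vmap() in the previous hunk with the bufferable memory type, one alias is written and the other read back, and a mismatch means writes were still sitting in the write buffer. A condensed sketch of the idea (illustrative, not the exact loop in check_writebuffer_bugs()):

/* Condensed sketch: write through one alias of the page, read back
 * through the other.  A stale value means the bufferable memory type
 * is not coherent enough for shared mappings on this CPU. */
static int writebuffer_broken(volatile unsigned long *p1,
			      volatile unsigned long *p2)
{
	*p1 = 0xdeadbeef;	/* may sit in the write buffer */
	barrier();		/* compiler barrier; the real test uses mb() */
	return *p2 != 0xdeadbeef;
}

The failure handling also changes meaning: previously shared_pte_mask was a set of bits to strip from shared PTEs, so a failure OR'ed in L_PTE_BUFFERABLE; now it holds the memory type itself, so a failure simply selects the stronger L_PTE_MT_UNCACHED.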