#include <linux/init.h>
#include <linux/pagemap.h>
+#include <asm/bugs.h>
#include <asm/cacheflush.h>
+#include <asm/cachetype.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
-static unsigned long shared_pte_mask = L_PTE_CACHEABLE;
+#include "mm.h"
+
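+/*
+ * Memory type given to a page while it has multiple user-space
+ * mappings on a VIVT cache: uncacheable by default, but with the
+ * write buffer still enabled.  check_writebuffer_bugs() downgrades
+ * this to fully uncached if the write buffer turns out not to be
+ * coherent between two mappings of the same page.
+ */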
+static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
/*
* We take the easy way out of this problem - we make the
* PTE uncacheable.  However, we leave the write buffer on.
*/
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte, entry;
- int ret = 0;
+ int ret;
pgd = pgd_offset(vma->vm_mm, address);
if (pgd_none(*pgd))
entry = *pte;
/*
+ * If this page is present, it's actually being shared.
+ */
+ ret = pte_present(entry);
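+ /*
+ * make_coherent() counts these present PTEs; if the page really is
+ * shared, every mapping (including the faulting one) gets switched
+ * to the shared memory type, otherwise flushing the single mapping
+ * suffices.
+ */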
+
+ /*
* If this page isn't present, or is already set up to
* fault (i.e., is old), we can safely ignore any issues.
*/
- if (pte_present(entry) && pte_val(entry) & shared_pte_mask) {
- flush_cache_page(vma, address, pte_pfn(entry));
- pte_val(entry) &= ~shared_pte_mask;
+ if (ret && (pte_val(entry) & L_PTE_MT_MASK) != shared_pte_mask) {
+ unsigned long pfn = pte_pfn(entry);
+ flush_cache_page(vma, address, pfn);
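+ /*
+ * Clean the page out of any outer (L2) cache too: once the
+ * mapping becomes uncacheable, dirty lines left there would no
+ * longer be hit by accesses through it.
+ */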
+ outer_flush_range((pfn << PAGE_SHIFT),
+ (pfn << PAGE_SHIFT) + PAGE_SIZE);
+ pte_val(entry) &= ~L_PTE_MT_MASK;
+ pte_val(entry) |= shared_pte_mask;
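+ /*
+ * Install the new memory type and zap the old TLB entry so the
+ * next access picks up the updated attributes.
+ */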
set_pte_at(vma->vm_mm, address, pte, entry);
flush_tlb_page(vma, address);
- ret = 1;
}
pte_unmap(pte);
return ret;
flush_cache_page(vma, addr, pfn);
}
-void __flush_dcache_page(struct address_space *mapping, struct page *page);
-
/*
* Take care of architecture specific things when placing a new PTE into
* a page table, or changing an existing PTE. Basically, there are two
* things that we need to take care of:
*
*  1. If PG_dcache_dirty is set for the page, we need to ensure
*     that any cache entries for the kernel's virtual memory
*     range are written back to the page.
*  2. If we have multiple shared mappings of the same space in
*     different page tables, we need to ensure that all mappings
*     have the same cache attributes.
*
* Note that the pte lock will be held.
*/
if (!pfn_valid(pfn))
return;
+ /*
+ * The zero page is never written to, so never has any dirty
+ * cache lines, and therefore never needs to be flushed.
+ */
page = pfn_to_page(pfn);
+ if (page == ZERO_PAGE(0))
+ return;
+
mapping = page_mapping(page);
+#ifndef CONFIG_SMP
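+ /*
+ * On UP, flush_dcache_page() defers flushing a page-cache page
+ * with no user mappings by setting PG_dcache_dirty; do that
+ * deferred flush now that the page is being mapped.  On SMP the
+ * flush is never deferred, since it could end up running on a
+ * different CPU from the one whose cache holds the dirty lines.
+ */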
+ if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+ __flush_dcache_page(mapping, page);
+#endif
if (mapping) {
- int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-
- if (dirty)
- __flush_dcache_page(mapping, page);
-
if (cache_is_vivt())
make_coherent(mapping, vma, addr, pfn);
+ else if (vma->vm_flags & VM_EXEC)
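+ /*
+ * VIPT/PIPT D-caches have no aliases to fix up, but the
+ * I-cache may still hold stale lines for this executable page.
+ */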
+ __flush_icache_all();
}
}
page = alloc_page(GFP_KERNEL);
if (page) {
unsigned long *p1, *p2;
- pgprot_t prot = __pgprot(L_PTE_PRESENT|L_PTE_YOUNG|
- L_PTE_DIRTY|L_PTE_WRITE|
- L_PTE_BUFFERABLE);
+ pgprot_t prot = __pgprot_modify(PAGE_KERNEL,
+ L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE);
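+
+ /*
+ * Map the same page at two different virtual addresses with the
+ * write-buffered memory type; check_writebuffer() then tests
+ * whether writes through one mapping are coherent with reads
+ * through the other.
+ */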
p1 = vmap(&page, 1, VM_IOREMAP, prot);
p2 = vmap(&page, 1, VM_IOREMAP, prot);
if (p1 && p2) {
v = check_writebuffer(p1, p2);
reason = "enabling work-around";
} else {
reason = "unable to map memory\n";
}

vunmap(p1);
vunmap(p2);
put_page(page);
} else {
reason = "unable to grab page\n";
}

if (v) {
printk("failed, %s\n", reason);
- shared_pte_mask |= L_PTE_BUFFERABLE;
+ shared_pte_mask = L_PTE_MT_UNCACHED;
} else {
printk("ok\n");
}