On VIVT ARM, when we have multiple shared mappings of the same file
in the same MM, we need to ensure that we have coherency across all
copies. We do this via make_coherent() by making the pages
uncacheable.
This used to work fine, until we allowed highmem with highpte - we
now have a page table which is mapped as required, and is not available
for modification via update_mmu_cache().
Ralf Baechle suggested getting rid of the PTE value passed to
update_mmu_cache():
On MIPS update_mmu_cache() calls __update_tlb() which walks pagetables
to construct a pointer to the pte again. Passing a pte_t * is much
more elegant. Maybe we might even replace the pte argument with the
pte_t?
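(Purely for illustration, not part of this patch: the walk __update_tlb()
has to redo looks roughly like the sketch below, written with the generic
Linux pgtable helpers of this era. walk_to_pte is a hypothetical name, and
the pgd/pud/pmd validity checks a real walker needs are omitted.)

    /* hypothetical helper: recover a pte pointer for addr by walking
     * the page tables -- the work a pte_t * argument makes unnecessary */
    static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
    {
            pgd_t *pgd = pgd_offset(mm, addr);
            pud_t *pud = pud_offset(pgd, addr);
            pmd_t *pmd = pmd_offset(pud, addr);

            /* maps the pte page (kmap with highpte); pair with pte_unmap() */
            return pte_offset_map(pmd, addr);
    }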
Ben Herrenschmidt would also like the pte pointer for PowerPC:
Passing the ptep in there is exactly what I want. I want that
-instead- of the PTE value, because I have issue on some ppc cases,
for I$/D$ coherency, where set_pte_at() may decide to mask out the
_PAGE_EXEC.
So, pass in the mapped page table pointer into update_mmu_cache(), and
remove the PTE value, updating all implementations and call sites to
suit.
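The pattern applied throughout, in brief (a sketch of the conversion, not
a complete hunk from this patch):

    /* before: implementations received the PTE value */
    void update_mmu_cache(struct vm_area_struct *vma,
                          unsigned long address, pte_t pte);

    /* after: they receive a pointer to the still-mapped PTE ... */
    void update_mmu_cache(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep);

    /* ... and dereference it where the value is needed, e.g. */
    unsigned long pfn = pte_pfn(*ptep);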
Includes a fix from Stephen Rothwell:
sparc: fix fallout from update_mmu_cache API change
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
40 files changed:
This is used primarily during fault processing.
5) void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long address, pte_t pte)
+ unsigned long address, pte_t *ptep)
At the end of every page fault, this routine is invoked to
tell the architecture specific code that a translation
- described by "pte" now exists at virtual address "address"
- for address space "vma->vm_mm", in the software page tables.
+ now exists at virtual address "address" for address space
+ "vma->vm_mm", in the software page tables.
A port may use this information in any way it so chooses.
For example, it could use this event to pre-load TLB
* tables contain all the necessary information.
*/
extern inline void update_mmu_cache(struct vm_area_struct * vma,
- unsigned long address, pte_t pte)
+ unsigned long address, pte_t *ptep)
* cache entries for the kernels virtual memory range are written
* back to the page.
*/
-extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);
+extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep);
*
* Note that the pte lock will be held.
*/
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep)
- unsigned long pfn = pte_pfn(pte);
+ unsigned long pfn = pte_pfn(*ptep);
struct address_space *mapping;
struct page *page;
struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct * vma,
- unsigned long address, pte_t pte);
+ unsigned long address, pte_t *ptep);
/*
* Encode and decode a swap entry
}
void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long address, pte_t pte)
+ unsigned long address, pte_t *ptep)
return;
local_irq_save(flags);
- update_dtlb(address, pte);
+ update_dtlb(address, *ptep);
local_irq_restore(flags);
}
* Actually I am not sure on what this could be used for.
*/
static inline void update_mmu_cache(struct vm_area_struct * vma,
- unsigned long address, pte_t pte)
+ unsigned long address, pte_t *ptep)
/*
* preload information about a newly instantiated PTE into the SCR0/SCR1 PGE cache
*/
-static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
struct mm_struct *mm;
unsigned long ampr;
return pte_val(a) == pte_val(b);
}
-#define update_mmu_cache(vma, address, pte) do { } while (0)
+#define update_mmu_cache(vma, address, ptep) do { } while (0)
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
#endif /* _ASM_M32R_TLBFLUSH_H */
* update_mmu_cache()
*======================================================================*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
addr = (address & PAGE_MASK);
set_thread_fault_code(error_code);
- update_mmu_cache(NULL, addr, *pte_k);
+ update_mmu_cache(NULL, addr, pte_k);
set_thread_fault_code(0);
return;
}
#define ITLB_END (unsigned long *)(ITLB_BASE + (NR_TLB_ENTRIES * 8))
#define DTLB_END (unsigned long *)(DTLB_BASE + (NR_TLB_ENTRIES * 8))
void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr,
{
volatile unsigned long *entry1, *entry2;
unsigned long pte_data, flags;
vaddr = (vaddr & PAGE_MASK) | get_asid();
- pte_data = pte_val(pte);
+ pte_data = pte_val(*ptep);
#ifdef CONFIG_CHIP_OPSP
entry1 = (unsigned long *)ITLB_BASE;
* they are updated on demand.
*/
static inline void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long address, pte_t pte)
+ unsigned long address, pte_t *ptep)
#define flush_tlb_kernel_range(start, end) do { } while (0)
-#define update_mmu_cache(vma, addr, pte) do { } while (0)
+#define update_mmu_cache(vma, addr, ptep) do { } while (0)
#define flush_tlb_all local_flush_tlb_all
#define flush_tlb_mm local_flush_tlb_mm
pte_t pte);
static inline void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long address, pte_t pte)
+ unsigned long address, pte_t *ptep)
__update_tlb(vma, address, pte);
__update_cache(vma, address, pte);
}
* the kernel page tables containing the necessary information by tlb-mn10300.S
*/
extern void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long address, pte_t pte);
+ unsigned long address, pte_t *ptep);
#endif /* !__ASSEMBLY__ */
/*
* preemptively set a TLB entry
*/
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
unsigned long pteu, ptel, cnx, flags;
addr &= PAGE_MASK;
ptel = pte_val(pte) & ~(xPTEL_UNUSED1 | xPTEL_UNUSED2);
#define PG_dcache_dirty PG_arch_1
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
/* Encode and de-code a swap entry */
EXPORT_SYMBOL(flush_cache_all_local);
void
-update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
- struct page *page = pte_page(pte);
+ struct page *page = pte_page(*ptep);
if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
test_bit(PG_dcache_dirty, &page->flags)) {
* corresponding HPTE into the hash table ahead of time, instead of
* waiting for the inevitable extra hash-table miss exception.
*/
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr,
unsigned long end, int write, struct page **pages, int *nr);
* This must always be called with the pte lock held.
*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
{
#ifdef CONFIG_PPC_STD_MMU
unsigned long access = 0, trap;
/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
- if (!pte_young(pte) || address >= TASK_SIZE)
+ if (!pte_young(*ptep) || address >= TASK_SIZE)
return;
/* We try to figure out if we are coming from an instruction
* The S390 doesn't have any external MMU info: the kernel page
* tables contain all the necessary information.
*/
-#define update_mmu_cache(vma, address, pte) do { } while (0)
+#define update_mmu_cache(vma, address, ptep) do { } while (0)
/*
* ZERO_PAGE is a global shared page that is always zero: used
unsigned long address, pte_t pte);
static inline void update_mmu_cache(struct vm_area_struct *vma,
- unsigned long address, pte_t pte)
+ unsigned long address, pte_t *ptep)
__update_tlb(vma, address, pte);
__update_cache(vma, address, pte);
}
unsigned long address, pte_t pte);
static inline void
-update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
__update_cache(vma, address, pte);
__update_tlb(vma, address, pte);
}
local_flush_tlb_one(get_asid(), address & PAGE_MASK);
#endif
- update_mmu_cache(NULL, address, entry);
+ update_mmu_cache(NULL, address, pte);
#define FAULT_CODE_WRITE 0x2
#define FAULT_CODE_USER 0x4
-BTFIXUPDEF_CALL(void, update_mmu_cache, struct vm_area_struct *, unsigned long, pte_t)
+BTFIXUPDEF_CALL(void, update_mmu_cache, struct vm_area_struct *, unsigned long, pte_t *)
-#define update_mmu_cache(vma,addr,pte) BTFIXUP_CALL(update_mmu_cache)(vma,addr,pte)
+#define update_mmu_cache(vma,addr,ptep) BTFIXUP_CALL(update_mmu_cache)(vma,addr,ptep)
BTFIXUPDEF_CALL(void, sparc_mapiorange, unsigned int, unsigned long,
unsigned long, unsigned int)
#define mmu_unlockarea(vaddr, len) do { } while(0)
struct vm_area_struct;
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
+extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
/* Encode and de-code a swap entry */
#define __swp_type(entry) (((entry).val >> PAGE_SHIFT) & 0xffUL)
unsigned long address)
{
extern void sun4c_update_mmu_cache(struct vm_area_struct *,
- unsigned long,pte_t);
+ unsigned long,pte_t *);
extern pte_t *sun4c_pte_offset_kernel(pmd_t *,unsigned long);
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
* on the CPU and doing a shrink_mmap() on this vma.
*/
sun4c_update_mmu_cache (find_vma(current->mm, address), address,
else
do_sparc_fault(regs, text_fault, write, address);
}
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
struct mm_struct *mm;
struct tsb *tsb;
unsigned long tag, flags;
unsigned long tsb_index, tsb_hash_shift;
if (tlb_type != hypervisor) {
unsigned long pfn = pte_pfn(pte);
-void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
* The following code is a deadwood that may be necessary when
* we start to make precise page flushes again. --zaitcev
*/
-static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
+static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t *ptep)
{
#if 0
static unsigned long last;
if (address == last) {
val = srmmu_hwprobe(address);
- if (val != 0 && pte_val(pte) != val) {
+ if (val != 0 && pte_val(*ptep) != val) {
printk("swift_update_mmu_cache: "
"addr %lx put %08x probed %08x from %p\n",
printk("swift_update_mmu_cache: "
"addr %lx put %08x probed %08x from %p\n",
- address, pte_val(pte), val,
+ address, pte_val(*ptep), val,
__builtin_return_address(0));
srmmu_flush_whole_tlb();
}
/* An experiment, turn off by default for now... -DaveM */
#define SUN4C_PRELOAD_PSEG
-void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
unsigned long flags;
int pseg;
start += PAGE_SIZE;
}
#ifndef SUN4C_PRELOAD_PSEG
- sun4c_put_pte(address, pte_val(pte));
+ sun4c_put_pte(address, pte_val(*ptep));
#endif
local_irq_restore(flags);
return;
- sun4c_put_pte(address, pte_val(pte));
+ sun4c_put_pte(address, pte_val(*ptep));
local_irq_restore(flags);
}
struct mm_struct;
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);
-#define update_mmu_cache(vma,address,pte) do ; while (0)
+#define update_mmu_cache(vma,address,ptep) do ; while (0)
/* Encode and de-code a swap entry */
#define __swp_type(x) (((x).val >> 4) & 0x3f)
* The i386 doesn't have any external MMU info: the kernel page
* tables contain all the necessary information.
*/
-#define update_mmu_cache(vma, address, pte) do { } while (0)
+#define update_mmu_cache(vma, address, ptep) do { } while (0)
#endif /* !__ASSEMBLY__ */
#define pte_unmap(pte) /* NOP */
#define pte_unmap_nested(pte) /* NOP */
-#define update_mmu_cache(vma, address, pte) do { } while (0)
+#define update_mmu_cache(vma, address, ptep) do { } while (0)
/* Encode and de-code a swap entry */
#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
#define kern_addr_valid(addr) (1)
extern void update_mmu_cache(struct vm_area_struct * vma,
- unsigned long address, pte_t pte);
+ unsigned long address, pte_t *ptep);
/*
* remap a physical page `pfn' of size `size' with page protection `prot'
-update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t pte)
+update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
- unsigned long pfn = pte_pfn(pte);
+ unsigned long pfn = pte_pfn(*ptep);
struct page *page;
if (!pfn_valid(pfn))
entry = pte_mkwrite(pte_mkdirty(huge_ptep_get(ptep)));
if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1)) {
- update_mmu_cache(vma, address, entry);
+ update_mmu_cache(vma, address, ptep);
entry = pte_mkyoung(entry);
if (huge_ptep_set_access_flags(vma, address, ptep, entry,
flags & FAULT_FLAG_WRITE))
- update_mmu_cache(vma, address, entry);
+ update_mmu_cache(vma, address, ptep);
out_page_table_lock:
spin_unlock(&mm->page_table_lock);
/* Ok, finally just insert the thing.. */
entry = pte_mkspecial(pfn_pte(pfn, prot));
set_pte_at(mm, addr, pte, entry);
- update_mmu_cache(vma, addr, entry); /* XXX: why not for insert_page? */
+ update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
entry = pte_mkyoung(orig_pte);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
if (ptep_set_access_flags(vma, address, page_table, entry,1))
- update_mmu_cache(vma, address, entry);
+ update_mmu_cache(vma, address, page_table);
ret |= VM_FAULT_WRITE;
goto unlock;
}
* new page to be mapped directly into the secondary page table.
*/
set_pte_at_notify(mm, address, page_table, entry);
- update_mmu_cache(vma, address, entry);
+ update_mmu_cache(vma, address, page_table);
if (old_page) {
/*
* Only after switching the pte to the new page may
}
/* No need to invalidate - it was non-present before */
- update_mmu_cache(vma, address, pte);
+ update_mmu_cache(vma, address, page_table);
unlock:
pte_unmap_unlock(page_table, ptl);
out:
set_pte_at(mm, address, page_table, entry);
/* No need to invalidate - it was non-present before */
- update_mmu_cache(vma, address, entry);
+ update_mmu_cache(vma, address, page_table);
unlock:
pte_unmap_unlock(page_table, ptl);
return 0;
set_pte_at(mm, address, page_table, entry);
/* no need to invalidate: a not-present page won't be cached */
- update_mmu_cache(vma, address, entry);
+ update_mmu_cache(vma, address, page_table);
} else {
if (charged)
mem_cgroup_uncharge_page(page);
}
entry = pte_mkyoung(entry);
if (ptep_set_access_flags(vma, address, pte, entry, flags & FAULT_FLAG_WRITE)) {
- update_mmu_cache(vma, address, entry);
+ update_mmu_cache(vma, address, pte);
} else {
/*
* This is needed only for protection faults but the arch code
page_add_file_rmap(new);
/* No need to invalidate - it was non-present before */
- update_mmu_cache(vma, addr, pte);
+ update_mmu_cache(vma, addr, ptep);
unlock:
pte_unmap_unlock(ptep, ptl);
out: