/*
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/mm.h>	/* assumed: page_mapped(), struct vm_area_struct */
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
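
/*
 * A rough sketch of the scheme used below: on CPUs with a virtually
 * indexed D-cache, one physical page can occupy several differently
 * coloured cache lines at once (dcache.n_aliases > 0), so a write
 * through an arbitrary kernel mapping is not necessarily visible
 * through a user mapping of the same page. The helpers here either go
 * through a colour-matched mapping from kmap_coherent(), or touch the
 * plain kernel mapping and set PG_dcache_dirty so the flush can be
 * done lazily before the user mapping is next relied upon.
 * copy_to_user_page() is the write side, used for e.g. ptrace access.
 */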
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		/* Write through a colour-matched mapping; no flush needed */
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(vto);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}
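
/*
 * Read-side counterpart of copy_to_user_page(): pull the data through
 * a colour-matched mapping while the user's view of the page is the
 * authoritative one; otherwise read the kernel mapping directly and
 * conservatively mark the page PG_dcache_dirty for a deferred flush.
 */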
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent(vfrom);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}
}
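
/*
 * Backs copy-on-write faults: duplicate 'from' into 'to', which is
 * about to be mapped at vaddr. kmap_atomic() gives no colour
 * guarantee (unlike kmap_coherent()), so the freshly written
 * destination is written back explicitly whenever its kernel address
 * aliases vaddr.
 */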
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    !test_bit(PG_dcache_dirty, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		__flush_wback_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);

	/* Make sure this page is visible on other CPUs too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);
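
/*
 * Zero a page that is about to appear at vaddr; as in
 * copy_user_highpage(), the zeroes are written back if the kernel
 * mapping aliases the user address, so user space cannot read stale
 * cache contents instead.
 */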
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_wback_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);
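
/*
 * Presumably reached from update_mmu_cache() when a PTE is installed:
 * if the page was dirtied earlier through an aliasing kernel mapping
 * (PG_dcache_dirty set), write it back now so the new user mapping
 * starts out coherent. CPUs without cache aliases need none of this.
 */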
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	page = pfn_to_page(pfn);
	if (pfn_valid(pfn) && page_mapping(page)) {
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			unsigned long addr = (unsigned long)page_address(page);

			if (pages_do_alias(addr, address & PAGE_MASK))
				__flush_wback_region((void *)addr, PAGE_SIZE);
		}
	}
}
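
/*
 * Flush an anonymous page before the kernel touches it on a user's
 * behalf (the flush_anon_page() path, e.g. via get_user_pages()): if
 * the kernel address aliases the user address vmaddr, flush whichever
 * mapping may hold the stale lines.
 */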
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    !test_bit(PG_dcache_dirty, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			__flush_wback_region((void *)kaddr, PAGE_SIZE);
			kunmap_coherent(kaddr);
		} else
			__flush_wback_region((void *)addr, PAGE_SIZE);
	}
}