sh: Split out SH-4 __flush_xxx_region() ops.
/*
 * arch/sh/mm/pg-mmu.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

#define kmap_get_fixmap_pte(vaddr)                                     \
        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))

static pte_t *kmap_coherent_pte;

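/*
 * Cache the fixmap PTE for the start of the coherent kmap window
 * (FIX_CMAP_BEGIN) at boot so that kmap_coherent() does not have to
 * walk the page tables for every mapping. Only parts with aliasing
 * D-caches (SH-4, SH7705 in 32KB cache mode) need this.
 */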
void __init kmap_coherent_init(void)
{
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
        unsigned long vaddr;

        /* cache the first coherent kmap pte */
        vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
        kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
#endif
}

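/*
 * Map 'page' at a fixmap slot whose virtual address has the same
 * D-cache colour as the user address 'addr', so that kernel accesses
 * through the returned mapping hit the same cache lines as the user
 * mapping and no new aliases are introduced. Preemption stays
 * disabled until the matching kunmap_coherent().
 */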
static void *kmap_coherent(struct page *page, unsigned long addr)
{
        enum fixed_addresses idx;
        unsigned long vaddr, flags;
        pte_t pte;

        BUG_ON(test_bit(PG_dcache_dirty, &page->flags));

        inc_preempt_count();

        idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
        vaddr = __fix_to_virt(FIX_CMAP_END - idx);
        pte = mk_pte(page, PAGE_KERNEL);

        local_irq_save(flags);
        flush_tlb_one(get_asid(), vaddr);
        local_irq_restore(flags);

        update_mmu_cache(NULL, vaddr, pte);

        set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);

        return (void *)vaddr;
}

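/*
 * Tear down a mapping set up by kmap_coherent(): simply re-enable
 * preemption; the fixmap slot and its TLB entry are recycled by the
 * next kmap_coherent() call.
 */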
static inline void kunmap_coherent(void)
{
        dec_preempt_count();
        preempt_check_resched();
}

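/*
 * Write to a page that may also be mapped in user space. If the page
 * is user-mapped and not flagged dirty, copy through a colour-matched
 * coherent mapping so the user mapping sees the new data; otherwise
 * copy through the regular kernel mapping and mark the page
 * PG_dcache_dirty for a deferred flush. Executable VMAs additionally
 * get the page's cache lines flushed to keep the I-cache coherent.
 */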
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, const void *src,
                       unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
            !test_bit(PG_dcache_dirty, &page->flags)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent();
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        set_bit(PG_dcache_dirty, &page->flags);
        }

        if (vma->vm_flags & VM_EXEC)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}

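/*
 * Read from a page that may also be mapped in user space. When the
 * page is user-mapped and not flagged dirty, read through a coherent
 * mapping so the copy observes the user's view of the data; otherwise
 * fall back to the kernel mapping and note the potential alias via
 * PG_dcache_dirty.
 */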
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, const void *src,
                         unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
            !test_bit(PG_dcache_dirty, &page->flags)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent();
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        set_bit(PG_dcache_dirty, &page->flags);
        }
}

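/*
 * Copy a user page, e.g. on copy-on-write. The source is read through
 * a coherent mapping when it is user-mapped and clean, and the
 * destination's kernel-side cache lines are written back if they
 * alias with the user address, so the new user mapping does not see
 * stale data.
 */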
void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to, KM_USER1);

        if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
            !test_bit(PG_dcache_dirty, &from->flags)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent();
        } else {
                vfrom = kmap_atomic(from, KM_USER0);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom, KM_USER0);
        }

        if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
                __flush_wback_region(vto, PAGE_SIZE);

        kunmap_atomic(vto, KM_USER1);
        /* Make sure this page is cleared on other CPUs too before using it */
        smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

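/*
 * Zero a page destined for user space at 'vaddr', writing back the
 * kernel-side cache lines if they alias with the user mapping.
 */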
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *kaddr = kmap_atomic(page, KM_USER0);

        clear_page(kaddr);

        if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
                __flush_wback_region(kaddr, PAGE_SIZE);

        kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);

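/*
 * Deferred flush for the lazy D-cache handling above: when a PTE for
 * 'address' is being established and the page was previously marked
 * PG_dcache_dirty, write back the kernel mapping if its colour
 * aliases with the new user address.
 */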
void __update_cache(struct vm_area_struct *vma,
                    unsigned long address, pte_t pte)
{
        struct page *page;
        unsigned long pfn = pte_pfn(pte);

        if (!boot_cpu_data.dcache.n_aliases)
                return;

        page = pfn_to_page(pfn);
        if (pfn_valid(pfn) && page_mapping(page)) {
                int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
                if (dirty) {
                        unsigned long addr = (unsigned long)page_address(page);

                        if (pages_do_alias(addr, address & PAGE_MASK))
                                __flush_wback_region((void *)addr, PAGE_SIZE);
                }
        }
}

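/*
 * Flush an anonymous page whose kernel address aliases with its user
 * mapping at 'vmaddr'. A colour-matched coherent mapping is used when
 * the page is user-mapped and clean; otherwise the kernel mapping is
 * flushed directly.
 */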
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
        unsigned long addr = (unsigned long) page_address(page);

        if (pages_do_alias(addr, vmaddr)) {
                if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
                    !test_bit(PG_dcache_dirty, &page->flags)) {
                        void *kaddr;

                        kaddr = kmap_coherent(page, vmaddr);
                        __flush_wback_region((void *)kaddr, PAGE_SIZE);
                        kunmap_coherent();
                } else
                        __flush_wback_region((void *)addr, PAGE_SIZE);
        }
}