sh: force dcache flush if dcache_dirty bit set.
/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

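/*
 * Per-CPU cache operations. These all start out as no-ops and are
 * pointed at the CPU family's real implementations by cpu_cache_init().
 */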
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

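/*
 * Region flush primitives (writeback, writeback + invalidate, and
 * invalidate only), likewise filled in by cpu_cache_init().
 */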
void (*__flush_wback_region)(void *start, int size);
void (*__flush_purge_region)(void *start, int size);
void (*__flush_invalidate_region)(void *start, int size);

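/* Default stub used until a CPU-specific region flusher is installed */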
static inline void noop__flush_region(void *start, int size)
{
}

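/*
 * Run a cache operation on every online CPU, including the local one,
 * with preemption disabled for the duration.
 */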
static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
                                       int wait)
{
        preempt_disable();
        smp_call_function(func, info, wait);
        func(info);
        preempt_enable();
}

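/*
 * Write data into a page that may also be mapped into userspace. If the
 * page can be accessed coherently through kmap_coherent(), copy through
 * that mapping; otherwise copy via the normal kernel address and flag the
 * page with PG_dcache_dirty so the D-cache is purged before the user
 * mapping is next used. Executable VMAs additionally get the affected
 * lines flushed to keep the I-cache coherent.
 */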
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long vaddr, void *dst, const void *src,
                       unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
            !test_bit(PG_dcache_dirty, &page->flags)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent(vto);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        set_bit(PG_dcache_dirty, &page->flags);
        }

        if (vma->vm_flags & VM_EXEC)
                flush_cache_page(vma, vaddr, page_to_pfn(page));
}

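/*
 * Read data from a page that may also be mapped into userspace,
 * preferring the coherent kernel mapping when it is safe to use.
 */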
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                         unsigned long vaddr, void *dst, const void *src,
                         unsigned long len)
{
        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
            !test_bit(PG_dcache_dirty, &page->flags)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent(vfrom);
        } else {
                memcpy(dst, src, len);
                if (boot_cpu_data.dcache.n_aliases)
                        set_bit(PG_dcache_dirty, &page->flags);
        }
}

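/*
 * Copy a user page, reading the source through a coherent mapping when it
 * may hold dirty user-space cache lines, and purging the destination's
 * kernel alias if it could alias the user address.
 */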
void copy_user_highpage(struct page *to, struct page *from,
                        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;

        vto = kmap_atomic(to, KM_USER1);

        if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
            !test_bit(PG_dcache_dirty, &from->flags)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent(vfrom);
        } else {
                vfrom = kmap_atomic(from, KM_USER0);
                copy_page(vto, vfrom);
                kunmap_atomic(vfrom, KM_USER0);
        }

        if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
                __flush_purge_region(vto, PAGE_SIZE);

        kunmap_atomic(vto, KM_USER1);
        /* Make sure this page is up to date on other CPUs too before using it */
        smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

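/*
 * Zero a user page through the kernel mapping and purge that mapping if
 * it aliases the user-space address the page will appear at.
 */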
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *kaddr = kmap_atomic(page, KM_USER0);

        clear_page(kaddr);

        if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
                __flush_purge_region(kaddr, PAGE_SIZE);

        kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);

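/*
 * Called on PTE updates. If one of the copy routines above deferred its
 * cache maintenance by setting PG_dcache_dirty, write back and invalidate
 * the kernel mapping now so the user mapping observes current data.
 */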
void __update_cache(struct vm_area_struct *vma,
                    unsigned long address, pte_t pte)
{
        struct page *page;
        unsigned long pfn = pte_pfn(pte);

        if (!boot_cpu_data.dcache.n_aliases)
                return;

        page = pfn_to_page(pfn);
        if (pfn_valid(pfn)) {
                int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
                if (dirty)
                        __flush_purge_region(page_address(page), PAGE_SIZE);
        }
}

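/*
 * Flush an anonymous page whose kernel address aliases the given user
 * address, so that data written through one mapping is visible through
 * the other.
 */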
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
        unsigned long addr = (unsigned long) page_address(page);

        if (pages_do_alias(addr, vmaddr)) {
                if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
                    !test_bit(PG_dcache_dirty, &page->flags)) {
                        void *kaddr;

                        kaddr = kmap_coherent(page, vmaddr);
                        /* XXX.. For now kunmap_coherent() does a purge */
                        /* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
                        kunmap_coherent(kaddr);
                } else
                        __flush_purge_region((void *)addr, PAGE_SIZE);
        }
}

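/*
 * The generic cache flush entry points below simply fan the request out
 * to every CPU via cacheop_on_each_cpu(), packing multi-argument calls
 * into a struct flusher_data.
 */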
void flush_cache_all(void)
{
        cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}

void flush_cache_mm(struct mm_struct *mm)
{
        cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
        cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
                      unsigned long pfn)
{
        struct flusher_data data;

        data.vma = vma;
        data.addr1 = addr;
        data.addr2 = pfn;

        cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                       unsigned long end)
{
        struct flusher_data data;

        data.vma = vma;
        data.addr1 = start;
        data.addr2 = end;

        cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}

void flush_dcache_page(struct page *page)
{
        cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}

void flush_icache_range(unsigned long start, unsigned long end)
{
        struct flusher_data data;

        data.vma = NULL;
        data.addr1 = start;
        data.addr2 = end;

        cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
        /* Nothing uses the VMA, so just pass the struct page along */
        cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}

void flush_cache_sigtramp(unsigned long address)
{
        cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}

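/*
 * Work out which virtual address bits select the cache set beyond the
 * page offset; those bits determine how many aliases a physical page can
 * have in this cache.
 */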
static void compute_alias(struct cache_info *c)
{
        c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
        c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

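/* Report the probed cache geometry at boot time. */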
static void __init emit_cache_params(void)
{
        printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                boot_cpu_data.icache.ways,
                boot_cpu_data.icache.sets,
                boot_cpu_data.icache.way_incr);
        printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                boot_cpu_data.icache.entry_mask,
                boot_cpu_data.icache.alias_mask,
                boot_cpu_data.icache.n_aliases);
        printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                boot_cpu_data.dcache.ways,
                boot_cpu_data.dcache.sets,
                boot_cpu_data.dcache.way_incr);
        printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                boot_cpu_data.dcache.entry_mask,
                boot_cpu_data.dcache.alias_mask,
                boot_cpu_data.dcache.n_aliases);

        /*
         * Emit Secondary Cache parameters if the CPU has a probed L2.
         */
        if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
                printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                        boot_cpu_data.scache.ways,
                        boot_cpu_data.scache.sets,
                        boot_cpu_data.scache.way_incr);
                printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                        boot_cpu_data.scache.entry_mask,
                        boot_cpu_data.scache.alias_mask,
                        boot_cpu_data.scache.n_aliases);
        }
}

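/*
 * Boot-time cache setup: compute aliasing information, install no-op
 * region flushers, then let the matching CPU family hook up its real
 * implementations.
 */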
void __init cpu_cache_init(void)
{
        compute_alias(&boot_cpu_data.icache);
        compute_alias(&boot_cpu_data.dcache);
        compute_alias(&boot_cpu_data.scache);

        __flush_wback_region            = noop__flush_region;
        __flush_purge_region            = noop__flush_region;
        __flush_invalidate_region       = noop__flush_region;

        if (boot_cpu_data.family == CPU_FAMILY_SH2) {
                extern void __weak sh2_cache_init(void);

                sh2_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
                extern void __weak sh2a_cache_init(void);

                sh2a_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH3) {
                extern void __weak sh3_cache_init(void);

                sh3_cache_init();

                if ((boot_cpu_data.type == CPU_SH7705) &&
                    (boot_cpu_data.dcache.sets == 512)) {
                        extern void __weak sh7705_cache_init(void);

                        sh7705_cache_init();
                }
        }

        if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
            (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
            (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
                extern void __weak sh4_cache_init(void);

                sh4_cache_init();
        }

        if (boot_cpu_data.family == CPU_FAMILY_SH5) {
                extern void __weak sh5_cache_init(void);

                sh5_cache_init();
        }

        emit_cache_params();
}