/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>

#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)
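/*
 * Illustrative note (not part of the original header): with 4K pages
 * and a 16K SHMLBA there are four cache colours, so for example
 * CACHE_COLOUR(0x1000) == 1 and CACHE_COLOUR(0x5000) == 1, while
 * CACHE_COLOUR(0x2000) == 2.  Shared mappings of the same page must
 * agree in colour, otherwise a VIPT-aliasing D-cache can hold stale
 * aliases of the data.
 */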
/*
 *	Cache Model
 *	===========
 */
#undef _CACHE
#undef MULTI_CACHE

#if defined(CONFIG_CPU_CACHE_V3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v3
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4
# endif
#endif

#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_FA526)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE fa
# endif
#endif

#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm926
# endif
#endif

#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm940
# endif
#endif

#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm946
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4WB)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4wb
# endif
#endif

#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xscale
# endif
#endif

#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xsc3
# endif
#endif

#if defined(CONFIG_CPU_MOHAWK)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE mohawk
# endif
#endif

#if defined(CONFIG_CPU_FEROCEON)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_V6)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_V7)
# define MULTI_CACHE 1
#endif

#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
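/*
 * Illustrative sketch (not in the original header) of how the model
 * selection above plays out.  Building for CONFIG_CPU_ARM926T alone
 * leaves _CACHE defined as arm926 and MULTI_CACHE undefined, so the
 * glue macros further down expand to direct calls:
 *
 *	__cpuc_flush_kern_all
 *		-> __glue(_CACHE,_flush_kern_cache_all)
 *		-> arm926_flush_kern_cache_all	(arch/arm/mm/cache-arm926.S)
 *
 * Selecting a second cache model defines MULTI_CACHE instead, and the
 * same macro resolves to an indirect call through cpu_cache.
 */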
/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1
/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementation of these, and the required
 *	effects are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_cache_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_cache_user_mm(mm)
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_cache_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_inv_range(start, end)
 *
 *		Invalidate (discard) the specified virtual address range.
 *		May not write back any entries.  If 'start' or 'end'
 *		are not cache line aligned, those lines must be written
 *		back.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	dma_clean_range(start, end)
 *
 *		Clean (write back) the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 */
struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);

	void (*dma_inv_range)(const void *, const void *);
	void (*dma_clean_range)(const void *, const void *);
	void (*dma_flush_range)(const void *, const void *);
};
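/*
 * Illustrative sketch (not in this header): on a MULTI_CACHE build the
 * kernel fills in cpu_cache early in boot from the matched processor
 * info record, roughly as arch/arm/kernel/setup.c does:
 *
 *	cpu_cache = *list->cache;
 *
 * after which every __cpuc_* macro below becomes an indirect call
 * through this structure.
 */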
struct outer_cache_fns {
	void (*inv_range)(unsigned long, unsigned long);
	void (*clean_range)(unsigned long, unsigned long);
	void (*flush_range)(unsigned long, unsigned long);
};
/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			cpu_cache.dma_map_area
#define dmac_unmap_area			cpu_cache.dma_unmap_area
#define dmac_inv_range			cpu_cache.dma_inv_range
#define dmac_clean_range		cpu_cache.dma_clean_range
#define dmac_flush_range		cpu_cache.dma_flush_range
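/*
 * Illustrative sketch (simplified from arch/arm/mm/dma-mapping.c; the
 * function name is hypothetical): the streaming DMA API uses these
 * hooks by first maintaining the CPU cache on the kernel virtual
 * range, then the outer cache on the corresponding physical range:
 *
 *	static void example_dma_cpu_to_dev(const void *kaddr, size_t size,
 *					   enum dma_data_direction dir)
 *	{
 *		unsigned long paddr = __pa(kaddr);
 *
 *		dmac_map_area(kaddr, size, dir);
 *		if (dir == DMA_FROM_DEVICE)
 *			outer_inv_range(paddr, paddr + size);
 *		else
 *			outer_clean_range(paddr, paddr + size);
 *	}
 */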
#else

#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)

extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			__glue(_CACHE,_dma_map_area)
#define dmac_unmap_area			__glue(_CACHE,_dma_unmap_area)
#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)

extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_inv_range(const void *, const void *);
extern void dmac_clean_range(const void *, const void *);
extern void dmac_flush_range(const void *, const void *);

#endif
#ifdef CONFIG_OUTER_CACHE

extern struct outer_cache_fns outer_cache;

static inline void outer_inv_range(unsigned long start, unsigned long end)
{
	if (outer_cache.inv_range)
		outer_cache.inv_range(start, end);
}
static inline void outer_clean_range(unsigned long start, unsigned long end)
{
	if (outer_cache.clean_range)
		outer_cache.clean_range(start, end);
}
static inline void outer_flush_range(unsigned long start, unsigned long end)
{
	if (outer_cache.flush_range)
		outer_cache.flush_range(start, end);
}

#else

static inline void outer_inv_range(unsigned long start, unsigned long end)
{ }
static inline void outer_clean_range(unsigned long start, unsigned long end)
{ }
static inline void outer_flush_range(unsigned long start, unsigned long end)
{ }

#endif
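/*
 * Illustrative sketch (assuming an L2x0/PL310 outer cache, see
 * arch/arm/mm/cache-l2x0.c): a platform with a second-level cache
 * registers its maintenance routines at init time:
 *
 *	outer_cache.inv_range   = l2x0_inv_range;
 *	outer_cache.clean_range = l2x0_clean_range;
 *	outer_cache.flush_range = l2x0_flush_range;
 *
 * Note that the outer_*_range() helpers operate on physical addresses,
 * unlike the dmac_*() operations above, which take virtual addresses.
 */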
/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
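/*
 * Illustrative sketch: the generic ptrace path (access_process_vm() in
 * mm/memory.c) uses copy_to_user_page() roughly like this when poking
 * bytes into a traced task, so any Harvard I-cache is made coherent
 * with the newly written text:
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, len);
 *	set_page_dirty_lock(page);
 *	kunmap(page);
 */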
/*
 * Convert calls to our calling convention.
 */
#define flush_cache_all()		__cpuc_flush_kern_all()

static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

static inline void
vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr,
			 unsigned long len, int write)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		unsigned long addr = (unsigned long)kaddr;
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}
#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#define flush_ptrace_access(vma,page,ua,ka,len,write) \
		vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len, int write);
#endif
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma,start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
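/*
 * Illustrative sketch (userspace side, EABI; the constant comes from
 * __ARM_NR_BASE+2 in arch/arm/include/asm/unistd.h): a JIT that has
 * emitted code into a buffer invokes the private cacheflush call,
 * which reaches flush_cache_user_range() via sys_cacheflush:
 *
 *	#define __ARM_NR_cacheflush 0x0f0002
 *
 *	syscall(__ARM_NR_cacheflush, buf, buf + len, 0);
 */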
/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)
/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
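/*
 * Illustrative sketch of the deferral described above (simplified from
 * arch/arm/mm/flush.c): when the page has no user mappings yet, the
 * flush is postponed by tagging the page, and update_mmu_cache()
 * performs it later:
 *
 *	mapping = page_mapping(page);
 *	if (mapping && !mapping_mapped(mapping))
 *		set_bit(PG_dcache_dirty, &page->flags);
 *	else
 *		__flush_dcache_page(mapping, page);
 */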
static inline void __flush_icache_all(void)
{
#ifdef CONFIG_ARM_ERRATA_411920
	extern void v6_icache_inval_all(void);
	v6_icache_inval_all();
#else
	asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
	    :
	    : "r" (0));
#endif
}
#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
	/* highmem pages are always flushed upon kunmap already */
	if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
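/*
 * Illustrative usage sketch (the pattern Documentation/cachetlb.txt
 * describes): a driver that modifies page-cache data through the
 * kernel mapping flushes it before the page can be seen elsewhere:
 *
 *	addr = kmap(page);
 *	memcpy(addr, data, len);
 *	flush_kernel_dcache_page(page);
 *	kunmap(page);
 */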
#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)
/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)
/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb();
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}

#endif	/* _ASMARM_CACHEFLUSH_H */