/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001, 2002, 2003, 2004, 2005  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

static void __flush_dcache_segment_1way(unsigned long start,
                                        unsigned long extent);
static void __flush_dcache_segment_2way(unsigned long start,
                                        unsigned long extent);
static void __flush_dcache_segment_4way(unsigned long start,
                                        unsigned long extent);

static void __flush_cache_4096(unsigned long addr, unsigned long phys,
                               unsigned long exec_offset);

/*
 * This is initialised here to ensure that it is not placed in the BSS.  If
 * that were to happen, note that cache_init gets called before the BSS is
 * cleared, so this would get nulled out which would be hopeless.
 */
static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
        (void (*)(unsigned long, unsigned long))0xdeadbeef;

static void compute_alias(struct cache_info *c)
{
        c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
        c->n_aliases = (c->alias_mask >> PAGE_SHIFT) + 1;
}

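/*
 * Worked example (a sketch using SH7750-style numbers, not values read
 * from this particular CPU): a 16KB direct-mapped D-cache has 512 sets
 * of 32-byte lines, so entry_shift = 5.  Then
 *
 *      alias_mask = ((512 - 1) << 5) & ~(4096 - 1)
 *                 = 0x3fe0 & 0xfffff000 = 0x3000
 *      n_aliases  = (0x3000 >> 12) + 1 = 4
 *
 * i.e. virtual address bits [13:12] select the cache "colour", and one
 * physical page can be resident under any of 4 colours at once.
 */
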
static void __init emit_cache_params(void)
{
        printk("PVR=%08x CVR=%08x PRR=%08x\n",
                ctrl_inl(CCN_PVR),
                ctrl_inl(CCN_CVR),
                ctrl_inl(CCN_PRR));
        printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                cpu_data->icache.ways,
                cpu_data->icache.sets,
                cpu_data->icache.way_incr);
        printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                cpu_data->icache.entry_mask,
                cpu_data->icache.alias_mask,
                cpu_data->icache.n_aliases);
        printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
                cpu_data->dcache.ways,
                cpu_data->dcache.sets,
                cpu_data->dcache.way_incr);
        printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
                cpu_data->dcache.entry_mask,
                cpu_data->dcache.alias_mask,
                cpu_data->dcache.n_aliases);

        if (!__flush_dcache_segment_fn)
                panic("unknown number of cache ways\n");
}

/*
 * SH-4 has virtually indexed and physically tagged cache.
 */

/* Worst case assumed to be 64k cache, direct-mapped i.e. 4 synonym bits. */
#define MAX_P3_SEMAPHORES 16

struct semaphore p3map_sem[MAX_P3_SEMAPHORES];

void __init p3_cache_init(void)
{
        int i;

        compute_alias(&cpu_data->icache);
        compute_alias(&cpu_data->dcache);

        switch (cpu_data->dcache.ways) {
        case 1:
                __flush_dcache_segment_fn = __flush_dcache_segment_1way;
                break;
        case 2:
                __flush_dcache_segment_fn = __flush_dcache_segment_2way;
                break;
        case 4:
                __flush_dcache_segment_fn = __flush_dcache_segment_4way;
                break;
        default:
                __flush_dcache_segment_fn = NULL;
                break;
        }

        emit_cache_params();

        if (remap_area_pages(P3SEG, 0, PAGE_SIZE * 4, _PAGE_CACHABLE))
                panic("%s failed.", __FUNCTION__);

        for (i = 0; i < cpu_data->dcache.n_aliases; i++)
                sema_init(&p3map_sem[i], 1);
}

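/*
 * One semaphore per D-cache colour: users of the P3 mapping window
 * remapped above (the copy_user_page()/clear_user_page() implementations
 * elsewhere in this port) take p3map_sem[colour] to serialise access to
 * the per-colour page of the window.
 */
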
/*
 * Write back the dirty D-caches, but do not invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_wback_region(void *start, int size)
{
        unsigned long v;
        unsigned long begin, end;

        begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
        end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
                & ~(L1_CACHE_BYTES-1);
        for (v = begin; v < end; v+=L1_CACHE_BYTES) {
                asm volatile("ocbwb %0"
                             : /* no output */
                             : "m" (__m(v)));
        }
}

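/*
 * Example of the rounding arithmetic above (assuming L1_CACHE_BYTES == 32):
 * for start = 0xa0001005 and size = 40, begin rounds down to 0xa0001000
 * and end rounds up to 0xa0001040, so ocbwb is issued for the two lines
 * at 0xa0001000 and 0xa0001020, covering every byte of the region.
 */
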
/*
 * Write back the dirty D-caches and invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_purge_region(void *start, int size)
{
        unsigned long v;
        unsigned long begin, end;

        begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
        end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
                & ~(L1_CACHE_BYTES-1);
        for (v = begin; v < end; v+=L1_CACHE_BYTES) {
                asm volatile("ocbp %0"
                             : /* no output */
                             : "m" (__m(v)));
        }
}

/*
 * No write back please
 */
void __flush_invalidate_region(void *start, int size)
{
        unsigned long v;
        unsigned long begin, end;

        begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
        end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
                & ~(L1_CACHE_BYTES-1);
        for (v = begin; v < end; v+=L1_CACHE_BYTES) {
                asm volatile("ocbi %0"
                             : /* no output */
                             : "m" (__m(v)));
        }
}

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and routine for a.out format.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
        flush_cache_all();
}

/*
 * Write back the D-cache and purge the I-cache for the signal trampoline.
 * .. which happens to be the same behavior as flush_icache_range().
 * So, we simply flush out a line.
 */
void flush_cache_sigtramp(unsigned long addr)
{
        unsigned long v, index;
        unsigned long flags;
        int i;

        v = addr & ~(L1_CACHE_BYTES-1);
        asm volatile("ocbwb %0"
                     : /* no output */
                     : "m" (__m(v)));

        index = CACHE_IC_ADDRESS_ARRAY | (v & cpu_data->icache.entry_mask);

        local_irq_save(flags);
        jump_to_P2();

        for (i = 0; i < cpu_data->icache.ways;
             i++, index += cpu_data->icache.way_incr)
                ctrl_outl(0, index);    /* Clear out Valid-bit */

        back_to_P1();
        wmb();
        local_irq_restore(flags);
}

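/*
 * A sketch of the index computation above, with made-up SH7750-style
 * numbers: CACHE_IC_ADDRESS_ARRAY is the memory-mapped I-cache address
 * array (0xf0000000 on SH-4), and entry_mask picks out the set-index
 * bits of the virtual address.  For v = 0x8c0012a0 and
 * entry_mask = 0x1fe0, index = 0xf0000000 | 0x12a0 = 0xf00012a0.
 * Writing 0 there clears the valid bit of that set, and bumping the
 * address by way_incr repeats the store once per way.
 */
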
static inline void flush_cache_4096(unsigned long start,
                                    unsigned long phys)
{
        unsigned long flags;

        /*
         * All types of SH-4 require PC to be in P2 to operate on the I-cache.
         * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
         */
        if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG)
            || start < CACHE_OC_ADDRESS_ARRAY) {
                local_irq_save(flags);
                __flush_cache_4096(start | SH_CACHE_ASSOC,
                                   P1SEGADDR(phys), 0x20000000);
                local_irq_restore(flags);
        } else {
                __flush_cache_4096(start | SH_CACHE_ASSOC,
                                   P1SEGADDR(phys), 0);
        }
}

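/*
 * The magic 0x20000000 is the distance between the cached P1 segment
 * (0x80000000) and its uncached alias P2 (0xa0000000); adding it to the
 * return address makes __flush_cache_4096() execute uncached, which the
 * cache address arrays require on the affected parts.
 */
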
/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
void flush_dcache_page(struct page *page)
{
        if (test_bit(PG_mapped, &page->flags)) {
                unsigned long phys = PHYSADDR(page_address(page));
                unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
                int i, n;

                /* Loop all the D-cache */
                n = cpu_data->dcache.n_aliases;
                for (i = 0; i < n; i++, addr += PAGE_SIZE)
                        flush_cache_4096(addr, phys);
        }

        wmb();
}

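/*
 * We do not know which user virtual addresses (hence which colours) the
 * page was last accessed through, so every colour is swept: with
 * n_aliases == 4 this performs four associative flushes, at offsets 0,
 * 0x1000, 0x2000 and 0x3000 into the operand-cache address array, each
 * matching the page's physical tag.
 */
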
static inline void flush_icache_all(void)
{
        unsigned long flags, ccr;

        local_irq_save(flags);
        jump_to_P2();

        /* Flush I-cache */
        ccr = ctrl_inl(CCR);
        ccr |= CCR_CACHE_ICI;
        ctrl_outl(ccr, CCR);

        /*
         * back_to_P1() will take care of the barrier for us, don't add
         * another one!
         */
        back_to_P1();
        local_irq_restore(flags);
}

void flush_dcache_all(void)
{
        (*__flush_dcache_segment_fn)(0UL, cpu_data->dcache.way_size);
        wmb();
}

void flush_cache_all(void)
{
        flush_dcache_all();
        flush_icache_all();
}

void flush_cache_mm(struct mm_struct *mm)
{
        /*
         * Note : (RPC) since the caches are physically tagged, the only point
         * of flush_cache_mm for SH-4 is to get rid of aliases from the
         * D-cache.  The assumption elsewhere, e.g. flush_cache_range, is that
         * lines can stay resident so long as the virtual address they were
         * accessed with (hence cache set) is in accord with the physical
         * address (i.e. tag).  It's no different here.  So I reckon we don't
         * need to flush the I-cache, since aliases don't matter for that.  We
         * should try that.
         */
        flush_cache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long address, unsigned long pfn)
{
        unsigned long phys = pfn << PAGE_SHIFT;
        unsigned int alias_mask;

        alias_mask = cpu_data->dcache.alias_mask;

        /* We only need to flush D-cache when we have alias */
        if ((address ^ phys) & alias_mask) {
                /* Loop 4K of the D-cache */
                flush_cache_4096(
                        CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
                        phys);
                /* Loop another 4K of the D-cache */
                flush_cache_4096(
                        CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
                        phys);
        }

        alias_mask = cpu_data->icache.alias_mask;
        if (vma->vm_flags & VM_EXEC) {
                /*
                 * Evict entries from the portion of the cache from which code
                 * may have been executed at this address (virtual).  There's
                 * no need to evict from the portion corresponding to the
                 * physical address as for the D-cache, because we know the
                 * kernel has never executed the code through its identity
                 * translation.
                 */
                flush_cache_4096(
                        CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
                        phys);
        }
}

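/*
 * Example of the alias check above (a sketch with alias_mask == 0x3000):
 * for a user mapping at address 0x00401000 of a page at phys 0x0c003000,
 * (address ^ phys) & 0x3000 == 0x2000, so the page may be cached under
 * colour 1 (via the user mapping) as well as colour 3 (via the kernel's
 * P1 view), and both 4K slices of the cache get an associative purge.
 */
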
/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
                       unsigned long end)
{
        unsigned long d = 0, p = start & PAGE_MASK;
        unsigned long alias_mask = cpu_data->dcache.alias_mask;
        unsigned long n_aliases = cpu_data->dcache.n_aliases;
        unsigned long select_bit;
        unsigned long all_aliases_mask;
        unsigned long addr_offset;
        unsigned long phys;
        pgd_t *dir;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        pte_t entry;
        int i;

        /*
         * If cache is only 4k-per-way, there are never any 'aliases'.  Since
         * the cache is physically tagged, the data can just be left in there.
         */
        if (n_aliases == 0)
                return;

        all_aliases_mask = (1 << n_aliases) - 1;

        /*
         * Don't bother with the lookup and alias check if we have a
         * wide range to cover, just blow away the dcache in its
         * entirety instead. -- PFM.
         */
        if (((end - start) >> PAGE_SHIFT) >= 64) {
                flush_dcache_all();
                if (vma->vm_flags & VM_EXEC)
                        flush_icache_all();
                return;
        }

        dir = pgd_offset(vma->vm_mm, p);
        pud = pud_offset(dir, p);
        pmd = pmd_offset(pud, p);
        end = PAGE_ALIGN(end);

        do {
                if (pmd_none(*pmd) || pmd_bad(*pmd)) {
                        p &= ~((1 << PMD_SHIFT) - 1);
                        p += (1 << PMD_SHIFT);
                        pmd++;
                        continue;
                }

                pte = pte_offset_kernel(pmd, p);

                do {
                        entry = *pte;

                        if ((pte_val(entry) & _PAGE_PRESENT)) {
                                phys = pte_val(entry) & PTE_PHYS_MASK;

                                if ((p ^ phys) & alias_mask) {
                                        d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
                                        d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);

                                        if (d == all_aliases_mask)
                                                goto loop_exit;
                                }
                        }

                        pte++;
                        p += PAGE_SIZE;
                } while (p < end && ((unsigned long)pte & ~PAGE_MASK));

                pmd++;
        } while (p < end);

loop_exit:
        for (i = 0, select_bit = 0x1, addr_offset = 0x0; i < n_aliases;
             i++, select_bit <<= 1, addr_offset += PAGE_SIZE)
                if (d & select_bit) {
                        (*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
                        wmb();
                }

        if (vma->vm_flags & VM_EXEC) {
                /*
                 * TODO: Is this required???  Need to look at how I-cache
                 * coherency is assured when new programs are loaded to see if
                 * this matters.
                 */
                flush_icache_all();
        }
}

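/*
 * How the 'd' bitmap above works (illustrative numbers): with
 * n_aliases == 4, all_aliases_mask == 0xf and bit i of 'd' records that
 * colour i (the PAGE_SIZE-sized slice at offset i * PAGE_SIZE within a
 * way) may hold aliased data.  Once every bit is set there is nothing
 * left to learn from the page tables, hence the early goto loop_exit;
 * the flush loop then sweeps exactly the colours that were marked.
 */
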
/**
 * flush_icache_user_range
 * @vma: VMA of the process
 * @page: page
 * @addr: U0 address
 * @len: length of the range (< page size)
 */
void flush_icache_user_range(struct vm_area_struct *vma,
                             struct page *page, unsigned long addr, int len)
{
        flush_cache_page(vma, addr, page_to_pfn(page));
        mb();
}

/**
 * __flush_cache_4096
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region.
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed.  The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
                               unsigned long exec_offset)
{
        int way_count;
        unsigned long base_addr = addr;
        struct cache_info *dcache;
        unsigned long way_incr;
        unsigned long a, ea, p;
        unsigned long temp_pc;

        dcache = &cpu_data->dcache;
        /* Write this way for better assembly. */
        way_count = dcache->ways;
        way_incr = dcache->way_incr;

        /*
         * Apply exec_offset (i.e. branch to P2 if required).
         *
         * FIXME:
         *
         *      If I write "=r" for the (temp_pc), it puts this in r6 hence
         *      trashing exec_offset before it's been added on - why?  Hence
         *      "=&r" as a 'workaround'
         */
        asm volatile("mov.l 1f, %0\n\t"
                     "add   %1, %0\n\t"
                     "jmp   @%0\n\t"
                     "nop\n\t"
                     ".balign 4\n\t"
                     "1:  .long 2f\n\t"
                     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

        /*
         * We know there will be >=1 iteration, so write as do-while to avoid
         * pointless head-of-loop check for 0 iterations.
         */
        do {
                ea = base_addr + PAGE_SIZE;
                a = base_addr;
                p = phys;

                do {
                        *(volatile unsigned long *)a = p;
                        /*
                         * Next line: intentionally not p+32, saves an add, p
                         * will do since only the cache tag bits need to
                         * match.
                         */
                        *(volatile unsigned long *)(a+32) = p;
                        a += 64;
                        p += 64;
                } while (a < ea);

                base_addr += way_incr;
        } while (--way_count != 0);
}

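/*
 * What the stores above do (per the SH-4 address-array semantics): with
 * the 'A' (associative) bit set in 'addr', a 32-bit write of 'p' to the
 * OC address array compares the tag bits of 'p' against each way of the
 * selected set, and on a hit acts on the U/V bits of the written data -
 * both clear here for a page-aligned 'phys', so a matching dirty line
 * is written back and invalidated.  A purely illustrative sketch:
 *
 *      *(volatile unsigned long *)(CACHE_OC_ADDRESS_ARRAY |
 *              SH_CACHE_ASSOC | colour_offset) = P1SEGADDR(phys);
 */
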
/*
 * Break the 1, 2 and 4 way variants of this out into separate functions to
 * avoid nearly all the overhead of having the conditional stuff in the
 * function bodies (+ the 1 and 2 way cases avoid saving any registers too).
 */
static void __flush_dcache_segment_1way(unsigned long start,
                                        unsigned long extent_per_way)
{
        unsigned long orig_sr, sr_with_bl;
        unsigned long base_addr;
        unsigned long way_incr, linesz, way_size;
        struct cache_info *dcache;
        register unsigned long a0, a0e;

        asm volatile("stc sr, %0" : "=r" (orig_sr));
        sr_with_bl = orig_sr | (1<<28);
        base_addr = ((unsigned long)&empty_zero_page[0]);

        /*
         * The previous code aligned base_addr to 16k, i.e. the way_size of all
         * existing SH-4 D-caches.  Whilst I don't see a need to have this
         * aligned to any better than the cache line size (which it will be
         * anyway by construction), let's align it to at least the way_size of
         * any existing or conceivable SH-4 D-cache. -- RPC
         */
        base_addr = ((base_addr >> 16) << 16);

        dcache = &cpu_data->dcache;
        linesz = dcache->linesz;
        way_incr = dcache->way_incr;
        way_size = dcache->way_size;

        a0 = base_addr;
        a0e = base_addr + extent_per_way;
        do {
                asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
                asm volatile("movca.l r0, @%0\n\t"
                             "ocbi @%0" : : "r" (a0));
                a0 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "ocbi @%0" : : "r" (a0));
                a0 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "ocbi @%0" : : "r" (a0));
                a0 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "ocbi @%0" : : "r" (a0));
                asm volatile("ldc %0, sr" : : "r" (orig_sr));
                a0 += linesz;
        } while (a0 < a0e);
}

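/*
 * The movca.l/ocbi pair above is the standard SH-4 way-sweeping trick:
 * movca.l allocates a cache line for the target address without fetching
 * it from memory (writing back whatever dirty line it displaces), and the
 * immediately following ocbi throws the freshly allocated line away again.
 * Sweeping empty_zero_page like this flushes an entire way without any
 * memory-mapped array accesses.  Setting SR bit 28 (the BL bit) first
 * blocks interrupts and exceptions so nothing can touch the line between
 * the two instructions.
 */
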
static void __flush_dcache_segment_2way(unsigned long start,
                                        unsigned long extent_per_way)
{
        unsigned long orig_sr, sr_with_bl;
        unsigned long base_addr;
        unsigned long way_incr, linesz, way_size;
        struct cache_info *dcache;
        register unsigned long a0, a1, a0e;

        asm volatile("stc sr, %0" : "=r" (orig_sr));
        sr_with_bl = orig_sr | (1<<28);
        base_addr = ((unsigned long)&empty_zero_page[0]);

        /* See comment under 1-way above */
        base_addr = ((base_addr >> 16) << 16);

        dcache = &cpu_data->dcache;
        linesz = dcache->linesz;
        way_incr = dcache->way_incr;
        way_size = dcache->way_size;

        a0 = base_addr;
        a1 = a0 + way_incr;
        a0e = base_addr + extent_per_way;
        do {
                asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
                asm volatile("movca.l r0, @%0\n\t"
                             "movca.l r0, @%1\n\t"
                             "ocbi @%0\n\t"
                             "ocbi @%1" : :
                             "r" (a0), "r" (a1));
                a0 += linesz;
                a1 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "movca.l r0, @%1\n\t"
                             "ocbi @%0\n\t"
                             "ocbi @%1" : :
                             "r" (a0), "r" (a1));
                a0 += linesz;
                a1 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "movca.l r0, @%1\n\t"
                             "ocbi @%0\n\t"
                             "ocbi @%1" : :
                             "r" (a0), "r" (a1));
                a0 += linesz;
                a1 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "movca.l r0, @%1\n\t"
                             "ocbi @%0\n\t"
                             "ocbi @%1" : :
                             "r" (a0), "r" (a1));
                asm volatile("ldc %0, sr" : : "r" (orig_sr));
                a0 += linesz;
                a1 += linesz;
        } while (a0 < a0e);
}

static void __flush_dcache_segment_4way(unsigned long start,
                                        unsigned long extent_per_way)
{
        unsigned long orig_sr, sr_with_bl;
        unsigned long base_addr;
        unsigned long way_incr, linesz, way_size;
        struct cache_info *dcache;
        register unsigned long a0, a1, a2, a3, a0e;

        asm volatile("stc sr, %0" : "=r" (orig_sr));
        sr_with_bl = orig_sr | (1<<28);
        base_addr = ((unsigned long)&empty_zero_page[0]);

        /* See comment under 1-way above */
        base_addr = ((base_addr >> 16) << 16);

        dcache = &cpu_data->dcache;
        linesz = dcache->linesz;
        way_incr = dcache->way_incr;
        way_size = dcache->way_size;

        a0 = base_addr;
        a1 = a0 + way_incr;
        a2 = a1 + way_incr;
        a3 = a2 + way_incr;
        a0e = base_addr + extent_per_way;
        do {
                asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
                asm volatile("movca.l r0, @%0\n\t"
                             "movca.l r0, @%1\n\t"
                             "movca.l r0, @%2\n\t"
                             "movca.l r0, @%3\n\t"
                             "ocbi @%0\n\t"
                             "ocbi @%1\n\t"
                             "ocbi @%2\n\t"
                             "ocbi @%3" : :
                             "r" (a0), "r" (a1), "r" (a2), "r" (a3));
                a0 += linesz;
                a1 += linesz;
                a2 += linesz;
                a3 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "movca.l r0, @%1\n\t"
                             "movca.l r0, @%2\n\t"
                             "movca.l r0, @%3\n\t"
                             "ocbi @%0\n\t"
                             "ocbi @%1\n\t"
                             "ocbi @%2\n\t"
                             "ocbi @%3" : :
                             "r" (a0), "r" (a1), "r" (a2), "r" (a3));
                a0 += linesz;
                a1 += linesz;
                a2 += linesz;
                a3 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "movca.l r0, @%1\n\t"
                             "movca.l r0, @%2\n\t"
                             "movca.l r0, @%3\n\t"
                             "ocbi @%0\n\t"
                             "ocbi @%1\n\t"
                             "ocbi @%2\n\t"
                             "ocbi @%3" : :
                             "r" (a0), "r" (a1), "r" (a2), "r" (a3));
                a0 += linesz;
                a1 += linesz;
                a2 += linesz;
                a3 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "movca.l r0, @%1\n\t"
                             "movca.l r0, @%2\n\t"
                             "movca.l r0, @%3\n\t"
                             "ocbi @%0\n\t"
                             "ocbi @%1\n\t"
                             "ocbi @%2\n\t"
                             "ocbi @%3" : :
                             "r" (a0), "r" (a1), "r" (a2), "r" (a3));
                asm volatile("ldc %0, sr" : : "r" (orig_sr));
                a0 += linesz;
                a1 += linesz;
                a2 += linesz;
                a3 += linesz;
        } while (a0 < a0e);
}