sh: Add register alignment helpers for shared flushers.
arch/sh/mm/flush-sh4.c
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * Write back the dirty D-cache lines, but do not invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __weak __flush_wback_region(void *start, int size)
{
        reg_size_t aligned_start, v, cnt, end;

        aligned_start = register_align(start);
        v = aligned_start & ~(L1_CACHE_BYTES-1);
        end = (aligned_start + size + L1_CACHE_BYTES-1)
                & ~(L1_CACHE_BYTES-1);
        cnt = (end - v) / L1_CACHE_BYTES;

        /* Unrolled by eight to amortize the loop overhead. */
        while (cnt >= 8) {
                asm volatile("ocbwb     @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                asm volatile("ocbwb     @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                asm volatile("ocbwb     @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                asm volatile("ocbwb     @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                asm volatile("ocbwb     @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                asm volatile("ocbwb     @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                asm volatile("ocbwb     @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                asm volatile("ocbwb     @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                cnt -= 8;
        }

        /* Write back any remaining lines one at a time. */
        while (cnt) {
                asm volatile("ocbwb     @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                cnt--;
        }
}
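
/*
 * Usage sketch (not part of this file, compiled out below): a
 * hypothetical caller pushing a freshly written buffer out to memory
 * before a device reads it. The function name and buffer are made up
 * for illustration; real drivers should normally go through the DMA
 * mapping API (dma_sync_*) rather than calling the flusher directly.
 */
#if 0	/* illustration only */
static void example_push_to_device(void *buf, int len)
{
        /*
         * Write dirty lines covering buf back to memory; the lines
         * remain valid in the D-cache afterwards.
         */
        __flush_wback_region(buf, len);
}
#endif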

/*
 * Write back the dirty D-cache lines and invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __weak __flush_purge_region(void *start, int size)
{
        reg_size_t aligned_start, v, cnt, end;

        aligned_start = register_align(start);
        v = aligned_start & ~(L1_CACHE_BYTES-1);
        end = (aligned_start + size + L1_CACHE_BYTES-1)
                & ~(L1_CACHE_BYTES-1);
        cnt = (end - v) / L1_CACHE_BYTES;

        /* ocbp writes the line back and then invalidates it. */
        while (cnt >= 8) {
                asm volatile("ocbp      @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                asm volatile("ocbp      @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                asm volatile("ocbp      @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                asm volatile("ocbp      @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                asm volatile("ocbp      @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                asm volatile("ocbp      @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                asm volatile("ocbp      @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                asm volatile("ocbp      @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                cnt -= 8;
        }
        while (cnt) {
                asm volatile("ocbp      @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                cnt--;
        }
}
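
/*
 * Worked example of the alignment arithmetic used by all three
 * flushers (assuming 32-byte cache lines, as on SH-4): for
 * start = 0x1003 and size = 0x40, v rounds down to 0x1000, end
 * rounds 0x1043 up to 0x1060, and cnt = (0x1060 - 0x1000) / 32 = 3,
 * so every cache line overlapping the region is visited exactly once.
 */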

/*
 * Invalidate the D-cache lines without writing them back.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 *
 * Note that a line only partially covered by the region is still
 * invalidated whole, so dirty data sharing such an edge line is
 * discarded.
 */
void __weak __flush_invalidate_region(void *start, int size)
{
        reg_size_t aligned_start, v, cnt, end;

        aligned_start = register_align(start);
        v = aligned_start & ~(L1_CACHE_BYTES-1);
        end = (aligned_start + size + L1_CACHE_BYTES-1)
                & ~(L1_CACHE_BYTES-1);
        cnt = (end - v) / L1_CACHE_BYTES;

        /* ocbi invalidates the line without writing it back. */
        while (cnt >= 8) {
                asm volatile("ocbi      @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                asm volatile("ocbi      @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                asm volatile("ocbi      @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                asm volatile("ocbi      @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                asm volatile("ocbi      @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                asm volatile("ocbi      @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                asm volatile("ocbi      @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                asm volatile("ocbi      @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                cnt -= 8;
        }

        while (cnt) {
                asm volatile("ocbi      @%0" : : "r" (v));
                v += L1_CACHE_BYTES;
                cnt--;
        }
}
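
/*
 * All three flushers above are declared __weak, so a CPU-specific
 * variant can override them at link time simply by providing a
 * strong definition elsewhere. A minimal sketch of such an override
 * (hypothetical, compiled out here; a real one would use a
 * CPU-specific fast path instead of this plain per-line loop):
 */
#if 0	/* illustration only */
void __flush_wback_region(void *start, int size)
{
        reg_size_t v, end;

        v = register_align(start) & ~(L1_CACHE_BYTES-1);
        end = (register_align(start) + size + L1_CACHE_BYTES-1)
                & ~(L1_CACHE_BYTES-1);

        /* Same semantics as the generic version, without unrolling. */
        for (; v < end; v += L1_CACHE_BYTES)
                asm volatile("ocbwb     @%0" : : "r" (v));
}
#endif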