/*
 * arch/sh/mm/cache-sh5.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2002  Benedict Gaster
 * Copyright (C) 2003  Richard Curnow
 * Copyright (C) 2003 - 2008  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
/* Wired TLB entry for the D-cache */
static unsigned long long dtlb_cache_slot;
void __init cpu_cache_init(void)
{
	/* Reserve a slot for dcache colouring in the DTLB */
	dtlb_cache_slot = sh64_get_wired_dtlb_entry();
}
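/*
 * This wired entry is the one used by sh64_setup_dtlb_cache_slot() and
 * sh64_teardown_dtlb_cache_slot() below to map temporary, correctly
 * coloured aliases for the purge and copy/clear helpers in this file.
 */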
void __init kmap_coherent_init(void)
{
	/* XXX ... */
}

void *kmap_coherent(struct page *page, unsigned long addr)
{
	/* XXX ... */
	return NULL;
}

void kunmap_coherent(void)
{
}
#ifdef CONFIG_DCACHE_DISABLED
#define sh64_dcache_purge_all()					do { } while (0)
#define sh64_dcache_purge_coloured_phy_page(paddr, eaddr)	do { } while (0)
#define sh64_dcache_purge_user_range(mm, start, end)		do { } while (0)
#define sh64_dcache_purge_phy_page(paddr)			do { } while (0)
#define sh64_dcache_purge_virt_page(mm, eaddr)		do { } while (0)
#endif
/*
 * The following group of functions deal with mapping and unmapping a
 * temporary page into a DTLB slot that has been set aside for exclusive
 * use.
 */
static inline void
sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
			   unsigned long paddr)
{
	local_irq_disable();
	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
}

static inline void sh64_teardown_dtlb_cache_slot(void)
{
	sh64_teardown_tlb_slot(dtlb_cache_slot);
	local_irq_enable();
}
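/*
 * Note the asymmetric IRQ handling across the pair above: interrupts are
 * blocked from setup until teardown, so nothing can observe or reuse the
 * wired slot while it holds a temporary cache-alias mapping.
 */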
#ifndef CONFIG_ICACHE_DISABLED
static inline void sh64_icache_inv_all(void)
{
	unsigned long long addr, flag, data;
	unsigned long flags;

	addr = ICCR0;
	flag = ICCR0_ICI;
	data = 0;

	/* Make this a critical section for safety (probably not strictly necessary.) */
	local_irq_save(flags);

	/* Without %1 it gets inexplicably wrong */
	__asm__ __volatile__ (
		"getcfg	%3, 0, %0\n\t"
		"or	%0, %2, %0\n\t"
		"putcfg	%3, 0, %0\n\t"
		"synci"
		: "=&r" (data)
		: "0" (data), "r" (flag), "r" (addr));

	local_irq_restore(flags);
}
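/*
 * The read-modify-write above sets the invalidate bit (ICCR0_ICI) in the
 * I-cache control register ICCR0 through the getcfg/putcfg
 * configuration-space instructions; the trailing synci resynchronises
 * the instruction fetch stream afterwards.
 */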
static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
{
	/* Invalidate range of addresses [start,end] from the I-cache, where
	   the addresses lie in the kernel superpage. */

	unsigned long long ullend, addr, aligned_start;
	aligned_start = (unsigned long long)(signed long long)(signed long) start;
	addr = L1_CACHE_ALIGN(aligned_start);
	ullend = (unsigned long long) (signed long long) (signed long) end;

	while (addr <= ullend) {
		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
		addr += L1_CACHE_BYTES;
	}
}
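/*
 * The (signed long)/(signed long long) double casts above sign-extend
 * 32-bit kernel pointers into 64-bit effective addresses; as the
 * MAGIC_PAGE0_START definition further down suggests, kernel addresses
 * live in the sign-extended top of the SH-5 address space.
 */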
static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
{
	/* If we get called, we know that vma->vm_flags contains VM_EXEC.
	   Also, eaddr is page-aligned. */
	unsigned int cpu = smp_processor_id();
	unsigned long long addr, end_addr;
	unsigned long flags = 0;
	unsigned long running_asid, vma_asid;

	addr = eaddr;
	end_addr = addr + PAGE_SIZE;

	/* Check whether we can use the current ASID for the I-cache
	   invalidation.  For example, if we're called via
	   access_process_vm->flush_cache_page->here, (e.g. when reading from
	   /proc), 'running_asid' will be that of the reader, not of the
	   victim.

	   Also, note the risk that we might get pre-empted between the ASID
	   compare and blocking IRQs, and before we regain control, the
	   pid->ASID mapping changes.  However, the whole cache will get
	   invalidated when the mapping is renewed, so the worst that can
	   happen is that the loop below ends up invalidating somebody else's
	   cache entries.
	*/

	running_asid = get_asid();
	vma_asid = cpu_asid(cpu, vma->vm_mm);
	if (running_asid != vma_asid) {
		local_irq_save(flags);
		switch_and_save_asid(vma_asid);
	}

	while (addr < end_addr) {
		/* Worth unrolling a little */
		__asm__ __volatile__("icbi %0,  0" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 32" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 64" : : "r" (addr));
		__asm__ __volatile__("icbi %0, 96" : : "r" (addr));
		addr += 128;
	}

	if (running_asid != vma_asid) {
		switch_and_save_asid(running_asid);
		local_irq_restore(flags);
	}
}
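/*
 * The unrolled offsets (0/32/64/96) and the 128-byte stride assume
 * 32-byte I-cache lines, i.e. four lines are invalidated per iteration.
 */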
static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
			unsigned long start, unsigned long end)
{
	/* Used for invalidating big chunks of I-cache, i.e. assume the range
	   is whole pages.  If 'start' or 'end' is not page aligned, the code
	   is conservative and invalidates to the ends of the enclosing pages.
	   This is functionally OK, just a performance loss. */

	/* See the comments below in sh64_dcache_purge_user_range() regarding
	   the choice of algorithm.  However, for the I-cache option (2) isn't
	   available because there are no physical tags so aliases can't be
	   resolved.  The icbi instruction has to be used through the user
	   mapping.  Because icbi is cheaper than ocbp on a cache hit, it
	   would be cheaper to use the selective code for a large range than is
	   possible with the D-cache.  Just assume 64 for now as a working
	   figure.
	   */
	int n_pages;

	if (!mm)
		return;

	n_pages = ((end - start) >> PAGE_SHIFT);
	if (n_pages >= 64) {
		sh64_icache_inv_all();
	} else {
		unsigned long aligned_start;
		unsigned long eaddr;
		unsigned long after_last_page_start;
		unsigned long mm_asid, current_asid;
		unsigned long flags = 0;

		mm_asid = cpu_asid(smp_processor_id(), mm);
		current_asid = get_asid();

		if (mm_asid != current_asid) {
			/* Switch ASID and run the invalidate loop under cli */
			local_irq_save(flags);
			switch_and_save_asid(mm_asid);
		}

		aligned_start = start & PAGE_MASK;
		after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);

		while (aligned_start < after_last_page_start) {
			struct vm_area_struct *vma;
			unsigned long vma_end;
			vma = find_vma(mm, aligned_start);
			if (!vma || (aligned_start < vma->vm_start)) {
				/* Hole, or no vma at all: skip a page at a
				   time to avoid getting stuck in an error
				   condition */
				aligned_start += PAGE_SIZE;
				continue;
			}
			vma_end = vma->vm_end;
			if (vma->vm_flags & VM_EXEC) {
				/* Executable */
				eaddr = aligned_start;
				while (eaddr < vma_end) {
					sh64_icache_inv_user_page(vma, eaddr);
					eaddr += PAGE_SIZE;
				}
			}
			aligned_start = vma->vm_end; /* Skip to start of next region */
		}

		if (mm_asid != current_asid) {
			switch_and_save_asid(current_asid);
			local_irq_restore(flags);
		}
	}
}
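/*
 * find_vma() returns the first vma with vm_end > aligned_start, or NULL,
 * so the vm_start test above is all that's needed to detect a hole in
 * the address space; within a vma, only VM_EXEC mappings are walked,
 * matching the VM_EXEC policy used elsewhere in this file.
 */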
/*
 * Invalidate a small range of user context I-cache, not necessarily page
 * (or even cache-line) aligned.
 *
 * Since this is used inside ptrace, the ASID in the mm context typically
 * won't match current_asid.  We'll have to switch ASID to do this.  For
 * safety, and given that the range will be small, do all this under cli.
 *
 * Note, there is a hazard that the ASID in mm->context is no longer
 * actually associated with mm, i.e. if the mm->context has started a new
 * cycle since mm was last active.  However, this is just a performance
 * issue: all that happens is that we invalidate lines belonging to
 * another mm, so the owning process has to refill them when that mm goes
 * live again.  mm itself can't have any cache entries because there will
 * have been a flush_cache_all when the new mm->context cycle started.
 */
static void sh64_icache_inv_user_small_range(struct mm_struct *mm,
					     unsigned long start, int len)
{
	unsigned long long eaddr = start;
	unsigned long long eaddr_end = start + len;
	unsigned long current_asid, mm_asid;
	unsigned long flags;
	unsigned long long epage_start;

	/*
	 * Align down to the start of the cache line.  Otherwise, suppose
	 * len==8 and start was at 32N+28 : stepping from the unaligned
	 * start would miss the line holding the last 4 bytes, while
	 * rounding up would miss the line holding the first 4.  Rounding
	 * down covers both, since eaddr_end stays byte-exact.
	 */
	eaddr = start & ~((unsigned long long)L1_CACHE_BYTES - 1);
	eaddr_end = start + len;

	mm_asid = cpu_asid(smp_processor_id(), mm);
	local_irq_save(flags);
	current_asid = switch_and_save_asid(mm_asid);

	epage_start = eaddr & PAGE_MASK;

	while (eaddr < eaddr_end) {
		__asm__ __volatile__("icbi %0, 0" : : "r" (eaddr));
		eaddr += L1_CACHE_BYTES;
	}
	switch_and_save_asid(current_asid);
	local_irq_restore(flags);
}
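/*
 * The ASID switch above is what makes the icbi hits possible: the SH-5
 * I-cache is virtually tagged (there are no physical tags - see the
 * comment in sh64_icache_inv_user_page_range()), so the loop has to run
 * with mm's ASID live rather than the caller's.
 */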
static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
{
	/* The icbi instruction never raises ITLBMISS.  i.e. if there's not a
	   cache hit on the virtual tag the instruction ends there, without a
	   TLB access. */

	unsigned long long aligned_start;
	unsigned long long ull_end;
	unsigned long long addr;

	ull_end = end;

	/* Just invalidate over the range using the natural addresses.  TLB
	   miss handling will be OK (TBC).  Since it's for the current process,
	   either we're already in the right ASID context, or the ASIDs have
	   been recycled since we were last active in which case we might just
	   invalidate another process's I-cache entries : no worries, just a
	   performance drop for him. */
	/* Round down so the line containing 'start' is covered. */
	aligned_start = start & ~((unsigned long long)L1_CACHE_BYTES - 1);
	addr = aligned_start;
	while (addr < ull_end) {
		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
		__asm__ __volatile__ ("nop");
		__asm__ __volatile__ ("nop");
		addr += L1_CACHE_BYTES;
	}
}
#endif /* !CONFIG_ICACHE_DISABLED */
#ifndef CONFIG_DCACHE_DISABLED
/* Buffer used as the target of alloco instructions to purge data from cache
   sets by natural eviction. -- RPC */
#define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4))
static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, };
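/*
 * Sizing note: with 32-byte lines, L1_CACHE_BYTES << 10 is 32KB, one
 * line per set/way combination of the operand cache; the extra 4KB is
 * presumably slop so a whole block of sets is always reachable whatever
 * the area's alignment relative to set 0.
 */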
static inline void sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
{
	/* Purge all ways in a particular block of sets, specified by the base
	   set number and number of sets.  Can handle wrap-around, if that's
	   needed. */

	int dummy_buffer_base_set;
	unsigned long long eaddr, eaddr0, eaddr1;
	int j;
	int set_offset;

	dummy_buffer_base_set = ((int)&dummy_alloco_area &
				 cpu_data->dcache.entry_mask) >>
				 cpu_data->dcache.entry_shift;
	set_offset = sets_to_purge_base - dummy_buffer_base_set;

	for (j = 0; j < n_sets; j++, set_offset++) {
		set_offset &= (cpu_data->dcache.sets - 1);
		eaddr0 = (unsigned long long)dummy_alloco_area +
			(set_offset << cpu_data->dcache.entry_shift);

		/*
		 * Do one alloco which hits the required set per cache
		 * way.  For write-back mode, this will purge the #ways
		 * resident lines.  There's little point unrolling this
		 * loop because the allocos stall more if they're too
		 * close together.
		 */
		eaddr1 = eaddr0 + cpu_data->dcache.way_size *
				  cpu_data->dcache.ways;

		for (eaddr = eaddr0; eaddr < eaddr1;
		     eaddr += cpu_data->dcache.way_size) {
			__asm__ __volatile__ ("alloco %0, 0" : : "r" (eaddr));
			__asm__ __volatile__ ("synco"); /* TAKum03020 */
		}

		eaddr1 = eaddr0 + cpu_data->dcache.way_size *
				  cpu_data->dcache.ways;

		for (eaddr = eaddr0; eaddr < eaddr1;
		     eaddr += cpu_data->dcache.way_size) {
			/*
			 * Load from each address.  Required because
			 * alloco is a NOP if the cache is write-through.
			 */
			if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)))
				__raw_readb((unsigned long)eaddr);
		}
	}

	/*
	 * Don't use OCBI to invalidate the lines.  That costs cycles
	 * directly.  If the dummy block is just left resident, it will
	 * naturally get evicted as required.
	 */
}
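/*
 * "TAKum03020" above looks like a hardware erratum reference: the synco
 * serialises each alloco before the next one is issued.
 */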
/*
 * Purge the entire contents of the dcache.  The most efficient way to
 * achieve this is to use alloco instructions on a region of unused
 * memory equal in size to the cache, thereby causing the current
 * contents to be discarded by natural eviction.  The alternative, namely
 * reading every tag, setting up a mapping for the corresponding page and
 * doing an OCBP for the line, would be much more expensive.
 */
static void sh64_dcache_purge_all(void)
{
	sh64_dcache_purge_sets(0, cpu_data->dcache.sets);
}
/* Assumes this address (+ (2**n_synbits) pages up from it) aren't used for
   anything else in the kernel */
#define MAGIC_PAGE0_START 0xffffffffec000000ULL
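/*
 * Worked example: the operand cache has (1 << CACHE_OC_N_SYNBITS) page
 * colours, so the magic window is that many consecutive pages; adding
 * (eaddr & CACHE_OC_SYN_MASK) to MAGIC_PAGE0_START, as below, selects
 * the page of the window whose colour matches eaddr.
 */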
/* Purge the physical page 'paddr' from the cache.  It's known that any
 * cache lines requiring attention have the same page colour as the
 * eaddr 'eaddr'.
 *
 * This relies on the fact that the D-cache matches on physical tags when
 * no virtual tag matches.  So we create an alias for the original page
 * and purge through that.  (Alternatively, we could have done this by
 * switching ASID to match the original mapping and purged through that,
 * but that involves ASID switching cost + probably a TLBMISS + refill
 * anyway.)
 */
static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr,
						unsigned long eaddr)
{
	unsigned long long magic_page_start;
	unsigned long long magic_eaddr, magic_eaddr_end;

	magic_page_start = MAGIC_PAGE0_START + (eaddr & CACHE_OC_SYN_MASK);

	/* As long as the kernel is not pre-emptible, this doesn't need to be
	   under cli/sti. */
	sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr);

	magic_eaddr = magic_page_start;
	magic_eaddr_end = magic_eaddr + PAGE_SIZE;

	while (magic_eaddr < magic_eaddr_end) {
		/* Little point in unrolling this loop - the OCBPs are blocking
		   and won't go any quicker (i.e. the loop overhead is parallel
		   to part of the OCBP execution.) */
		__asm__ __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr));
		magic_eaddr += L1_CACHE_BYTES;
	}

	sh64_teardown_dtlb_cache_slot();
}
/*
 * Purge a page given its physical start address, by creating a temporary
 * 1 page mapping and purging across that.  Even if we know the virtual
 * address (& vma or mm) of the page, the method here is more elegant
 * because it avoids issues of coping with page faults on the purge
 * instructions (i.e. no special-case code required in the critical path
 * in the TLB miss handling).
 */
static void sh64_dcache_purge_phy_page(unsigned long paddr)
{
	unsigned long long eaddr_start, eaddr, eaddr_end;
	int i;

	/* As long as the kernel is not pre-emptible, this doesn't need to be
	   under cli/sti. */
	eaddr_start = MAGIC_PAGE0_START;
	for (i = 0; i < (1 << CACHE_OC_N_SYNBITS); i++) {
		sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr);

		eaddr = eaddr_start;
		eaddr_end = eaddr + PAGE_SIZE;
		while (eaddr < eaddr_end) {
			__asm__ __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
			eaddr += L1_CACHE_BYTES;
		}

		sh64_teardown_dtlb_cache_slot();
		eaddr_start += PAGE_SIZE;
	}
}
static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
				unsigned long addr, unsigned long end)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	spinlock_t *ptl;
	unsigned long paddr;

	if (!mm)
		return; /* No way to find physical address of page */

	pgd = pgd_offset(mm, addr);
	if (pgd_bad(*pgd))
		return;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	do {
		entry = *pte;
		if (pte_none(entry) || !pte_present(entry))
			continue;
		paddr = pte_val(entry) & PAGE_MASK;
		sh64_dcache_purge_coloured_phy_page(paddr, addr);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);
}
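/*
 * The walk above holds the pte lock across the purges and never leaves
 * a single page-table page: the caller guarantees that [addr,end) does
 * not cross a PMD boundary (see the range test in
 * sh64_dcache_purge_user_range() below).
 */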
/*
 * There are at least 5 choices for the implementation of this, with
 * pros (+), cons(-), comments(*):
 *
 * 1. ocbp each line in the range through the original user's ASID
 *    + no lines spuriously evicted
 *    - tlbmiss handling (must either handle faults on demand => extra
 *	special-case code in tlbmiss critical path), or map the page in
 *	advance (=> flush_tlb_range in advance to avoid multiple hits)
 *    - ASID switching
 *    - expensive for large ranges
 *
 * 2. temporarily map each page in the range to a special effective
 *    address and ocbp through the temporary mapping; relies on the
 *    fact that SH-5 OCB* always do TLB lookup and match on ptags (they
 *    never look at the etags)
 *    + no spurious evictions
 *    - expensive for large ranges
 *    * surely cheaper than (1)
 *
 * 3. walk all the lines in the cache, check the tags, if a match
 *    occurs create a page mapping to ocbp the line through
 *    + no spurious evictions
 *    - tag inspection overhead
 *    - (especially for small ranges)
 *    - potential cost of setting up/tearing down page mapping for
 *	every line that matches the range
 *    * cost partly independent of range size
 *
 * 4. walk all the lines in the cache, check the tags, if a match
 *    occurs use 4 * alloco to purge the line (+3 other probably
 *    innocent victims) by natural eviction
 *    + no tlb mapping overheads
 *    - spurious evictions
 *    - tag inspection overhead
 *
 * 5. implement like flush_cache_all
 *    + no tag inspection overhead
 *    - spurious evictions
 *    - bad for small ranges
 *
 * (1) can be ruled out as more expensive than (2).  (2) appears best
 * for small ranges.  The choice between (3), (4) and (5) for large
 * ranges and the range size for the large/small boundary need
 * benchmarking to determine.
 *
 * For now use approach (2) for small ranges and (5) for large ones.
 */
static void sh64_dcache_purge_user_range(struct mm_struct *mm,
				unsigned long start, unsigned long end)
{
	int n_pages = ((end - start) >> PAGE_SHIFT);

	if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
		sh64_dcache_purge_all();
	} else {
		/* Small range, covered by a single page table page */
		start &= PAGE_MASK;	/* should already be so */
		end = PAGE_ALIGN(end);	/* should already be so */
		sh64_dcache_purge_user_pages(mm, start, end);
	}
}
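/*
 * The ((start ^ (end - 1)) & PMD_MASK) test enforces the precondition of
 * sh64_dcache_purge_user_pages(): if any bit above the PMD boundary
 * differs between the first and last byte of the range, the range spans
 * more than one page-table page and the full purge is used instead.
 */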
#endif /* !CONFIG_DCACHE_DISABLED */
/*
 * Invalidate the entire contents of both caches, after writing back to
 * memory any dirty data from the D-cache.
 */
void flush_cache_all(void)
{
	sh64_dcache_purge_all();
	sh64_icache_inv_all();
}
/*
 * Invalidate an entire user-address space from both caches, after
 * writing back dirty data (e.g. for shared mmap etc).
 *
 * This could be coded selectively by inspecting all the tags then
 * doing 4*alloco on any set containing a match (as for
 * flush_cache_range), but fork/exit/execve (where this is called from)
 * are expensive anyway.
 *
 * Have to do a purge here, despite the comments re I-cache below.
 * There could be odd-coloured dirty data associated with the mm still
 * in the cache - if this gets written out through natural eviction
 * after the kernel has reused the page there will be chaos.
 *
 * The mm being torn down won't ever be active again, so any Icache
 * lines tagged with its ASID won't be visible for the rest of the
 * lifetime of this ASID cycle.  Before the ASID gets reused, there
 * will be a flush_cache_all.  Hence we don't need to touch the
 * I-cache.  This is similar to the lack of action needed in
 * flush_tlb_mm - see fault.c.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	sh64_dcache_purge_all();
}
/*
 * Invalidate (from both caches) the range [start,end) of virtual
 * addresses from the user address space specified by mm, after writing
 * back any dirty data.
 *
 * Note, 'end' is 1 byte beyond the end of the range to flush.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	sh64_dcache_purge_user_range(mm, start, end);
	sh64_icache_inv_user_page_range(mm, start, end);
}
/*
 * Invalidate any entries in either cache for the vma within the user
 * address space vma->vm_mm for the page starting at virtual address
 * 'eaddr'.  This seems to be used primarily in breaking COW.  Note,
 * the I-cache must be searched too in case the page in question is
 * both writable and being executed from (e.g. stack trampolines.)
 *
 * Note, this is called with pte lock held.
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long eaddr,
		      unsigned long pfn)
{
	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);

	if (vma->vm_flags & VM_EXEC)
		sh64_icache_inv_user_page(vma, eaddr);
}
void flush_dcache_page(struct page *page)
{
	sh64_dcache_purge_phy_page(page_to_phys(page));
	wmb();
}
/*
 * Flush the range [start,end] of kernel virtual address space from
 * the I-cache.  The corresponding range must be purged from the
 * D-cache also because the SH-5 doesn't have cache snooping between
 * the caches.  The addresses will be visible through the superpage
 * mapping, therefore it's guaranteed that there are no cache entries
 * for the range in cache sets of the wrong colour.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	__flush_purge_region((void *)start, end - start);
	wmb();
	sh64_icache_inv_kernel_range(start, end);
}
/*
 * Flush the range of user (defined by vma->vm_mm) address space starting
 * at 'addr' for 'len' bytes from the cache.  The range does not straddle
 * a page boundary, the unique physical page containing the range is
 * 'page'.  This seems to be used mainly for invalidating an address
 * range following a poke into the program text through the ptrace() call
 * from another process (e.g. for BRK instruction insertion).
 */
static void flush_icache_user_range(struct vm_area_struct *vma,
			struct page *page, unsigned long addr, int len)
{
	sh64_dcache_purge_coloured_phy_page(page_to_phys(page), addr);
	mb();

	if (vma->vm_flags & VM_EXEC)
		sh64_icache_inv_user_small_range(vma->vm_mm, addr, len);
}
/*
 * For the address range [start,end), write back the data from the
 * D-cache and invalidate the corresponding region of the I-cache for the
 * current process.  Used to flush signal trampolines on the stack to
 * make them executable.
 */
void flush_cache_sigtramp(unsigned long vaddr)
{
	unsigned long end = vaddr + L1_CACHE_BYTES;

	__flush_wback_region((void *)vaddr, L1_CACHE_BYTES);
	wmb();
	sh64_icache_inv_current_user_range(vaddr, end);
}
/*
 * These *MUST* lie in an area of virtual address space that's otherwise
 * unused.
 */
#define UNIQUE_EADDR_START	0xe0000000UL
#define UNIQUE_EADDR_END	0xe8000000UL
/*
 * Given a physical address paddr, and a user virtual address user_eaddr
 * which will eventually be mapped to it, create a one-off kernel-private
 * eaddr mapped to the same paddr.  This is used for creating special
 * destination pages for copy_user_page and clear_user_page.
 */
static unsigned long sh64_make_unique_eaddr(unsigned long user_eaddr,
					    unsigned long paddr)
{
	static unsigned long current_pointer = UNIQUE_EADDR_START;
	unsigned long coloured_pointer;

	if (current_pointer == UNIQUE_EADDR_END) {
		sh64_dcache_purge_all();
		current_pointer = UNIQUE_EADDR_START;
	}

	coloured_pointer = (current_pointer & ~CACHE_OC_SYN_MASK) |
				(user_eaddr & CACHE_OC_SYN_MASK);
	sh64_setup_dtlb_cache_slot(coloured_pointer, get_asid(), paddr);

	current_pointer += (PAGE_SIZE << CACHE_OC_N_SYNBITS);

	return coloured_pointer;
}
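/*
 * The allocator above marches through the window one colour-set at a
 * time (PAGE_SIZE << CACHE_OC_N_SYNBITS bytes per call); the full purge
 * on wrap-around guarantees that no stale lines survive from the
 * window's previous lap.
 */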
static void sh64_copy_user_page_coloured(void *to, void *from,
					 unsigned long address)
{
	void *coloured_to;

	/*
	 * Discard any existing cache entries of the wrong colour.  These are
	 * present quite often, if the kernel has recently used the page
	 * internally, then given it up, then it's been allocated to the user.
	 */
	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);

	coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
	copy_page(coloured_to, from);

	sh64_teardown_dtlb_cache_slot();
}
static void sh64_clear_user_page_coloured(void *to, unsigned long address)
{
	void *coloured_to;

	/*
	 * Discard any existing kernel-originated lines of the wrong
	 * colour.
	 */
	sh64_dcache_purge_coloured_phy_page(__pa(to), (unsigned long)to);

	coloured_to = (void *)sh64_make_unique_eaddr(address, __pa(to));
	clear_page(coloured_to);

	sh64_teardown_dtlb_cache_slot();
}
/*
 * 'from' and 'to' are kernel virtual addresses (within the superpage
 * mapping of the physical RAM).  'address' is the user virtual address
 * where the copy 'to' will be mapped after.  This allows a custom
 * mapping to be used to ensure that the new copy is placed in the
 * right cache sets for the user to see it without having to bounce it
 * out via memory.  Note however : the call to flush_page_to_ram in
 * (generic)/mm/memory.c:(break_cow) undoes all this good work in that one
 * very important case!
 *
 * TBD : can we guarantee that on every call, any cache entries for
 * 'from' are in the same colour sets as 'address' also?  i.e. is this
 * always used just to deal with COW?  (I suspect not).
 *
 * There are two possibilities here for when the page 'from' was last accessed:
 *	- by the kernel : this is OK, no purge required.
 *	- by the/a user (e.g. for break_COW) : need to purge.
 *
 * If the potential user mapping at 'address' is the same colour as
 * 'from' there is no need to purge any cache lines from the 'from'
 * page mapped into cache sets of colour 'address'.  (The copy will be
 * accessing the page through 'from').
 */
void copy_user_page(void *to, void *from, unsigned long address,
		    struct page *page)
{
	if (((address ^ (unsigned long) from) & CACHE_OC_SYN_MASK) != 0)
		sh64_dcache_purge_coloured_phy_page(__pa(from), address);

	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0)
		copy_page(to, from);
	else
		sh64_copy_user_page_coloured(to, from, address);
}
/*
 * 'to' is a kernel virtual address (within the superpage mapping of the
 * physical RAM).  'address' is the user virtual address where the 'to'
 * page will be mapped after.  This allows a custom mapping to be used to
 * ensure that the new copy is placed in the right cache sets for the
 * user to see it without having to bounce it out via memory.
 */
void clear_user_page(void *to, unsigned long address, struct page *page)
{
	if (((address ^ (unsigned long) to) & CACHE_OC_SYN_MASK) == 0)
		clear_page(to);
	else
		sh64_clear_user_page_coloured(to, address);
}
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	flush_icache_user_range(vma, page, vaddr, len);
}
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}
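/*
 * Unlike copy_to_user_page() above, no I-cache invalidation is needed
 * here: data flows from the user page into a kernel buffer, so no
 * instructions can have been modified.
 */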