*
* 2001 Sep 08:
* Completely revisited, many important fixes
- * Nicolas Pitre <nico@cam.org>
+ * Nicolas Pitre <nico@fluxnic.net>
*/
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
-#include <asm/procinfo.h>
+#include <asm/hwcap.h>
#include <asm/pgtable.h>
+#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"
* Nothing too exciting at the moment
*/
ENTRY(cpu_xscale_proc_init)
+ @ enable write buffer coalescing. Some bootloaders disable it
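+ @ (on XScale cores, cp15 c1 with opcode_2 = 1 is the auxiliary
+ @ control register; bit 0, when set, disables write buffer
+ @ coalescing, so clearing it below re-enables coalescing)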
+ mrc p15, 0, r1, c1, c0, 1
+ bic r1, r1, #1
+ mcr p15, 0, r1, c1, c0, 1
mov pc, lr
/*
* to what would be the reset vector.
*
* loc: location to jump to for soft reset
+ *
+ * Beware PXA270 erratum E7.
*/
.align 5
ENTRY(cpu_xscale_reset)
mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
msr cpsr_c, r1 @ reset CPSR
+ mcr p15, 0, r1, c10, c4, 1 @ unlock I-TLB
+ mcr p15, 0, r1, c8, c5, 0 @ invalidate I-TLB
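+ @ (the I-TLB is unlocked before the invalidate so that any
+ @ locked-down entries are removed too; a stale locked entry could
+ @ otherwise shadow the mapping used for the jump to the reset vector)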
mrc p15, 0, r1, c1, c0, 0 @ ctrl register
bic r1, r1, #0x0086 @ ........B....CA.
bic r1, r1, #0x3900 @ ..VIZ..S........
+ sub pc, pc, #4 @ flush pipeline
+ @ *** cache line aligned ***
mcr p15, 0, r1, c1, c0, 0 @ ctrl register
- mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches & BTB
bic r1, r1, #0x0001 @ ...............M
+ mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches & BTB
mcr p15, 0, r1, c1, c0, 0 @ ctrl register
@ CAUTION: MMU turned off from this point. We count on the pipeline
@ already containing the last two instructions so execution survives.
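+ @ (the "sub pc, pc, #4" above refills the pipeline, and the cache
+ @ line alignment keeps the final mcr instructions together, so they
+ @ are already fetched by the time the M bit change takes effect)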
* - end - virtual end address
*/
ENTRY(xscale_dma_inv_range)
- mrc p15, 0, r2, c0, c0, 0 @ read ID
- eor r2, r2, #0x69000000
- eor r2, r2, #0x00052000
- bics r2, r2, #1
- beq xscale_dma_flush_range
-
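+ @ If the start address is not cache line aligned, clean the
+ @ partially covered line first so that dirty data outside the
+ @ range is not lost by the invalidate.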
tst r0, #CACHELINESIZE - 1
bic r0, r0, #CACHELINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
.long xscale_dma_clean_range
.long xscale_dma_flush_range
+/*
+ * On steppings A0 and A1 of the 80200, invalidating the D-cache by line
+ * doesn't clear the dirty bits, which means that if we invalidate a dirty
+ * line, the dirty data can still be written back to external memory later.
+ *
+ * The recommended workaround is to always clean a D-cache line before
+ * invalidating it, so on the affected processors, dma_inv_range() is
+ * implemented as dma_flush_range().
+ *
+ * See erratum #25 of "Intel 80200 Processor Specification Update",
+ * revision January 22, 2003, available at:
+ * http://www.intel.com/design/iio/specupdt/273415.htm
+ */
+ENTRY(xscale_80200_A0_A1_cache_fns)
+ .long xscale_flush_kern_cache_all
+ .long xscale_flush_user_cache_all
+ .long xscale_flush_user_cache_range
+ .long xscale_coherent_kern_range
+ .long xscale_coherent_user_range
+ .long xscale_flush_kern_dcache_page
+ .long xscale_dma_flush_range
+ .long xscale_dma_clean_range
+ .long xscale_dma_flush_range
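+ @ (the slots above follow struct cpu_cache_fns; the dma_inv_range
+ @ slot deliberately reuses xscale_dma_flush_range, per erratum #25
+ @ described above)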
+
ENTRY(cpu_xscale_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHELINESIZE
/* =============================== PageTable ============================== */
-#define PTE_CACHE_WRITE_ALLOCATE 0
-
/*
* cpu_xscale_switch_mm(pgd)
*
cpwait_ret lr, ip
/*
- * cpu_xscale_set_pte(ptep, pte)
+ * cpu_xscale_set_pte_ext(ptep, pte, ext)
*
* Set a PTE and flush it out
*
* Erratum 40: must set memory to write-through for user read-only pages.
*/
- .align 5
-ENTRY(cpu_xscale_set_pte)
- str r1, [r0], #-2048 @ linux version
-
- bic r2, r1, #0xff0
- orr r2, r2, #PTE_TYPE_EXT @ extended page
-
- eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY
-
- tst r3, #L_PTE_USER @ User?
- orrne r2, r2, #PTE_EXT_AP_URO_SRW @ yes -> user r/o, system r/w
-
- tst r3, #L_PTE_WRITE | L_PTE_DIRTY @ Write and Dirty?
- orreq r2, r2, #PTE_EXT_AP_UNO_SRW @ yes -> user n/a, system r/w
- @ combined with user -> user r/w
+cpu_xscale_mt_table:
+ .long 0x00 @ L_PTE_MT_UNCACHED
+ .long PTE_BUFFERABLE @ L_PTE_MT_BUFFERABLE
+ .long PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH
+ .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK
+ .long PTE_EXT_TEX(1) | PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED
+ .long 0x00 @ unused
+ .long PTE_EXT_TEX(1) | PTE_CACHEABLE @ L_PTE_MT_MINICACHE
+ .long PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC
+ .long 0x00 @ unused
+ .long PTE_BUFFERABLE @ L_PTE_MT_DEV_WC
+ .long 0x00 @ unused
+ .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_DEV_CACHED
+ .long 0x00 @ L_PTE_MT_DEV_NONSHARED
+ .long 0x00 @ unused
+ .long 0x00 @ unused
+ .long 0x00 @ unused
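+ @ (indexed directly with the L_PTE_MT_* value: the field occupies
+ @ bits 2..5 of the Linux PTE, so every value is a multiple of 4 and
+ @ doubles as a word offset into this table)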
- @
- @ Handle the X bit. We want to set this bit for the minicache
- @ (U = E = B = W = 0, C = 1) or when write allocate is enabled,
- @ and we have a writeable, cacheable region. If we ignore the
- @ U and E bits, we can allow user space to use the minicache as
- @ well.
- @
- @ X = (C & ~W & ~B) | (C & W & B & write_allocate)
- @
- eor ip, r1, #L_PTE_CACHEABLE
- tst ip, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
-#if PTE_CACHE_WRITE_ALLOCATE
- eorne ip, r1, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
- tstne ip, #L_PTE_CACHEABLE | L_PTE_WRITE | L_PTE_BUFFERABLE
-#endif
- orreq r2, r2, #PTE_EXT_TEX(1)
+ .align 5
+ENTRY(cpu_xscale_set_pte_ext)
+ xscale_set_pte_ext_prologue
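+ @ (xscale_set_pte_ext_prologue, from proc-macros.S, stores the Linux
+ @ PTE and builds the hardware PTE with its access permissions in r2,
+ @ much as the removed open-coded sequence above did)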
@
- @ Erratum 40: The B bit must be cleared for a user read-only
- @ cacheable page.
+ @ Erratum 40: must set memory to write-through for user read-only pages
@
- @ B = B & ~(U & C & ~W)
- @
- and ip, r1, #L_PTE_USER | L_PTE_WRITE | L_PTE_CACHEABLE
- teq ip, #L_PTE_USER | L_PTE_CACHEABLE
- biceq r2, r2, #PTE_BUFFERABLE
+ @ (bit 4 is masked out so write-alloc pages match as write-back too)
+ and ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_WRITE) & ~(4 << 2)
+ teq ip, #L_PTE_MT_WRITEBACK | L_PTE_USER @ cacheable user r/o page?
- tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ Present and Young?
- movne r2, #0 @ no -> fault
+ moveq r1, #L_PTE_MT_WRITETHROUGH @ yes -> force write-through
+ and r1, r1, #L_PTE_MT_MASK @ isolate the memory type field
+ adr ip, cpu_xscale_mt_table
+ ldr ip, [ip, r1] @ fetch C, B, TEX bits for this type
+ bic r2, r2, #0x0c @ clear the hardware C and B bits
+ orr r2, r2, ip @ apply the memory type bits
- str r2, [r0] @ hardware version
- mov ip, #0
- mcr p15, 0, r0, c7, c10, 1 @ Clean D cache line
- mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
+ xscale_set_pte_ext_epilogue
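+ @ (the epilogue stores the hardware PTE, cleans the D cache line and
+ @ drains the write buffer, matching the removed sequence above)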
mov pc, lr
mcr p15, 0, ip, c7, c7, 0 @ invalidate I, D caches & BTB
mcr p15, 0, ip, c7, c10, 4 @ Drain Write (& Fill) Buffer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I, D TLBs
-#ifdef CONFIG_IWMMXT
- mov r0, #0 @ initially disallow access to CP0/CP1
-#else
- mov r0, #1 @ Allow access to CP0
-#endif
- orr r0, r0, #1 << 6 @ cp6 for IOP3xx and Bulverde
+ mov r0, #1 << 6 @ cp6 for IOP3xx and Bulverde
orr r0, r0, #1 << 13 @ It's undefined whether this
mcr p15, 0, r0, c15, c1, 0 @ affects USR or SVC modes
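+ @ (CP0/CP1 access for iWMMXt is no longer granted here; kernels with
+ @ iWMMXt support presumably enable it on demand from the lazy
+ @ context-switching code instead)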
+
+ adr r5, xscale_crval
+ ldmia r5, {r5, r6}
mrc p15, 0, r0, c1, c0, 0 @ get control register
- ldr r5, xscale_cr1_clear
bic r0, r0, r5
- ldr r5, xscale_cr1_set
- orr r0, r0, r5
+ orr r0, r0, r6
mov pc, lr
.size __xscale_setup, . - __xscale_setup
* ..11 1.01 .... .101
*
*/
- .type xscale_cr1_clear, #object
- .type xscale_cr1_set, #object
-xscale_cr1_clear:
- .word 0x3b07
-xscale_cr1_set:
- .word 0x3905
+ .type xscale_crval, #object
+xscale_crval:
+ crval clear=0x00003b07, mmuset=0x00003905, ucset=0x00001900
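+ @ (the crval macro from proc-macros.S emits the clear mask followed
+ @ by the set mask: mmuset when the kernel runs with the MMU enabled,
+ @ ucset for uncached, MMU-less configurations)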
__INITDATA
.type xscale_processor_functions, #object
ENTRY(xscale_processor_functions)
.word v5t_early_abort
+ .word legacy_pabort
.word cpu_xscale_proc_init
.word cpu_xscale_proc_fin
.word cpu_xscale_reset
.word cpu_xscale_do_idle
.word cpu_xscale_dcache_clean_area
.word cpu_xscale_switch_mm
- .word cpu_xscale_set_pte
+ .word cpu_xscale_set_pte_ext
.size xscale_processor_functions, . - xscale_processor_functions
.section ".rodata"
.asciz "v5"
.size cpu_elf_name, . - cpu_elf_name
+ .type cpu_80200_A0_A1_name, #object
+cpu_80200_A0_A1_name:
+ .asciz "XScale-80200 A0/A1"
+ .size cpu_80200_A0_A1_name, . - cpu_80200_A0_A1_name
+
.type cpu_80200_name, #object
cpu_80200_name:
.asciz "XScale-80200"
.size cpu_80200_name, . - cpu_80200_name
+ .type cpu_80219_name, #object
+cpu_80219_name:
+ .asciz "XScale-80219"
+ .size cpu_80219_name, . - cpu_80219_name
+
.type cpu_8032x_name, #object
cpu_8032x_name:
.asciz "XScale-IOP8032x Family"
.asciz "XScale-IXP42x Family"
.size cpu_ixp42x_name, . - cpu_ixp42x_name
+ .type cpu_ixp43x_name, #object
+cpu_ixp43x_name:
+ .asciz "XScale-IXP43x Family"
+ .size cpu_ixp43x_name, . - cpu_ixp43x_name
+
.type cpu_ixp46x_name, #object
cpu_ixp46x_name:
.asciz "XScale-IXP46x Family"
.section ".proc.info.init", #alloc, #execinstr
+ .type __80200_A0_A1_proc_info,#object
+__80200_A0_A1_proc_info:
+ .long 0x69052000
+ .long 0xfffffffe
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_BUFFERABLE | \
+ PMD_SECT_CACHEABLE | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
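+ @ (the second PMD word above is the new proc_info field giving the
+ @ section flags for boot-time I/O mappings, hence no cacheable or
+ @ bufferable bits; every entry below gains the same field)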
+ b __xscale_setup
+ .long cpu_arch_name
+ .long cpu_elf_name
+ .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+ .long cpu_80200_A0_A1_name
+ .long xscale_processor_functions
+ .long v4wbi_tlb_fns
+ .long xscale_mc_user_fns
+ .long xscale_80200_A0_A1_cache_fns
+ .size __80200_A0_A1_proc_info, . - __80200_A0_A1_proc_info
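+ @ (this entry must stay ahead of __80200_proc_info: entries are
+ @ matched in order, and the stricter 0xfffffffe mask catches the
+ @ A0/A1 steppings before the generic 80200 entry would)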
+
.type __80200_proc_info,#object
__80200_proc_info:
.long 0x69052000
PMD_SECT_CACHEABLE | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
b __xscale_setup
.long cpu_arch_name
.long cpu_elf_name
.long xscale_cache_fns
.size __80200_proc_info, . - __80200_proc_info
+ .type __80219_proc_info,#object
+__80219_proc_info:
+ .long 0x69052e20
+ .long 0xffffffe0
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_BUFFERABLE | \
+ PMD_SECT_CACHEABLE | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
+ b __xscale_setup
+ .long cpu_arch_name
+ .long cpu_elf_name
+ .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+ .long cpu_80219_name
+ .long xscale_processor_functions
+ .long v4wbi_tlb_fns
+ .long xscale_mc_user_fns
+ .long xscale_cache_fns
+ .size __80219_proc_info, . - __80219_proc_info
+
.type __8032x_proc_info,#object
__8032x_proc_info:
.long 0x69052420
- .long 0xfffff5e0 @ mask should accomodate IOP80219 also
+ .long 0xfffff7e0
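+ @ (the mask no longer needs to accommodate the 80219, which now has
+ @ its own proc_info entry above)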
.long PMD_TYPE_SECT | \
PMD_SECT_BUFFERABLE | \
PMD_SECT_CACHEABLE | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
b __xscale_setup
.long cpu_arch_name
.long cpu_elf_name
.type __8033x_proc_info,#object
__8033x_proc_info:
.long 0x69054010
- .long 0xffffff30
+ .long 0xfffffd30
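+ @ (bit 9 is dropped from the mask, presumably so that later 8033x
+ @ steppings with that bit set still match this entry)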
.long PMD_TYPE_SECT | \
PMD_SECT_BUFFERABLE | \
PMD_SECT_CACHEABLE | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
b __xscale_setup
.long cpu_arch_name
.long cpu_elf_name
PMD_SECT_CACHEABLE | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
b __xscale_setup
.long cpu_arch_name
.long cpu_elf_name
PMD_SECT_CACHEABLE | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
b __xscale_setup
.long cpu_arch_name
.long cpu_elf_name
PMD_SECT_CACHEABLE | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
b __xscale_setup
.long cpu_arch_name
.long cpu_elf_name
PMD_SECT_CACHEABLE | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
b __xscale_setup
.long cpu_arch_name
.long cpu_elf_name
PMD_SECT_CACHEABLE | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
b __xscale_setup
.long cpu_arch_name
.long cpu_elf_name
.long xscale_cache_fns
.size __ixp42x_proc_info, . - __ixp42x_proc_info
+ .type __ixp43x_proc_info, #object
+__ixp43x_proc_info:
+ .long 0x69054040
+ .long 0xfffffff0
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_BUFFERABLE | \
+ PMD_SECT_CACHEABLE | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
+ b __xscale_setup
+ .long cpu_arch_name
+ .long cpu_elf_name
+ .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+ .long cpu_ixp43x_name
+ .long xscale_processor_functions
+ .long v4wbi_tlb_fns
+ .long xscale_mc_user_fns
+ .long xscale_cache_fns
+ .size __ixp43x_proc_info, . - __ixp43x_proc_info
+
.type __ixp46x_proc_info, #object
__ixp46x_proc_info:
.long 0x69054200
.long 0xffffff00
- .long 0x00000c0e
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_BUFFERABLE | \
+ PMD_SECT_CACHEABLE | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
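+ @ (this symbolic form encodes the same value as the raw 0x00000c0e
+ @ word it replaces: 0x002 | 0x004 | 0x008 | 0x400 | 0x800)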
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
b __xscale_setup
.long cpu_arch_name
.long cpu_elf_name
PMD_SECT_CACHEABLE | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
b __xscale_setup
.long cpu_arch_name
.long cpu_elf_name
PMD_SECT_CACHEABLE | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
+ .long PMD_TYPE_SECT | \
+ PMD_SECT_AP_WRITE | \
+ PMD_SECT_AP_READ
b __xscale_setup
.long cpu_arch_name
.long cpu_elf_name