1 /* tsb.S: Sparc64 TSB table handling.
3 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
11 /* Invoked from TLB miss handler, we are in the
12 * MMU global registers and they are setup like
15 * %g1: TSB entry pointer
16 * %g2: available temporary
17 * %g3: FAULT_CODE_{D,I}TLB
18 * %g4: available temporary
19 * %g5: available temporary
21 * %g7: physical address base of the linux page
22 * tables for the current address space
/* tsb_miss_dtlb entry (its label is on a line not shown in this
 * excerpt): read the D-MMU tag-access register to obtain the
 * faulting virtual address, then join the common page-table walk.
 * NOTE(review): the delay-slot instruction after each ba,pt appears
 * to be missing from this excerpt -- confirm against the full file.
 */
26 mov TLB_TAG_ACCESS, %g4
27 ldxa [%g4] ASI_DMMU, %g4	! %g4 = faulting vaddr (D-side)
28 ba,pt %xcc, tsb_miss_page_table_walk

/* tsb_miss_itlb entry (label likewise not visible): same as above
 * but reads the tag-access register through the I-MMU ASI.
 */
33 mov TLB_TAG_ACCESS, %g4
34 ldxa [%g4] ASI_IMMU, %g4	! %g4 = faulting vaddr (I-side)
35 ba,pt %xcc, tsb_miss_page_table_walk
38 tsb_miss_page_table_walk:
39 /* This clobbers %g1 and %g6, preserve them... */
/* Walk the user page tables (TL1-safe macro variant); branches to
 * tsb_do_fault when no mapping exists.  Per the header comment:
 * %g4 = faulting vaddr, %g7 = physical base of the page tables,
 * %g5/%g2 are temporaries; %g5 ends up pointing at the PTE.
 * NOTE(review): the save/restore of %g1/%g6 referenced by the
 * comment above is on lines missing from this excerpt.
 */
48 USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault)

/* Lock the TSB entry at %g1 so a concurrent TSB flush cannot race
 * with our insert below; %g2/%g4 are scratch.
 */
51 TSB_LOCK_TAG(%g1, %g2, %g4)

53 /* Load and check PTE. */
54 ldxa [%g5] ASI_PHYS_USE_EC, %g5	! %g5 = PTE, loaded via physical address
55 brgez,a,pn %g5, tsb_do_fault	! valid bit is the sign bit: >= 0 means invalid

58 /* If it is larger than the base page size, don't
59 * bother putting it into the TSB.
/* Compare the PTE's page-size field against the base-page size
 * bits; a mismatch means a huge mapping, so skip the TSB insert
 * and reload the TLB directly.
 * NOTE(review): the and/cmp instructions between the sethi pair
 * and the bne below are on lines missing from this excerpt, as is
 * the bne's delay slot.
 */
62 sethi %hi(_PAGE_ALL_SZ_BITS >> 32), %g4
63 sethi %hi(_PAGE_SZBITS >> 32), %g7
66 bne,a,pn %xcc, tsb_tlb_reload

/* Store tag + PTE into the TSB entry; this also releases the lock
 * taken by TSB_LOCK_TAG above.
 */
69 TSB_WRITE(%g1, %g5, %g6)
71 /* Finally, load TLB and return from trap. */
/* tsb_tlb_reload (its label is not visible in this excerpt):
 * dispatch on the fault code in %g3 and install the PTE in %g5
 * into the appropriate TLB via the data-in register.
 * NOTE(review): the tsb_dtlb_load/tsb_itlb_load labels and the
 * retry instructions that must follow each stxa are on lines
 * missing from this excerpt.
 */
73 cmp %g3, FAULT_CODE_DTLB
74 bne,pn %xcc, tsb_itlb_load
78 stxa %g5, [%g0] ASI_DTLB_DATA_IN	! install PTE into D-TLB
82 stxa %g5, [%g0] ASI_ITLB_DATA_IN	! install PTE into I-TLB
85 /* No valid entry in the page tables, do full fault
/* tsb_do_fault (label not visible in this excerpt): leave the MMU
 * globals, collect the fault address into %g5 and the fault code
 * into %g4, then enter the common real-fault path.
 */
91 cmp %g3, FAULT_CODE_DTLB
93 bne,pn %xcc, tsb_do_itlb_fault
94 wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate	! delay slot: switch out of MMU globals

/* D-side fault: reread the faulting vaddr from the D-MMU. */
99 mov TLB_TAG_ACCESS, %g4
100 ldxa [%g4] ASI_DMMU, %g5	! %g5 = faulting vaddr
/* Window-fill faults must detour through the winfixup trampoline;
 * everything else goes straight to the common handler.
 * NOTE(review): the compare that sets the condition tested by this
 * be,pt is on a line missing from this excerpt -- confirm.
 */
101 be,pt %xcc, sparc64_realfault_common
102 mov FAULT_CODE_DTLB, %g4	! delay slot: fault code for handler
103 ba,pt %xcc, winfix_trampoline

/* tsb_do_itlb_fault (label not visible): I-side fault.
 * NOTE(review): the instructions that load the fault address into
 * %g5 for this path are on lines missing from this excerpt.
 */
108 ba,pt %xcc, sparc64_realfault_common
109 mov FAULT_CODE_ITLB, %g4	! delay slot: fault code for handler
111 .globl sparc64_realfault_common
/* Common fault exit.
 * In: %g4 = FAULT_CODE_{D,I}TLB, %g5 = fault address,
 *     %g6 = current thread_info pointer.
 * Records the fault in thread_info, saves full trap state via
 * etrap, calls the C fault handler with a pt_regs pointer, and
 * returns to the interrupted context through rtrap.
 */
112 sparc64_realfault_common:
113 stb %g4, [%g6 + TI_FAULT_CODE] ! Save fault code
114 stx %g5, [%g6 + TI_FAULT_ADDR] ! Save fault address
115 ba,pt %xcc, etrap ! Save trap state
117 call do_sparc64_fault ! Call fault handler
118 add %sp, PTREGS_OFF, %o0 ! Compute pt_regs arg
119 ba,pt %xcc, rtrap_clr_l6 ! Restore cpu state
120 nop ! Delay slot (fill me)
122 .globl winfix_trampoline
/* winfix_trampoline (its label line is not visible in this
 * excerpt): redirect TNPC so the trap return resumes in the
 * window-fixup handler.  Presumably the fixup entry sits at offset
 * 0x7c within the (aligned) fill-handler block addressed by %tpc,
 * hence the or with 0x7c -- confirm against the trap-table layout.
 * NOTE(review): the retry that should follow the wrpr is on a line
 * missing from this excerpt.
 */
124 rdpr %tpc, %g3 ! Prepare winfixup TNPC
125 or %g3, 0x7c, %g3 ! Compute branch offset
126 wrpr %g3, %tnpc ! Write it into TNPC
129 /* Reload MMU related context switch state at
132 * %o0: page table physical address
/* tsb_context_switch: called on context switch with the new mm's
 * page-table physical address in %o0.  Disables interrupts, finds
 * this CPU's trap_block slot, and stores the new PGD physical
 * address there so the TLB-miss handlers (which read it into %g7,
 * per the header comment) pick it up.
 */
136 .globl tsb_context_switch
/* NOTE(review): the tsb_context_switch: label line and the rdpr
 * that saves %pstate into %o5 are on lines missing from this
 * excerpt.
 */
139 wrpr %o5, PSTATE_IE, %pstate	! %o5 assumed = saved %pstate; this clears PSTATE_IE
141 ldub [%g6 + TI_CPU], %o3	! %o3 = this cpu's number from thread_info
142 sethi %hi(trap_block), %o4
143 sllx %o3, TRAP_BLOCK_SZ_SHIFT, %o3	! %o3 = cpu * sizeof(trap_block[0])
144 or %o4, %lo(trap_block), %o4
/* NOTE(review): the add combining %o4 and %o3 into the per-cpu
 * pointer is on a line missing from this excerpt.
 */
146 stx %o0, [%o4 + TRAP_PER_CPU_PGD_PADDR]
151 /* Lock TSB into D-TLB. */
/* Map the TSB at TSBMAP_BASE with a locked D-TLB entry so that TSB
 * lookups inside the miss handlers can never themselves TLB-miss.
 * NOTE(review): many lines of this sequence (demap operations,
 * membars, the construction of %o1/%o2/%g1) are missing from this
 * excerpt, and the routine continues past the end of the excerpt.
 */
152 sethi %hi(PAGE_SIZE), %o3
154 sethi %hi(TSBMAP_BASE), %o2

157 /* XXX handle PAGE_SIZE != 8K correctly... */
159 stxa %o2, [%g1] ASI_DMMU	! %g1 set up on a missing line -- presumably a demap/tag write; confirm
162 stxa %o2, [%g1] ASI_IMMU

/* PTE bits for the locked kernel mapping: valid + base page-size
 * bits in the high word; cacheable (CP/CV), privileged, writable,
 * and locked (_PAGE_L) in the low word.
 */
165 #define KERN_HIGHBITS ((_PAGE_VALID|_PAGE_SZBITS)^0xfffff80000000000)
166 #define KERN_LOWBITS (_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W | _PAGE_L)
167 sethi %uhi(KERN_HIGHBITS), %g2
168 or %g2, %ulo(KERN_HIGHBITS), %g2
/* NOTE(review): the sllx that shifts the HIGHBITS into the upper
 * word, and the instructions combining in the physical address,
 * are on lines missing from this excerpt.
 */
170 or %g2, KERN_LOWBITS, %g2

176 /* We use entry 61 for this locked entry. This is the spitfire
177 * TLB entry number, and luckily cheetah masks the value with
178 * 15 ending us up with entry 13 which is what we want in that
181 * XXX Interactions with prom_world()...
/* Write the locked translation: first the tag (virtual address)
 * via TLB_TAG_ACCESS, then the data via the D-TLB data-access
 * register.
 */
183 mov TLB_TAG_ACCESS, %g1
184 stxa %o2, [%g1] ASI_DMMU	! tag = TSBMAP_BASE virtual address
187 stxa %o1, [%g1] ASI_DTLB_DATA_ACCESS	! %g1 assumed rewritten to the entry-61 selector on a missing line -- confirm