/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>

#define KVMPPC_HANDLE_EXIT .kvmppc_handle_exit
#define ULONG_SIZE 8
#define VCPU_GPR(n)	(VCPU_GPRS + (n * ULONG_SIZE))
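
/* The guest GPRs are kept as a flat array of longs in the vcpu struct,
 * so VCPU_GPR(n) is simply the byte offset of guest register n. */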

.macro mfpaca tmp_reg, src_reg, offset, vcpu_reg
	ld	\tmp_reg, (PACA_EXMC+\offset)(r13)
	std	\tmp_reg, VCPU_GPR(\src_reg)(\vcpu_reg)
.endm

.macro DISABLE_INTERRUPTS
	mfmsr	r0
	rldicl	r0,r0,48,1
	rotldi	r0,r0,16
	mtmsrd	r0,1
.endm
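
/* The rotate trick above clears only MSR_EE: rldicl rotates the EE bit
 * into position 0 and masks it off, rotldi rotates everything back into
 * place, and mtmsrd with L=1 then updates just EE/RI, leaving all other
 * MSR bits untouched. */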

#define VCPU_LOAD_NVGPRS(vcpu) \
	ld	r14, VCPU_GPR(r14)(vcpu); \
	ld	r15, VCPU_GPR(r15)(vcpu); \
	ld	r16, VCPU_GPR(r16)(vcpu); \
	ld	r17, VCPU_GPR(r17)(vcpu); \
	ld	r18, VCPU_GPR(r18)(vcpu); \
	ld	r19, VCPU_GPR(r19)(vcpu); \
	ld	r20, VCPU_GPR(r20)(vcpu); \
	ld	r21, VCPU_GPR(r21)(vcpu); \
	ld	r22, VCPU_GPR(r22)(vcpu); \
	ld	r23, VCPU_GPR(r23)(vcpu); \
	ld	r24, VCPU_GPR(r24)(vcpu); \
	ld	r25, VCPU_GPR(r25)(vcpu); \
	ld	r26, VCPU_GPR(r26)(vcpu); \
	ld	r27, VCPU_GPR(r27)(vcpu); \
	ld	r28, VCPU_GPR(r28)(vcpu); \
	ld	r29, VCPU_GPR(r29)(vcpu); \
	ld	r30, VCPU_GPR(r30)(vcpu); \
	ld	r31, VCPU_GPR(r31)(vcpu)

/*****************************************************************************
 *                                                                           *
 *     Guest entry / exit code that is in kernel module memory (highmem)     *
 *                                                                           *
 ****************************************************************************/

/* Registers:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_entry)

kvm_start_entry:
	/* Write correct stack frame */
	mflr	r0
	std	r0, 16(r1)

	/* Save host state to the stack */
	stdu	r1, -SWITCH_FRAME_SIZE(r1)

	/* Save r3 (kvm_run) and r4 (vcpu) */
	SAVE_2GPRS(3, r1)

	/* Save non-volatile registers (r14 - r31) */
	SAVE_NVGPRS(r1)

	/* Save LR */
	std	r0, _LINK(r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r4)

kvm_start_lightweight:

	ld	r9, VCPU_PC(r4)			/* r9 = vcpu->arch.pc */
	ld	r10, VCPU_SHADOW_MSR(r4)	/* r10 = vcpu->arch.shadow_msr */

	DISABLE_INTERRUPTS

	/* Save R1/R2 in the PACA */
	std	r1, PACAR1(r13)
	std	r2, (PACA_EXMC+EX_SRR0)(r13)
	ld	r3, VCPU_HIGHMEM_HANDLER(r4)
	std	r3, PACASAVEDMSR(r13)

	ld	r3, VCPU_TRAMPOLINE_ENTER(r4)
	mtsrr0	r3

	LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
	mtsrr1	r3
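
	/* The actual switch happens at the RFI below: SRR0 now points at
	 * the entry trampoline and SRR1 has IR/DR cleared, so we come out
	 * of the RFI in real mode. */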

	/* Load guest state in the respective registers */
	lwz	r3, VCPU_CR(r4)		/* r3 = vcpu->arch.cr */
	stw	r3, (PACA_EXMC + EX_CCR)(r13)

	ld	r3, VCPU_CTR(r4)	/* r3 = vcpu->arch.ctr */
	mtctr	r3			/* CTR = r3 */

	ld	r3, VCPU_LR(r4)		/* r3 = vcpu->arch.lr */
	mtlr	r3			/* LR = r3 */

	ld	r3, VCPU_XER(r4)	/* r3 = vcpu->arch.xer */
	std	r3, (PACA_EXMC + EX_R3)(r13)

	/* Some guests may need to have dcbz set to 32 byte length.
	 *
	 * Usually we ensure that by patching the guest's instructions
	 * to trap on dcbz and emulate it in the hypervisor.
	 *
	 * If we can, we should tell the CPU to use 32 byte dcbz though,
	 * because that's a lot faster.
	 */
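
	/* XXX the dcbz32 switch lives in HID5, a 970-class SPR - on CPUs
	 * without it we always stay on the patch-and-emulate path. */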

	ld	r3, VCPU_HFLAGS(r4)
	rldicl.	r3, r3, 0, 63		/* CR = ((r3 & 1) == 0) */
	beq	no_dcbz32_on

	mfspr	r3, SPRN_HID5
	ori	r3, r3, 0x80		/* XXX HID5_dcbz32 = 0x80 */
	mtspr	SPRN_HID5, r3

no_dcbz32_on:
	/* Load guest GPRs */

	ld	r3, VCPU_GPR(r9)(r4)
	std	r3, (PACA_EXMC + EX_R9)(r13)
	ld	r3, VCPU_GPR(r10)(r4)
	std	r3, (PACA_EXMC + EX_R10)(r13)
	ld	r3, VCPU_GPR(r11)(r4)
	std	r3, (PACA_EXMC + EX_R11)(r13)
	ld	r3, VCPU_GPR(r12)(r4)
	std	r3, (PACA_EXMC + EX_R12)(r13)
	ld	r3, VCPU_GPR(r13)(r4)
	std	r3, (PACA_EXMC + EX_R13)(r13)

	ld	r0, VCPU_GPR(r0)(r4)
	ld	r1, VCPU_GPR(r1)(r4)
	ld	r2, VCPU_GPR(r2)(r4)
	ld	r3, VCPU_GPR(r3)(r4)
	ld	r5, VCPU_GPR(r5)(r4)
	ld	r6, VCPU_GPR(r6)(r4)
	ld	r7, VCPU_GPR(r7)(r4)
	ld	r8, VCPU_GPR(r8)(r4)
	ld	r4, VCPU_GPR(r4)(r4)
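
	/* r4 (the vcpu pointer) has to be loaded last, since all the
	 * loads above still use it as their base register. */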

	/* This sets the Magic value for the trampoline */

	li	r11, 1
	stb	r11, PACA_KVM_IN_GUEST(r13)
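
	/* This flag tells the first-level exception handlers that we are
	 * inside a guest, so they bounce interrupts into the KVM exit
	 * trampoline instead of handling them the usual way. */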

	/* Jump to SLB patching handler and into our guest */
	RFI

/*
 * This is the handler in module memory. It gets jumped at from the
 * lowmem trampoline code, so it's basically the guest exit code.
 */

.global kvmppc_handler_highmem
kvmppc_handler_highmem:

	/*
	 * Register usage at this point:
	 *
	 * R00   = guest R13
	 * R01   = host R1
	 * R02   = host R2
	 * R10   = guest PC
	 * R11   = guest MSR
	 * R12   = exit handler id
	 * R13   = PACA
	 * PACA.exmc.R9    = guest R1
	 * PACA.exmc.R10   = guest R10
	 * PACA.exmc.R11   = guest R11
	 * PACA.exmc.R12   = guest R12
	 * PACA.exmc.R13   = guest R2
	 * PACA.exmc.DAR   = guest DAR
	 * PACA.exmc.DSISR = guest DSISR
	 * PACA.exmc.LR    = guest instruction
	 * PACA.exmc.CCR   = guest CR
	 * PACA.exmc.SRR0  = guest R0
	 */

	std	r3, (PACA_EXMC+EX_R3)(r13)

	/* save the exit id in R3 */
	mr	r3, r12

	/* r12 = vcpu */
	ld	r12, GPR4(r1)

	/* Now save the guest state */

	std	r0, VCPU_GPR(r13)(r12)
	std	r4, VCPU_GPR(r4)(r12)
	std	r5, VCPU_GPR(r5)(r12)
	std	r6, VCPU_GPR(r6)(r12)
	std	r7, VCPU_GPR(r7)(r12)
	std	r8, VCPU_GPR(r8)(r12)
	std	r9, VCPU_GPR(r9)(r12)

	/* get registers from PACA */
	mfpaca	r5, r0, EX_SRR0, r12
	mfpaca	r5, r3, EX_R3, r12
	mfpaca	r5, r1, EX_R9, r12
	mfpaca	r5, r10, EX_R10, r12
	mfpaca	r5, r11, EX_R11, r12
	mfpaca	r5, r12, EX_R12, r12
	mfpaca	r5, r2, EX_R13, r12
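
	/* These are the guest registers the exit trampoline had to park
	 * in the PACA exception save area to free up scratch registers;
	 * mfpaca moves each of them into its vcpu GPR slot. */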

	lwz	r5, (PACA_EXMC+EX_LR)(r13)
	stw	r5, VCPU_LAST_INST(r12)
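
	/* The exmc.LR slot is reused for the guest instruction word: the
	 * exit trampoline already fetched the faulting instruction, so
	 * emulation can use last_inst without another guest access. */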

	lwz	r5, (PACA_EXMC+EX_CCR)(r13)
	stw	r5, VCPU_CR(r12)

	ld	r5, VCPU_HFLAGS(r12)
	rldicl.	r5, r5, 0, 63		/* CR = ((r5 & 1) == 0) */
	beq	no_dcbz32_off

	li	r4, 0
	mfspr	r5, SPRN_HID5
	rldimi	r5, r4, 6, 56
	mtspr	SPRN_HID5, r5

no_dcbz32_off:
	std	r14, VCPU_GPR(r14)(r12)
	std	r15, VCPU_GPR(r15)(r12)
	std	r16, VCPU_GPR(r16)(r12)
	std	r17, VCPU_GPR(r17)(r12)
	std	r18, VCPU_GPR(r18)(r12)
	std	r19, VCPU_GPR(r19)(r12)
	std	r20, VCPU_GPR(r20)(r12)
	std	r21, VCPU_GPR(r21)(r12)
	std	r22, VCPU_GPR(r22)(r12)
	std	r23, VCPU_GPR(r23)(r12)
	std	r24, VCPU_GPR(r24)(r12)
	std	r25, VCPU_GPR(r25)(r12)
	std	r26, VCPU_GPR(r26)(r12)
	std	r27, VCPU_GPR(r27)(r12)
	std	r28, VCPU_GPR(r28)(r12)
	std	r29, VCPU_GPR(r29)(r12)
	std	r30, VCPU_GPR(r30)(r12)
	std	r31, VCPU_GPR(r31)(r12)

	/* Save guest PC (R10) */
	std	r10, VCPU_PC(r12)

	/* Save guest msr (R11) */
	std	r11, VCPU_SHADOW_MSR(r12)

	/* Save guest CTR (in R12) */
	mfctr	r5
	std	r5, VCPU_CTR(r12)

	/* Save guest LR */
	mflr	r5
	std	r5, VCPU_LR(r12)

	/* Save guest XER */
	mfxer	r5
	std	r5, VCPU_XER(r12)

	/* Save guest DAR */
	ld	r5, (PACA_EXMC+EX_DAR)(r13)
	std	r5, VCPU_FAULT_DEAR(r12)

	/* Save guest DSISR */
	lwz	r5, (PACA_EXMC+EX_DSISR)(r13)
	std	r5, VCPU_FAULT_DSISR(r12)

	/* Restore host msr -> SRR1 */
	ld	r7, VCPU_HOST_MSR(r12)
	mtsrr1	r7

	/* Restore host IP -> SRR0 */
	ld	r6, VCPU_HOST_RETIP(r12)
	mtsrr0	r6

	/*
	 * For some interrupts, we need to call the real Linux
	 * handler, so it can do work for us. This has to happen
	 * as if the interrupt arrived from the kernel though,
	 * so let's fake it here where most state is restored.
	 *
	 * Call Linux for hardware interrupts/decrementer
	 * r3 = address of interrupt handler (exit reason)
	 */

	cmpwi	r3, BOOK3S_INTERRUPT_EXTERNAL
	beq	call_linux_handler
	cmpwi	r3, BOOK3S_INTERRUPT_DECREMENTER
	beq	call_linux_handler

	/* Back to Interruptible Mode! (goto kvm_return_point) */
	RFI

call_linux_handler:

	/*
	 * If we land here we need to jump back to the handler we
	 * came from.
	 *
	 * We have a page that we can access from real mode, so let's
	 * jump back to that and use it as a trampoline to get back into the
	 * interrupt handler!
	 *
	 * R3 still contains the exit code,
	 * R6 VCPU_HOST_RETIP and
	 * R7 VCPU_HOST_MSR.
	 */

	mtlr	r3

	ld	r5, VCPU_TRAMPOLINE_LOWMEM(r12)
	mtsrr0	r5
	LOAD_REG_IMMEDIATE(r5, MSR_KERNEL & ~(MSR_IR | MSR_DR))
	mtsrr1	r5

	RFI

.global kvm_return_point
kvm_return_point:

	/*
	 * Jump back to lightweight entry if we're supposed to
	 * go back into the guest.
	 */

	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
	mr	r5, r3

	/* Restore r3 (kvm_run) and r4 (vcpu) */
	REST_2GPRS(3, r1)
	bl	KVMPPC_HANDLE_EXIT

	/* If RESUME_GUEST, get back in the loop */
	cmpwi	r3, RESUME_GUEST
	beq	kvm_loop_lightweight

	cmpwi	r3, RESUME_GUEST_NV
	beq	kvm_loop_heavyweight
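
	/* RESUME_GUEST_NV additionally needs the non-volatile guest
	 * registers reloaded, hence the heavyweight loop. Everything
	 * else falls through and returns to our caller. */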

kvm_exit_loop:

	ld	r4, _LINK(r1)
	mtlr	r4

	/* Restore non-volatile host registers (r14 - r31) */
	REST_NVGPRS(r1)

	addi	r1, r1, SWITCH_FRAME_SIZE
	blr

kvm_loop_heavyweight:

	ld	r4, _LINK(r1)
	std	r4, (16 + SWITCH_FRAME_SIZE)(r1)

	/* Load vcpu and kvm_run */
	REST_2GPRS(3, r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r4)

	/* Jump back into the beginning of this function */
	b	kvm_start_lightweight

kvm_loop_lightweight:

	/* We'll need the vcpu pointer */
	REST_GPR(4, r1)

	/* Jump back into the beginning of this function */
	b	kvm_start_lightweight