/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
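/*
 * kvmppc_handle_exit() is implemented in C; the leading dot below refers
 * to its text entry point in the ppc64 ELFv1 function-descriptor ABI,
 * which is what the `bl` further down wants to reach.
 */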
#define KVMPPC_HANDLE_EXIT .kvmppc_handle_exit

#define ULONG_SIZE 8
#define VCPU_GPR(n)	(VCPU_GPRS + (n * ULONG_SIZE))
.macro DISABLE_INTERRUPTS
	mfmsr	r0		/* fetch current MSR */
	rldicl	r0,r0,48,1	/* rotate MSR_EE into bit 0 and clear it */
	rotldi	r0,r0,16	/* rotate back into place */
	mtmsrd	r0,1		/* write back; L=1 changes only EE/RI */
.endm
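/*
 * VCPU_LOAD_NVGPRS pulls the guest's non-volatile GPRs (r14 - r31) out of
 * the vcpu struct. It is used on first entry and on the heavyweight
 * re-entry path below; the lightweight path re-enters with those
 * registers still holding guest state.
 */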
#define VCPU_LOAD_NVGPRS(vcpu) \
	ld	r14, VCPU_GPR(r14)(vcpu); \
	ld	r15, VCPU_GPR(r15)(vcpu); \
	ld	r16, VCPU_GPR(r16)(vcpu); \
	ld	r17, VCPU_GPR(r17)(vcpu); \
	ld	r18, VCPU_GPR(r18)(vcpu); \
	ld	r19, VCPU_GPR(r19)(vcpu); \
	ld	r20, VCPU_GPR(r20)(vcpu); \
	ld	r21, VCPU_GPR(r21)(vcpu); \
	ld	r22, VCPU_GPR(r22)(vcpu); \
	ld	r23, VCPU_GPR(r23)(vcpu); \
	ld	r24, VCPU_GPR(r24)(vcpu); \
	ld	r25, VCPU_GPR(r25)(vcpu); \
	ld	r26, VCPU_GPR(r26)(vcpu); \
	ld	r27, VCPU_GPR(r27)(vcpu); \
	ld	r28, VCPU_GPR(r28)(vcpu); \
	ld	r29, VCPU_GPR(r29)(vcpu); \
	ld	r30, VCPU_GPR(r30)(vcpu); \
	ld	r31, VCPU_GPR(r31)(vcpu)
/*****************************************************************************
 *                                                                           *
 *     Guest entry / exit code that is in kernel module memory (highmem)     *
 *                                                                           *
 ****************************************************************************/
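/* Registers on entry:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 */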
_GLOBAL(__kvmppc_vcpu_entry)
	/* Write correct stack frame */
	mflr	r0
	std	r0, 16(r1)

	/* Save host state to the stack */
	stdu	r1, -SWITCH_FRAME_SIZE(r1)

	/* Save r3 (kvm_run) and r4 (vcpu) */
	SAVE_2GPRS(3, r1)

	/* Save non-volatile registers (r14 - r31) */
	SAVE_NVGPRS(r1)

	/* Save LR */
	std	r0, _LINK(r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r4)
	/* Save R1/R2 in the PACA */
	std	r1, PACA_KVM_HOST_R1(r13)
	std	r2, PACA_KVM_HOST_R2(r13)
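	/* r13 (the PACA) is the one piece of per-CPU state we can still get
	 * at with the MMU off, so the host stack and TOC are stashed there
	 * for the exit path. */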
	/* XXX swap in/out on load? */
	ld	r3, VCPU_HIGHMEM_HANDLER(r4)
	std	r3, PACA_KVM_VMHANDLER(r13)

	ld	r3, VCPU_TRAMPOLINE_ENTER(r4)
	std	r3, PACA_KVM_RMHANDLER(r13)
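	/* Two handlers are parked in the PACA: the real-mode entry
	 * trampoline (RMHANDLER) that we RFI through below to reach the
	 * guest, and the module-space exit handler (VMHANDLER) that the
	 * lowmem exit code jumps back up to. */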
kvm_start_lightweight:

	ld	r9, VCPU_PC(r4)			/* r9 = vcpu->arch.pc */
	ld	r10, VCPU_SHADOW_MSR(r4)	/* r10 = vcpu->arch.shadow_msr */
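	/* r9/r10 are not used again on this side of the RFI; they stay
	 * live so the entry trampoline can install them as the guest's
	 * PC and MSR. */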
	/* Load some guest state in the respective registers */
	ld	r3, VCPU_CTR(r4)	/* r3 = vcpu->arch.ctr */
	mtctr	r3			/* CTR = r3 */

	ld	r3, VCPU_LR(r4)		/* r3 = vcpu->arch.lr */
	mtlr	r3			/* LR = r3 */
	/* Some guests may need to have dcbz set to 32 byte length.
	 *
	 * Usually we ensure that by patching the guest's instructions
	 * to trap on dcbz and emulate it in the hypervisor.
	 *
	 * If we can, we should tell the CPU to use 32 byte dcbz though,
	 * because that's a lot faster.
	 */
	ld	r3, VCPU_HFLAGS(r4)
	rldicl.	r3, r3, 0, 63		/* CR = ((r3 & 1) == 0) */
	beq	no_dcbz32_on

	mfspr	r3, SPRN_HID5
	ori	r3, r3, 0x80		/* XXX HID5_dcbz32 = 0x80 */
	mtspr	SPRN_HID5, r3

no_dcbz32_on:
	/* This sets the Magic value for the trampoline */

	/* XXX this needs to move into a safe function, so we can
	   be sure we don't get any interrupts */

	li	r11, 1
	stb	r11, PACA_KVM_IN_GUEST(r13)
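	/* With this flag set, the exception vectors divert the next
	 * interrupt into the KVM exit path instead of the normal host
	 * handlers. */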
	ld	r3, PACA_KVM_RMHANDLER(r13)
	mtsrr0	r3

	LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
	mtsrr1	r3

	/* Jump to SLB patching handler and into our guest */
	RFI
/*
 * This is the handler in module memory. It gets jumped at from the
 * lowmem trampoline code, so it's basically the guest exit code.
 */
.global kvmppc_handler_highmem
kvmppc_handler_highmem:
	/*
	 * Register usage at this point:
	 *
	 * R0         = guest last inst
	 * R1         = host R1
	 * R2         = host R2
	 * R3         = guest PC
	 * R4         = guest shadow MSR
	 * R5         = guest fault DEAR
	 * R6         = guest fault DSISR
	 * R12        = exit handler id
	 * R13        = PACA
	 * PACA.KVM.* = guest *
	 *
	 */

	/* R7 = vcpu */
	ld	r7, GPR4(r1)
	/* Now save the guest state */

	stw	r0, VCPU_LAST_INST(r7)

	std	r3, VCPU_PC(r7)
	std	r4, VCPU_SHADOW_MSR(r7)
	std	r5, VCPU_FAULT_DEAR(r7)
	std	r6, VCPU_FAULT_DSISR(r7)
	ld	r5, VCPU_HFLAGS(r7)
	rldicl.	r5, r5, 0, 63		/* CR = ((r5 & 1) == 0) */
	beq	no_dcbz32_off

	li	r4, 0
	mfspr	r5, SPRN_HID5
	rldimi	r5, r4, 6, 56		/* clear HID5_dcbz32 again */
	mtspr	SPRN_HID5, r5

no_dcbz32_off:
	std	r14, VCPU_GPR(r14)(r7)
	std	r15, VCPU_GPR(r15)(r7)
	std	r16, VCPU_GPR(r16)(r7)
	std	r17, VCPU_GPR(r17)(r7)
	std	r18, VCPU_GPR(r18)(r7)
	std	r19, VCPU_GPR(r19)(r7)
	std	r20, VCPU_GPR(r20)(r7)
	std	r21, VCPU_GPR(r21)(r7)
	std	r22, VCPU_GPR(r22)(r7)
	std	r23, VCPU_GPR(r23)(r7)
	std	r24, VCPU_GPR(r24)(r7)
	std	r25, VCPU_GPR(r25)(r7)
	std	r26, VCPU_GPR(r26)(r7)
	std	r27, VCPU_GPR(r27)(r7)
	std	r28, VCPU_GPR(r28)(r7)
	std	r29, VCPU_GPR(r29)(r7)
	std	r30, VCPU_GPR(r30)(r7)
	std	r31, VCPU_GPR(r31)(r7)
	/* XXX convert to safe function call */

	/* Restore host msr -> SRR1 */
	ld	r6, VCPU_HOST_MSR(r7)
	mtsrr1	r6

	/* Restore host IP -> SRR0 */
	ld	r5, VCPU_HOST_RETIP(r7)
	mtsrr0	r5
	/*
	 * For some interrupts, we need to call the real Linux
	 * handler, so it can do work for us. This has to happen
	 * as if the interrupt arrived from the kernel though,
	 * so let's fake it here where most state is restored.
	 *
	 * Call Linux for hardware interrupts/decrementer
	 * r3 = address of interrupt handler (exit reason)
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	call_linux_handler
	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
	beq	call_linux_handler
	/* Back to Interruptible Mode! (goto kvm_return_point) */

	RFI	/* SRR0 = host RETIP (kvm_return_point), SRR1 = host MSR */

call_linux_handler:
	/*
	 * If we land here we need to jump back to the handler we
	 * came from.
	 *
	 * We have a page that we can access from real mode, so let's
	 * jump back to that and use it as a trampoline to get back into the
	 * interrupt handler!
	 *
	 * R3 still contains the exit code,
	 * R5 VCPU_HOST_RETIP and
	 * R6 VCPU_HOST_MSR
	 */
	ld	r4, VCPU_TRAMPOLINE_LOWMEM(r7)
	mtsrr0	r4
	LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
	mtsrr1	r3

	RFI
.global kvm_return_point
kvm_return_point:
	/* Jump back to lightweight entry if we're supposed to */
	/* go back into the guest */

	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
	mr	r5, r12

	/* Restore r3 (kvm_run) and r4 (vcpu) */
	REST_2GPRS(3, r1)
	bl	KVMPPC_HANDLE_EXIT
	/* If RESUME_GUEST, get back in the loop */
	cmpwi	r3, RESUME_GUEST
	beq	kvm_loop_lightweight

	cmpwi	r3, RESUME_GUEST_NV
	beq	kvm_loop_heavyweight
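	/* Everything else falls through and returns to the host.
	 * RESUME_GUEST_NV takes the heavyweight path because the exit
	 * handler may have modified the guest's non-volatile registers in
	 * the vcpu; plain RESUME_GUEST re-enters with them untouched. */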
	/* Restore non-volatile host registers (r14 - r31) */
	REST_NVGPRS(r1)

	addi	r1, r1, SWITCH_FRAME_SIZE
	ld	r0, 16(r1)
	mtlr	r0
	blr
kvm_loop_heavyweight:

	/* Put the caller's LR back into the caller's frame */
	ld	r4, _LINK(r1)
	std	r4, (16 + SWITCH_FRAME_SIZE)(r1)

	/* Load vcpu and kvm_run */
	REST_2GPRS(3, r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r4)

	/* Jump back into the beginning of this function */
	b	kvm_start_lightweight
kvm_loop_lightweight:

	/* We'll need the vcpu pointer */
	ld	r4, GPR4(r1)

	/* Jump back into the beginning of this function */
	b	kvm_start_lightweight