/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>

#define KVMPPC_HANDLE_EXIT .kvmppc_handle_exit
#define ULONG_SIZE 8
#define VCPU_GPR(n)     (VCPU_GPRS + (n * ULONG_SIZE))
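/* ppc_asm.h defines the symbolic register names as plain numbers
 * (r14 is just 14), so VCPU_GPR(r14) evaluates to the byte offset
 * VCPU_GPRS + 14 * ULONG_SIZE of the guest's r14 slot in the vcpu */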

.macro mfpaca tmp_reg, src_reg, offset, vcpu_reg
        ld      \tmp_reg, (PACA_EXMC+\offset)(r13)
        std     \tmp_reg, VCPU_GPR(\src_reg)(\vcpu_reg)
.endm
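/* mfpaca moves a guest register that was stashed in the PACA's exception
 * save area over into its slot in the vcpu struct; e.g.
 * "mfpaca r5, r10, EX_R10, r12" reads PACA_EXMC+EX_R10 into r5
 * (clobbering it) and stores the value at VCPU_GPR(r10)(r12) */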

.macro DISABLE_INTERRUPTS
        mfmsr   r0
        rldicl  r0,r0,48,1
        rotldi  r0,r0,16
        mtmsrd  r0,1
.endm
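/* DISABLE_INTERRUPTS clears MSR_EE without needing a mask register:
 * rotating left by 48 parks EE (bit 15) in the MSB, rldicl's mask
 * clears that bit, and the rotate by 16 completes the full 64-bit
 * rotation so everything else ends up back in place for the mtmsrd */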

#define VCPU_LOAD_NVGPRS(vcpu) \
        ld      r14, VCPU_GPR(r14)(vcpu); \
        ld      r15, VCPU_GPR(r15)(vcpu); \
        ld      r16, VCPU_GPR(r16)(vcpu); \
        ld      r17, VCPU_GPR(r17)(vcpu); \
        ld      r18, VCPU_GPR(r18)(vcpu); \
        ld      r19, VCPU_GPR(r19)(vcpu); \
        ld      r20, VCPU_GPR(r20)(vcpu); \
        ld      r21, VCPU_GPR(r21)(vcpu); \
        ld      r22, VCPU_GPR(r22)(vcpu); \
        ld      r23, VCPU_GPR(r23)(vcpu); \
        ld      r24, VCPU_GPR(r24)(vcpu); \
        ld      r25, VCPU_GPR(r25)(vcpu); \
        ld      r26, VCPU_GPR(r26)(vcpu); \
        ld      r27, VCPU_GPR(r27)(vcpu); \
        ld      r28, VCPU_GPR(r28)(vcpu); \
        ld      r29, VCPU_GPR(r29)(vcpu); \
        ld      r30, VCPU_GPR(r30)(vcpu); \
        ld      r31, VCPU_GPR(r31)(vcpu);

/*****************************************************************************
 *                                                                           *
 *     Guest entry / exit code that is in kernel module memory (highmem)     *
 *                                                                           *
 ****************************************************************************/

/* Registers:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 */
_GLOBAL(__kvmppc_vcpu_entry)

kvm_start_entry:
        /* Write correct stack frame */
        mflr    r0
        std     r0,16(r1)

        /* Save host state to the stack */
        stdu    r1, -SWITCH_FRAME_SIZE(r1)

        /* Save r3 (kvm_run) and r4 (vcpu) */
        SAVE_2GPRS(3, r1)

        /* Save non-volatile registers (r14 - r31) */
        SAVE_NVGPRS(r1)

        /* Save LR */
        std     r0, _LINK(r1)

        /* Load non-volatile guest state from the vcpu */
        VCPU_LOAD_NVGPRS(r4)

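/*
 * Lightweight entry point: host state is already on the stack and the
 * guest's non-volatile registers are already live, so the resume loop
 * below can re-enter here directly.
 */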
kvm_start_lightweight:

        ld      r9, VCPU_PC(r4)                 /* r9 = vcpu->arch.pc */
        ld      r10, VCPU_SHADOW_MSR(r4)        /* r10 = vcpu->arch.shadow_msr */

        DISABLE_INTERRUPTS

        /* Save R1/R2 in the PACA */
        std     r1, PACAR1(r13)
        std     r2, (PACA_EXMC+EX_SRR0)(r13)
        ld      r3, VCPU_HIGHMEM_HANDLER(r4)
        std     r3, PACASAVEDMSR(r13)

        ld      r3, VCPU_TRAMPOLINE_ENTER(r4)
        mtsrr0  r3

        LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
        mtsrr1  r3
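        /* The RFI at the end of this path enters the trampoline in real
         * mode (IR/DR off); it patches the SLB over to the guest context
         * before the final jump into guest code */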

        /* Load guest state in the respective registers */
        lwz     r3, VCPU_CR(r4)         /* r3 = vcpu->arch.cr */
        stw     r3, (PACA_EXMC + EX_CCR)(r13)

        ld      r3, VCPU_CTR(r4)        /* r3 = vcpu->arch.ctr */
        mtctr   r3                      /* CTR = r3 */

        ld      r3, VCPU_LR(r4)         /* r3 = vcpu->arch.lr */
        mtlr    r3                      /* LR = r3 */

        ld      r3, VCPU_XER(r4)        /* r3 = vcpu->arch.xer */
        std     r3, (PACA_EXMC + EX_R3)(r13)

        /* Some guests may need to have dcbz set to 32 byte length.
         *
         * Usually we ensure that by patching the guest's instructions
         * to trap on dcbz and emulate it in the hypervisor.
         *
         * If we can, we should tell the CPU to use 32 byte dcbz though,
         * because that's a lot faster.
         */

        ld      r3, VCPU_HFLAGS(r4)
        rldicl. r3, r3, 0, 63           /* CR = ((r3 & 1) == 0) */
        beq     no_dcbz32_on

        mfspr   r3,SPRN_HID5
        ori     r3, r3, 0x80            /* XXX HID5_dcbz32 = 0x80 */
        mtspr   SPRN_HID5,r3

no_dcbz32_on:
        /* Load guest GPRs */

        ld      r3, VCPU_GPR(r9)(r4)
        std     r3, (PACA_EXMC + EX_R9)(r13)
        ld      r3, VCPU_GPR(r10)(r4)
        std     r3, (PACA_EXMC + EX_R10)(r13)
        ld      r3, VCPU_GPR(r11)(r4)
        std     r3, (PACA_EXMC + EX_R11)(r13)
        ld      r3, VCPU_GPR(r12)(r4)
        std     r3, (PACA_EXMC + EX_R12)(r13)
        ld      r3, VCPU_GPR(r13)(r4)
        std     r3, (PACA_EXMC + EX_R13)(r13)
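        /* Guest r9-r13 are staged through the PACA rather than loaded
         * directly: r13 has to keep pointing at the host PACA until the
         * very last moment, so the trampoline picks these up from there */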

        ld      r0, VCPU_GPR(r0)(r4)
        ld      r1, VCPU_GPR(r1)(r4)
        ld      r2, VCPU_GPR(r2)(r4)
        ld      r3, VCPU_GPR(r3)(r4)
        ld      r5, VCPU_GPR(r5)(r4)
        ld      r6, VCPU_GPR(r6)(r4)
        ld      r7, VCPU_GPR(r7)(r4)
        ld      r8, VCPU_GPR(r8)(r4)
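        /* r4 holds the vcpu pointer itself, so it has to go last */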
        ld      r4, VCPU_GPR(r4)(r4)

        /* Flag that we are in the guest, so that interrupt exits get
         * routed into the KVM trampoline */

        li      r11, 1
        stb     r11, PACA_KVM_IN_GUEST(r13)

        /* Jump to SLB patching handler and into our guest */
        RFI

/*
 * This is the handler in module memory. It gets jumped to from the
 * lowmem trampoline code, so it's basically the guest exit code.
 */

.global kvmppc_handler_highmem
kvmppc_handler_highmem:

        /*
         * Register usage at this point:
         *
         * R00   = guest R13
         * R01   = host R1
         * R02   = host R2
         * R10   = guest PC
         * R11   = guest MSR
         * R12   = exit handler id
         * R13   = PACA
         * PACA.exmc.R9    = guest R1
         * PACA.exmc.R10   = guest R10
         * PACA.exmc.R11   = guest R11
         * PACA.exmc.R12   = guest R12
         * PACA.exmc.R13   = guest R2
         * PACA.exmc.DAR   = guest DAR
         * PACA.exmc.DSISR = guest DSISR
         * PACA.exmc.LR    = guest instruction
         * PACA.exmc.CCR   = guest CR
         * PACA.exmc.SRR0  = guest R0
         */

        std     r3, (PACA_EXMC+EX_R3)(r13)

        /* Save the exit id in R3 */
        mr      r3, r12

        /* R12 = vcpu */
        ld      r12, GPR4(r1)

        /* Now save the guest state */

        std     r0, VCPU_GPR(r13)(r12)
        std     r4, VCPU_GPR(r4)(r12)
        std     r5, VCPU_GPR(r5)(r12)
        std     r6, VCPU_GPR(r6)(r12)
        std     r7, VCPU_GPR(r7)(r12)
        std     r8, VCPU_GPR(r8)(r12)
        std     r9, VCPU_GPR(r9)(r12)

        /* Get registers from the PACA */
        mfpaca  r5, r0, EX_SRR0, r12
        mfpaca  r5, r3, EX_R3, r12
        mfpaca  r5, r1, EX_R9, r12
        mfpaca  r5, r10, EX_R10, r12
        mfpaca  r5, r11, EX_R11, r12
        mfpaca  r5, r12, EX_R12, r12
        mfpaca  r5, r2, EX_R13, r12
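        /* The EX_* slot names follow the register map above rather than
         * the guest register they hold: e.g. guest r0 lives in exmc.SRR0
         * and guest r2 in exmc.R13 */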

        lwz     r5, (PACA_EXMC+EX_LR)(r13)
        stw     r5, VCPU_LAST_INST(r12)

        lwz     r5, (PACA_EXMC+EX_CCR)(r13)
        stw     r5, VCPU_CR(r12)

        ld      r5, VCPU_HFLAGS(r12)
        rldicl. r5, r5, 0, 63           /* CR = ((r5 & 1) == 0) */
        beq     no_dcbz32_off

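        /* Turn 32 byte dcbz back off, undoing the HID5 change on entry */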
        mfspr   r5,SPRN_HID5
        rldimi  r5,r5,6,56
        mtspr   SPRN_HID5,r5

no_dcbz32_off:

        std     r14, VCPU_GPR(r14)(r12)
        std     r15, VCPU_GPR(r15)(r12)
        std     r16, VCPU_GPR(r16)(r12)
        std     r17, VCPU_GPR(r17)(r12)
        std     r18, VCPU_GPR(r18)(r12)
        std     r19, VCPU_GPR(r19)(r12)
        std     r20, VCPU_GPR(r20)(r12)
        std     r21, VCPU_GPR(r21)(r12)
        std     r22, VCPU_GPR(r22)(r12)
        std     r23, VCPU_GPR(r23)(r12)
        std     r24, VCPU_GPR(r24)(r12)
        std     r25, VCPU_GPR(r25)(r12)
        std     r26, VCPU_GPR(r26)(r12)
        std     r27, VCPU_GPR(r27)(r12)
        std     r28, VCPU_GPR(r28)(r12)
        std     r29, VCPU_GPR(r29)(r12)
        std     r30, VCPU_GPR(r30)(r12)
        std     r31, VCPU_GPR(r31)(r12)

        /* Save guest PC (R10) */
        std     r10, VCPU_PC(r12)

        /* Save guest MSR (R11) */
        std     r11, VCPU_SHADOW_MSR(r12)

        /* Save guest CTR */
        mfctr   r5
        std     r5, VCPU_CTR(r12)

        /* Save guest LR */
        mflr    r5
        std     r5, VCPU_LR(r12)

        /* Save guest XER */
        mfxer   r5
        std     r5, VCPU_XER(r12)

        /* Save guest DAR */
        ld      r5, (PACA_EXMC+EX_DAR)(r13)
        std     r5, VCPU_FAULT_DEAR(r12)

        /* Save guest DSISR */
        lwz     r5, (PACA_EXMC+EX_DSISR)(r13)
        std     r5, VCPU_FAULT_DSISR(r12)

        /* Restore host MSR -> SRR1 */
        ld      r7, VCPU_HOST_MSR(r12)
        mtsrr1  r7

        /* Restore host IP -> SRR0 */
        ld      r6, VCPU_HOST_RETIP(r12)
        mtsrr0  r6

        /*
         * For some interrupts, we need to call the real Linux
         * handler, so it can do work for us. This has to happen
         * as if the interrupt arrived from the kernel though,
         * so let's fake it here where most state is restored.
         *
         * Call Linux for hardware interrupts/decrementer
         * r3 = address of interrupt handler (exit reason)
         */

        cmpwi   r3, BOOK3S_INTERRUPT_EXTERNAL
        beq     call_linux_handler
        cmpwi   r3, BOOK3S_INTERRUPT_DECREMENTER
        beq     call_linux_handler

        /* Back to interruptible mode! (goto kvm_return_point) */
        RFI

call_linux_handler:

        /*
         * If we land here we need to jump back to the handler we
         * came from.
         *
         * We have a page that we can access from real mode, so let's
         * jump back to that and use it as a trampoline to get back into the
         * interrupt handler!
         *
         * R3 still contains the exit code,
         * R6 VCPU_HOST_RETIP and
         * R7 VCPU_HOST_MSR
         */

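        /* The exit reason doubles as the exception vector offset
         * (BOOK3S_INTERRUPT_EXTERNAL is 0x500); presumably the lowmem
         * trampoline branches through LR to reach the Linux handler */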
        mtlr    r3

        ld      r5, VCPU_TRAMPOLINE_LOWMEM(r12)
        mtsrr0  r5
        LOAD_REG_IMMEDIATE(r5, MSR_KERNEL & ~(MSR_IR | MSR_DR))
        mtsrr1  r5

        RFI

.global kvm_return_point
kvm_return_point:

        /* Jump back to lightweight entry if we're supposed to go back
         * into the guest */

        /* Pass the exit number as 3rd argument to kvmppc_handle_exit */
        mr      r5, r3

        /* Restore r3 (kvm_run) and r4 (vcpu) */
        REST_2GPRS(3, r1)
        bl      KVMPPC_HANDLE_EXIT
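        /* r3 now holds the handler's resume code (RESUME_*) */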

        /* If RESUME_GUEST, get back in the loop */
        cmpwi   r3, RESUME_GUEST
        beq     kvm_loop_lightweight

        cmpwi   r3, RESUME_GUEST_NV
        beq     kvm_loop_heavyweight

kvm_exit_loop:

        ld      r4, _LINK(r1)
        mtlr    r4

        /* Restore non-volatile host registers (r14 - r31) */
        REST_NVGPRS(r1)

        addi    r1, r1, SWITCH_FRAME_SIZE
        blr

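/*
 * Heavyweight re-entry (RESUME_GUEST_NV): the guest's non-volatile
 * registers need a full reload from the vcpu, and the return address is
 * parked back in the LR save slot of the caller's frame.
 */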
kvm_loop_heavyweight:

        ld      r4, _LINK(r1)
        std     r4, (16 + SWITCH_FRAME_SIZE)(r1)

        /* Load r3 (kvm_run) and r4 (vcpu) */
        REST_2GPRS(3, r1)

        /* Load non-volatile guest state from the vcpu */
        VCPU_LOAD_NVGPRS(r4)

        /* Jump back to the lightweight entry point */
        b       kvm_start_lightweight

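/*
 * Lightweight re-entry (RESUME_GUEST): r14-r31 are non-volatile across
 * the C call, so the guest values saved on exit are still live and only
 * the vcpu pointer needs restoring.
 */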
kvm_loop_lightweight:

        /* We'll need the vcpu pointer */
        REST_GPR(4, r1)

        /* Jump back to the lightweight entry point */
        b       kvm_start_lightweight
398