KVM: Replace get_mt_mask_shift with get_mt_mask
arch/x86/kvm/svm.c
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/kvm_host.h>

#include "kvm_svm.h"
#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>

#include <asm/desc.h>

#include <asm/virtext.h>

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

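/* SVM feature bits, as reported in CPUID 0x8000000A:EDX */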
#define SVM_FEATURE_NPT  (1 << 0)
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

/* Turn on to get debugging output */
/* #define NESTED_DEBUG */

#ifdef NESTED_DEBUG
#define nsvm_printk(fmt, args...) printk(KERN_INFO fmt, ## args)
#else
#define nsvm_printk(fmt, args...) do {} while (0)
#endif

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled = false;
#endif
static int npt = 1;

module_param(npt, int, S_IRUGO);

static int nested = 0;
module_param(nested, int, S_IRUGO);

static void svm_flush_tlb(struct kvm_vcpu *vcpu);

static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_vmsave(struct vcpu_svm *svm, void *nested_vmcb,
			     void *arg2, void *opaque);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code);

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline bool is_nested(struct vcpu_svm *svm)
{
	return svm->nested_vmcb;
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
static uint32_t svm_features;

struct svm_init_data {
	int cpu;
	int r;
};

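/*
 * Layout of the MSR permission map: three 2 KB ranges, covering MSRs
 * 0x00000000-0x00001fff, 0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff.
 * Each MSR takes two bits (read intercept, write intercept), so one
 * range describes 2048 * 8 / 2 = 8192 MSRs.
 */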
static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

#define MAX_INST_SIZE 15

static inline u32 svm_has(u32 feat)
{
	return svm_features & feat;
}

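/*
 * Thin wrappers around the SVM instructions. __ex() adds the
 * fault-on-reboot fixup, so a fault raised by one of these instructions
 * while SVM is being disabled during reboot does not crash the host.
 */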
static inline void clgi(void)
{
	asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
	asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
}

static inline unsigned long kvm_read_cr2(void)
{
	unsigned long cr2;

	asm volatile ("mov %%cr2, %0" : "=r" (cr2));
	return cr2;
}

static inline void kvm_write_cr2(unsigned long val)
{
	asm volatile ("mov %0, %%cr2" :: "r" (val));
}

static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
	to_svm(vcpu)->asid_generation--;
}

static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

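/*
 * EFER.SVME must stay set while the VMCB is in use, so it is ORed into
 * the value the guest sees; the guest-requested value is tracked in
 * vcpu->arch.shadow_efer.
 */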
static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
	vcpu->arch.shadow_efer = efer;
}

static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/* If we are within a nested VM we'd better #VMEXIT and let the
	   guest handle the exception */
	if (nested_svm_check_exception(svm, nr, has_error_code, error_code))
		return;

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!svm->next_rip) {
		printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
		       __func__, kvm_rip_read(vcpu), svm->next_rip);

	kvm_rip_write(vcpu, svm->next_rip);
	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void *garbage)
{
	cpu_svm_disable();
}

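/*
 * Per-cpu hardware enable: set EFER.SVME and point MSR_VM_HSAVE_PA at
 * this cpu's host save area, which the cpu uses to stash host state
 * across VMRUN/#VMEXIT.
 */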
static void svm_hardware_enable(void *garbage)
{
	struct svm_cpu_data *svm_data;
	uint64_t efer;
	struct desc_ptr gdt_descr;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	if (!has_svm()) {
		printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
		return;
	}
	svm_data = per_cpu(svm_data, me);

	if (!svm_data) {
		printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
		       me);
		return;
	}

	svm_data->asid_generation = 1;
	svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	svm_data->next_asid = svm_data->max_asid + 1;

	asm volatile ("sgdt %0" : "=m"(gdt_descr));
	gdt = (struct desc_struct *)gdt_descr.address;
	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA,
	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);

	if (!svm_data)
		return;

	per_cpu(svm_data, cpu) = NULL;
	__free_page(svm_data->save_area);
	kfree(svm_data);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *svm_data;
	int r;

	svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!svm_data)
		return -ENOMEM;
	svm_data->cpu = cpu;
	svm_data->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!svm_data->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = svm_data;

	return 0;

err_1:
	kfree(svm_data);
	return r;
}

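/*
 * Update the permission bits for one MSR: bit 0 of its two-bit slot
 * intercepts reads, bit 1 intercepts writes. A clear bit means the
 * guest may access the MSR directly without a #VMEXIT.
 */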
static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr >= msrpm_ranges[i] &&
		    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
			u32 msr_offset = (i * MSRS_IN_RANGE + msr -
					  msrpm_ranges[i]) * 2;

			u32 *base = msrpm + (msr_offset / 32);
			u32 msr_shift = msr_offset % 32;
			u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
			*base = (*base & ~(0x3 << msr_shift)) |
				(mask << msr_shift);
			return;
		}
	}
	BUG();
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

#ifdef CONFIG_X86_64
	set_msr_interception(msrpm, MSR_GS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_FS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_KERNEL_GS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_LSTAR, 1, 1);
	set_msr_interception(msrpm, MSR_CSTAR, 1, 1);
	set_msr_interception(msrpm, MSR_SYSCALL_MASK, 1, 1);
#endif
	set_msr_interception(msrpm, MSR_K6_STAR, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
}

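/*
 * With LBR virtualization enabled the cpu saves and restores the LBR
 * MSRs on VMRUN/#VMEXIT, so it is safe to let the guest access them
 * directly; otherwise they stay intercepted.
 */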
static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 1;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 0;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (nested) {
		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME);
	}

	for_each_online_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	svm_features = cpuid_edx(SVM_CPUID_FUNC);

	if (!svm_has(SVM_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled) {
		printk(KERN_INFO "kvm: Nested Paging enabled\n");
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

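/*
 * Set up a fresh VMCB: intercept masks first, then the architectural
 * reset state of the segment registers, control registers and rip.
 */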
static void init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	control->intercept_cr_read =	INTERCEPT_CR0_MASK |
					INTERCEPT_CR3_MASK |
					INTERCEPT_CR4_MASK;

	control->intercept_cr_write =	INTERCEPT_CR0_MASK |
					INTERCEPT_CR3_MASK |
					INTERCEPT_CR4_MASK |
					INTERCEPT_CR8_MASK;

	control->intercept_dr_read =	INTERCEPT_DR0_MASK |
					INTERCEPT_DR1_MASK |
					INTERCEPT_DR2_MASK |
					INTERCEPT_DR3_MASK;

	control->intercept_dr_write =	INTERCEPT_DR0_MASK |
					INTERCEPT_DR1_MASK |
					INTERCEPT_DR2_MASK |
					INTERCEPT_DR3_MASK |
					INTERCEPT_DR5_MASK |
					INTERCEPT_DR7_MASK;

	control->intercept_exceptions = (1 << PF_VECTOR) |
					(1 << UD_VECTOR) |
					(1 << MC_VECTOR);

	control->intercept =	(1ULL << INTERCEPT_INTR) |
				(1ULL << INTERCEPT_NMI) |
				(1ULL << INTERCEPT_SMI) |
				(1ULL << INTERCEPT_CPUID) |
				(1ULL << INTERCEPT_INVD) |
				(1ULL << INTERCEPT_HLT) |
				(1ULL << INTERCEPT_INVLPG) |
				(1ULL << INTERCEPT_INVLPGA) |
				(1ULL << INTERCEPT_IOIO_PROT) |
				(1ULL << INTERCEPT_MSR_PROT) |
				(1ULL << INTERCEPT_TASK_SWITCH) |
				(1ULL << INTERCEPT_SHUTDOWN) |
				(1ULL << INTERCEPT_VMRUN) |
				(1ULL << INTERCEPT_VMMCALL) |
				(1ULL << INTERCEPT_VMLOAD) |
				(1ULL << INTERCEPT_VMSAVE) |
				(1ULL << INTERCEPT_STGI) |
				(1ULL << INTERCEPT_CLGI) |
				(1ULL << INTERCEPT_SKINIT) |
				(1ULL << INTERCEPT_WBINVD) |
				(1ULL << INTERCEPT_MONITOR) |
				(1ULL << INTERCEPT_MWAIT);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = __pa(svm->msrpm);
	control->tsc_offset = 0;
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;
	/*
	 * cs.base should really be 0xffff0000, but vmx can't handle that, so
	 * be consistent with it.
	 *
	 * Replace when we have real mode working for vmx.
	 */
	save->cs.base = 0xf0000;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	save->efer = EFER_SVME;
	save->dr6 = 0xffff0ff0;
	save->dr7 = 0x400;
	save->rflags = 2;
	save->rip = 0x0000fff0;
	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

	/*
	 * The cr0 value on cpu init should be 0x60000010; we enable the
	 * cpu cache by default. The orderly way is to enable the cache
	 * in the BIOS.
	 */
	save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
	save->cr4 = X86_CR4_PAE;
	/* rdx = ?? */

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl = 1;
		control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
					(1ULL << INTERCEPT_INVLPG));
		control->intercept_exceptions &= ~(1 << PF_VECTOR);
		control->intercept_cr_read &= ~(INTERCEPT_CR0_MASK |
						INTERCEPT_CR3_MASK);
		control->intercept_cr_write &= ~(INTERCEPT_CR0_MASK |
						 INTERCEPT_CR3_MASK);
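		/*
		 * 0x0007040600070406 is the power-on default PAT value
		 * (WB, WT, UC-, UC, repeated); with nested paging active
		 * the guest PAT is taken from this VMCB field.
		 */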
		save->g_pat = 0x0007040600070406ULL;
		/* enable caching because the QEMU Bios doesn't enable it */
		save->cr0 = X86_CR0_ET;
		save->cr3 = 0;
		save->cr4 = 0;
	}
	force_new_asid(&svm->vcpu);

	svm->nested_vmcb = 0;
	svm->vcpu.arch.hflags = HF_GIF_MASK;
}

static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	init_vmcb(svm);

	if (vcpu->vcpu_id != 0) {
		kvm_rip_write(vcpu, 0);
		svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
		svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
	}
	vcpu->arch.regs_avail = ~0;
	vcpu->arch.regs_dirty = ~0;

	return 0;
}

static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
	struct vcpu_svm *svm;
	struct page *page;
	struct page *msrpm_pages;
	struct page *hsave_page;
	struct page *nested_msrpm_pages;
	int err;

	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!svm) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
	if (err)
		goto free_svm;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		err = -ENOMEM;
		goto uninit;
	}

	err = -ENOMEM;
	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!msrpm_pages)
		goto uninit;

	nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!nested_msrpm_pages)
		goto uninit;

	svm->msrpm = page_address(msrpm_pages);
	svm_vcpu_init_msrpm(svm->msrpm);

	hsave_page = alloc_page(GFP_KERNEL);
	if (!hsave_page)
		goto uninit;
	svm->hsave = page_address(hsave_page);

	svm->nested_msrpm = page_address(nested_msrpm_pages);

	svm->vmcb = page_address(page);
	clear_page(svm->vmcb);
	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	svm->asid_generation = 0;
	init_vmcb(svm);

	fx_init(&svm->vcpu);
	svm->vcpu.fpu_active = 1;
	svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
	if (svm->vcpu.vcpu_id == 0)
		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;

	return &svm->vcpu;

uninit:
	kvm_vcpu_uninit(&svm->vcpu);
free_svm:
	kmem_cache_free(kvm_vcpu_cache, svm);
out:
	return ERR_PTR(err);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
	__free_page(virt_to_page(svm->hsave));
	__free_pages(virt_to_page(svm->nested_msrpm), MSRPM_ALLOC_ORDER);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, svm);
}

static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	if (unlikely(cpu != vcpu->cpu)) {
		u64 tsc_this, delta;

		/*
		 * Make sure that the guest sees a monotonically
		 * increasing TSC.
		 */
		rdtscll(tsc_this);
		delta = vcpu->arch.host_tsc - tsc_this;
		svm->vmcb->control.tsc_offset += delta;
		vcpu->cpu = cpu;
		kvm_migrate_timers(vcpu);
	}

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	++vcpu->stat.host_state_reload;
	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

	rdtscll(vcpu->arch.host_tsc);
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static void svm_set_vintr(struct vcpu_svm *svm)
{
	svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR;
}

static void svm_clear_vintr(struct vcpu_svm *svm)
{
	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;

	/* AMD's VMCB does not have an explicit unusable field, so emulate it
	 * for cross vendor migration purposes by "not present"
	 */
	var->unusable = !var->present || (var->type == 0);

	switch (seg) {
	case VCPU_SREG_CS:
		/*
		 * SVM always stores 0 for the 'G' bit in the CS selector in
		 * the VMCB on a VMEXIT. This hurts cross-vendor migration:
		 * Intel's VMENTRY has a check on the 'G' bit.
		 */
		var->g = s->limit > 0xfffff;
		break;
	case VCPU_SREG_TR:
		/*
		 * Work around a bug where the busy flag in the tr selector
		 * isn't exposed
		 */
		var->type |= 0x2;
		break;
	case VCPU_SREG_DS:
	case VCPU_SREG_ES:
	case VCPU_SREG_FS:
	case VCPU_SREG_GS:
		/*
		 * The accessed bit must always be set in the segment
		 * descriptor cache, although it can be cleared in the
		 * descriptor, the cached bit always remains at 1. Since
		 * Intel has a check on this, set it here to support
		 * cross-vendor migration.
		 */
		if (!var->unusable)
			var->type |= 0x1;
		break;
	}
}

static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	return save->cpl;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.idtr.limit;
	dt->base = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.idtr.limit = dt->limit;
	svm->vmcb->save.idtr.base = dt->base;
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.gdtr.limit;
	dt->base = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.gdtr.limit = dt->limit;
	svm->vmcb->save.gdtr.base = dt->base;
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.shadow_efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
			vcpu->arch.shadow_efer |= EFER_LMA;
			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
			vcpu->arch.shadow_efer &= ~EFER_LMA;
			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
		}
	}
#endif
	if (npt_enabled)
		goto set;

	if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
		vcpu->fpu_active = 1;
	}

	vcpu->arch.cr0 = cr0;
	cr0 |= X86_CR0_PG | X86_CR0_WP;
	if (!vcpu->fpu_active) {
		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
		cr0 |= X86_CR0_TS;
	}
set:
	/*
	 * re-enable caching here because the QEMU bios
	 * does not do it - this results in some delay at
	 * reboot
	 */
	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
	svm->vmcb->save.cr0 = cr0;
}

static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
	unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;

	if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
		force_new_asid(vcpu);

	vcpu->arch.cr4 = cr4;
	if (!npt_enabled)
		cr4 |= X86_CR4_PAE;
	cr4 |= host_cr4_mce;
	to_svm(vcpu)->vmcb->save.cr4 = cr4;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	if (var->unusable)
		s->attrib = 0;
	else {
		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
	}
	if (seg == VCPU_SREG_CS)
		svm->vmcb->save.cpl
			= (svm->vmcb->save.cs.attrib
			   >> SVM_SELECTOR_DPL_SHIFT) & 3;
}

static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
{
	int old_debug = vcpu->guest_debug;
	struct vcpu_svm *svm = to_svm(vcpu);

	vcpu->guest_debug = dbg->control;

	svm->vmcb->control.intercept_exceptions &=
		~((1 << DB_VECTOR) | (1 << BP_VECTOR));
	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
		if (vcpu->guest_debug &
		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
			svm->vmcb->control.intercept_exceptions |=
				1 << DB_VECTOR;
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			svm->vmcb->control.intercept_exceptions |=
				1 << BP_VECTOR;
	} else
		vcpu->guest_debug = 0;

	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
		svm->vmcb->save.dr7 = dbg->arch.debugreg[7];
	else
		svm->vmcb->save.dr7 = vcpu->arch.dr7;

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		svm->vmcb->save.rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
	else if (old_debug & KVM_GUESTDBG_SINGLESTEP)
		svm->vmcb->save.rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);

	return 0;
}

static void load_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void save_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

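/*
 * Hand out the next free ASID. When the pool is exhausted, start a new
 * generation: request a full TLB flush on the next VMRUN and begin
 * reassigning ASIDs from 1.
 */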
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
{
	if (svm_data->next_asid > svm_data->max_asid) {
		++svm_data->asid_generation;
		svm_data->next_asid = 1;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	svm->vcpu.cpu = svm_data->cpu;
	svm->asid_generation = svm_data->asid_generation;
	svm->vmcb->control.asid = svm_data->next_asid++;
}

static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned long val;

	switch (dr) {
	case 0 ... 3:
		val = vcpu->arch.db[dr];
		break;
	case 6:
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
			val = vcpu->arch.dr6;
		else
			val = svm->vmcb->save.dr6;
		break;
	case 7:
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
			val = vcpu->arch.dr7;
		else
			val = svm->vmcb->save.dr7;
		break;
	default:
		val = 0;
	}

	KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
	return val;
}

static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	KVMTRACE_2D(DR_WRITE, vcpu, (u32)dr, (u32)value, handler);

	*exception = 0;

	switch (dr) {
	case 0 ... 3:
		vcpu->arch.db[dr] = value;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
			vcpu->arch.eff_db[dr] = value;
		return;
	case 4 ... 5:
		if (vcpu->arch.cr4 & X86_CR4_DE)
			*exception = UD_VECTOR;
		return;
	case 6:
		if (value & 0xffffffff00000000ULL) {
			*exception = GP_VECTOR;
			return;
		}
		vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1;
		return;
	case 7:
		if (value & 0xffffffff00000000ULL) {
			*exception = GP_VECTOR;
			return;
		}
		vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
			svm->vmcb->save.dr7 = vcpu->arch.dr7;
			vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK);
		}
		return;
	default:
		/* FIXME: Possible case? */
		printk(KERN_DEBUG "%s: unexpected dr %u\n",
		       __func__, dr);
		*exception = UD_VECTOR;
		return;
	}
}

static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u64 fault_address;
	u32 error_code;

	fault_address  = svm->vmcb->control.exit_info_2;
	error_code = svm->vmcb->control.exit_info_1;

	if (!npt_enabled)
		KVMTRACE_3D(PAGE_FAULT, &svm->vcpu, error_code,
			    (u32)fault_address, (u32)(fault_address >> 32),
			    handler);
	else
		KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code,
			    (u32)fault_address, (u32)(fault_address >> 32),
			    handler);
	/*
	 * FIXME: This shouldn't be necessary here, but there is a flush
	 * missing in the MMU code. Until we find this bug, flush the
	 * complete TLB here on an NPF
	 */
	if (npt_enabled)
		svm_flush_tlb(&svm->vcpu);
	else {
		if (svm->vcpu.arch.interrupt.pending ||
				svm->vcpu.arch.exception.pending)
			kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
	}
	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
}

static int db_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (!(svm->vcpu.guest_debug &
	      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
		kvm_queue_exception(&svm->vcpu, DB_VECTOR);
		return 1;
	}
	kvm_run->exit_reason = KVM_EXIT_DEBUG;
	kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
	kvm_run->debug.arch.exception = DB_VECTOR;
	return 0;
}

static int bp_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	kvm_run->exit_reason = KVM_EXIT_DEBUG;
	kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
	kvm_run->debug.arch.exception = BP_VECTOR;
	return 0;
}

static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	int er;

	er = emulate_instruction(&svm->vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD);
	if (er != EMULATE_DONE)
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
	if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
	svm->vcpu.fpu_active = 1;

	return 1;
}

static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	/*
	 * On an #MC intercept the MCE handler is not called automatically in
	 * the host. So do it by hand here.
	 */
	asm volatile (
		"int $0x12\n");
	/* not sure if we ever come back to this point */

	return 1;
}

static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	/*
	 * VMCB is undefined after a SHUTDOWN intercept
	 * so reinitialize it.
	 */
	clear_page(svm->vmcb);
	init_vmcb(svm);

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
	int size, in, string;
	unsigned port;

	++svm->vcpu.stat.io_exits;

	svm->next_rip = svm->vmcb->control.exit_info_2;

	string = (io_info & SVM_IOIO_STR_MASK) != 0;

	if (string) {
		if (emulate_instruction(&svm->vcpu,
					kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
			return 0;
		return 1;
	}

	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;

	skip_emulated_instruction(&svm->vcpu);
	return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
}

static int nmi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	KVMTRACE_0D(NMI, &svm->vcpu, handler);
	return 1;
}

static int intr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	++svm->vcpu.stat.irq_exits;
	KVMTRACE_0D(INTR, &svm->vcpu, handler);
	return 1;
}

static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	return 1;
}

static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
	skip_emulated_instruction(&svm->vcpu);
	return kvm_emulate_halt(&svm->vcpu);
}

static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);
	kvm_emulate_hypercall(&svm->vcpu);
	return 1;
}

static int nested_svm_check_permissions(struct vcpu_svm *svm)
{
	if (!(svm->vcpu.arch.shadow_efer & EFER_SVME)
	    || !is_paging(&svm->vcpu)) {
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
		return 1;
	}

	if (svm->vmcb->save.cpl) {
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	return 0;
}

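/*
 * If L1 intercepts this exception, emulate a #VMEXIT into the nested
 * VMCB instead of injecting the exception into L2.
 */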
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code)
{
	if (is_nested(svm)) {
		svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
		svm->vmcb->control.exit_code_hi = 0;
		svm->vmcb->control.exit_info_1 = error_code;
		svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
		if (nested_svm_exit_handled(svm, false)) {
			nsvm_printk("VMexit -> EXCP 0x%x\n", nr);

			nested_svm_vmexit(svm);
			return 1;
		}
	}

	return 0;
}

static inline int nested_svm_intr(struct vcpu_svm *svm)
{
	if (is_nested(svm)) {
		if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
			return 0;

		if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
			return 0;

		svm->vmcb->control.exit_code = SVM_EXIT_INTR;

		if (nested_svm_exit_handled(svm, false)) {
			nsvm_printk("VMexit -> INTR\n");
			nested_svm_vmexit(svm);
			return 1;
		}
	}

	return 0;
}

static struct page *nested_svm_get_page(struct vcpu_svm *svm, u64 gpa)
{
	struct page *page;

	down_read(&current->mm->mmap_sem);
	page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
	up_read(&current->mm->mmap_sem);

	if (is_error_page(page)) {
		printk(KERN_INFO "%s: could not find page at 0x%llx\n",
		       __func__, gpa);
		kvm_release_page_clean(page);
		kvm_inject_gp(&svm->vcpu, 0);
		return NULL;
	}
	return page;
}

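/*
 * Helper for accessing guest pages that hold nested SVM state: map up
 * to two guest-physical pages, run the handler on them, then unmap and
 * release the pages again.
 */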
static int nested_svm_do(struct vcpu_svm *svm,
			 u64 arg1_gpa, u64 arg2_gpa, void *opaque,
			 int (*handler)(struct vcpu_svm *svm,
					void *arg1,
					void *arg2,
					void *opaque))
{
	struct page *arg1_page;
	struct page *arg2_page = NULL;
	void *arg1;
	void *arg2 = NULL;
	int retval;

	arg1_page = nested_svm_get_page(svm, arg1_gpa);
	if (arg1_page == NULL)
		return 1;

	if (arg2_gpa) {
		arg2_page = nested_svm_get_page(svm, arg2_gpa);
		if (arg2_page == NULL) {
			kvm_release_page_clean(arg1_page);
			return 1;
		}
	}

	arg1 = kmap_atomic(arg1_page, KM_USER0);
	if (arg2_gpa)
		arg2 = kmap_atomic(arg2_page, KM_USER1);

	retval = handler(svm, arg1, arg2, opaque);

	kunmap_atomic(arg1, KM_USER0);
	if (arg2_gpa)
		kunmap_atomic(arg2, KM_USER1);

	kvm_release_page_dirty(arg1_page);
	if (arg2_gpa)
		kvm_release_page_dirty(arg2_page);

	return retval;
}

static int nested_svm_exit_handled_real(struct vcpu_svm *svm,
					void *arg1,
					void *arg2,
					void *opaque)
{
	struct vmcb *nested_vmcb = (struct vmcb *)arg1;
	bool kvm_overrides = *(bool *)opaque;
	u32 exit_code = svm->vmcb->control.exit_code;

	if (kvm_overrides) {
		switch (exit_code) {
		case SVM_EXIT_INTR:
		case SVM_EXIT_NMI:
			return 0;
		/* For now we are always handling NPFs when using them */
		case SVM_EXIT_NPF:
			if (npt_enabled)
				return 0;
			break;
		/* When we're shadowing, trap PFs */
		case SVM_EXIT_EXCP_BASE + PF_VECTOR:
			if (!npt_enabled)
				return 0;
			break;
		default:
			break;
		}
	}

	switch (exit_code) {
	case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: {
		u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0);
		if (nested_vmcb->control.intercept_cr_read & cr_bits)
			return 1;
		break;
	}
	case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR8: {
		u32 cr_bits = 1 << (exit_code - SVM_EXIT_WRITE_CR0);
		if (nested_vmcb->control.intercept_cr_write & cr_bits)
			return 1;
		break;
	}
	case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR7: {
		u32 dr_bits = 1 << (exit_code - SVM_EXIT_READ_DR0);
		if (nested_vmcb->control.intercept_dr_read & dr_bits)
			return 1;
		break;
	}
	case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR7: {
		u32 dr_bits = 1 << (exit_code - SVM_EXIT_WRITE_DR0);
		if (nested_vmcb->control.intercept_dr_write & dr_bits)
			return 1;
		break;
	}
	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
		if (nested_vmcb->control.intercept_exceptions & excp_bits)
			return 1;
		break;
	}
	default: {
		u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
		nsvm_printk("exit code: 0x%x\n", exit_code);
		if (nested_vmcb->control.intercept & exit_bits)
			return 1;
	}
	}

	return 0;
}

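/*
 * Look up the intercepted MSR in the nested guest's MSR permission map.
 * The index math mirrors set_msr_interception(): two bits per MSR, with
 * exit_info_1 bit 0 selecting the write (1) or read (0) intercept bit.
 */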
static int nested_svm_exit_handled_msr(struct vcpu_svm *svm,
				       void *arg1, void *arg2,
				       void *opaque)
{
	struct vmcb *nested_vmcb = (struct vmcb *)arg1;
	u8 *msrpm = (u8 *)arg2;
	u32 t0, t1;
	u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	u32 param = svm->vmcb->control.exit_info_1 & 1;

	if (!(nested_vmcb->control.intercept & (1ULL << INTERCEPT_MSR_PROT)))
		return 0;

	switch (msr) {
	case 0 ... 0x1fff:
		t0 = (msr * 2) % 8;
		t1 = msr / 8;
		break;
	case 0xc0000000 ... 0xc0001fff:
		t0 = (8192 + msr - 0xc0000000) * 2;
		t1 = (t0 / 8);
		t0 %= 8;
		break;
	case 0xc0010000 ... 0xc0011fff:
		t0 = (16384 + msr - 0xc0010000) * 2;
		t1 = (t0 / 8);
		t0 %= 8;
		break;
	default:
		return 1;
	}
	if (msrpm[t1] & ((1 << param) << t0))
		return 1;

	return 0;
}

static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override)
{
	bool k = kvm_override;

	switch (svm->vmcb->control.exit_code) {
	case SVM_EXIT_MSR:
		return nested_svm_do(svm, svm->nested_vmcb,
				     svm->nested_vmcb_msrpm, NULL,
				     nested_svm_exit_handled_msr);
	default:
		break;
	}

	return nested_svm_do(svm, svm->nested_vmcb, 0, &k,
			     nested_svm_exit_handled_real);
}

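/*
 * Emulated #VMEXIT: copy the current VMCB state back into the guest's
 * nested VMCB (preserving the fields L1 owns), then restore the host
 * state that was saved to hsave at VMRUN time.
 */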
1470 static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
1471                                   void *arg2, void *opaque)
1472 {
1473         struct vmcb *nested_vmcb = (struct vmcb *)arg1;
1474         struct vmcb *hsave = svm->hsave;
1475         u64 nested_save[] = { nested_vmcb->save.cr0,
1476                               nested_vmcb->save.cr3,
1477                               nested_vmcb->save.cr4,
1478                               nested_vmcb->save.efer,
1479                               nested_vmcb->control.intercept_cr_read,
1480                               nested_vmcb->control.intercept_cr_write,
1481                               nested_vmcb->control.intercept_dr_read,
1482                               nested_vmcb->control.intercept_dr_write,
1483                               nested_vmcb->control.intercept_exceptions,
1484                               nested_vmcb->control.intercept,
1485                               nested_vmcb->control.msrpm_base_pa,
1486                               nested_vmcb->control.iopm_base_pa,
1487                               nested_vmcb->control.tsc_offset };
1488
1489         /* Give the current vmcb to the guest */
1490         memcpy(nested_vmcb, svm->vmcb, sizeof(struct vmcb));
1491         nested_vmcb->save.cr0 = nested_save[0];
1492         if (!npt_enabled)
1493                 nested_vmcb->save.cr3 = nested_save[1];
1494         nested_vmcb->save.cr4 = nested_save[2];
1495         nested_vmcb->save.efer = nested_save[3];
1496         nested_vmcb->control.intercept_cr_read = nested_save[4];
1497         nested_vmcb->control.intercept_cr_write = nested_save[5];
1498         nested_vmcb->control.intercept_dr_read = nested_save[6];
1499         nested_vmcb->control.intercept_dr_write = nested_save[7];
1500         nested_vmcb->control.intercept_exceptions = nested_save[8];
1501         nested_vmcb->control.intercept = nested_save[9];
1502         nested_vmcb->control.msrpm_base_pa = nested_save[10];
1503         nested_vmcb->control.iopm_base_pa = nested_save[11];
1504         nested_vmcb->control.tsc_offset = nested_save[12];
1505
1506         /* We always set V_INTR_MASKING and remember the old value in hflags */
1507         if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
1508                 nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
1509
1510         if ((nested_vmcb->control.int_ctl & V_IRQ_MASK) &&
1511             (nested_vmcb->control.int_vector)) {
1512                 nsvm_printk("WARNING: IRQ 0x%x still enabled on #VMEXIT\n",
1513                                 nested_vmcb->control.int_vector);
1514         }
1515
1516         /* Restore the original control entries */
1517         svm->vmcb->control = hsave->control;
1518
1519         /* Kill any pending exceptions */
1520         if (svm->vcpu.arch.exception.pending == true)
1521                 nsvm_printk("WARNING: Pending Exception\n");
1522         svm->vcpu.arch.exception.pending = false;
1523
1524         /* Restore selected save entries */
1525         svm->vmcb->save.es = hsave->save.es;
1526         svm->vmcb->save.cs = hsave->save.cs;
1527         svm->vmcb->save.ss = hsave->save.ss;
1528         svm->vmcb->save.ds = hsave->save.ds;
1529         svm->vmcb->save.gdtr = hsave->save.gdtr;
1530         svm->vmcb->save.idtr = hsave->save.idtr;
1531         svm->vmcb->save.rflags = hsave->save.rflags;
1532         svm_set_efer(&svm->vcpu, hsave->save.efer);
1533         svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
1534         svm_set_cr4(&svm->vcpu, hsave->save.cr4);
1535         if (npt_enabled) {
1536                 svm->vmcb->save.cr3 = hsave->save.cr3;
1537                 svm->vcpu.arch.cr3 = hsave->save.cr3;
1538         } else {
1539                 kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
1540         }
1541         kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
1542         kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
1543         kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
1544         svm->vmcb->save.dr7 = 0;
1545         svm->vmcb->save.cpl = 0;
1546         svm->vmcb->control.exit_int_info = 0;
1547
1548         svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
1549         /* Exit nested SVM mode */
1550         svm->nested_vmcb = 0;
1551
1552         return 0;
1553 }
1554
1555 static int nested_svm_vmexit(struct vcpu_svm *svm)
1556 {
1557         nsvm_printk("VMexit\n");
1558         if (nested_svm_do(svm, svm->nested_vmcb, 0,
1559                           NULL, nested_svm_vmexit_real))
1560                 return 1;
1561
1562         kvm_mmu_reset_context(&svm->vcpu);
1563         kvm_mmu_load(&svm->vcpu);
1564
1565         return 0;
1566 }
1567
1568 static int nested_svm_vmrun_msrpm(struct vcpu_svm *svm, void *arg1,
1569                                   void *arg2, void *opaque)
1570 {
1571         int i;
1572         u32 *nested_msrpm = (u32*)arg1;
1573         for (i=0; i< PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
1574                 svm->nested_msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
1575         svm->vmcb->control.msrpm_base_pa = __pa(svm->nested_msrpm);
1576
1577         return 0;
1578 }
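/*
 * A sketch of the merge above (identifier names illustrative): the MSR
 * permission bitmap dedicates two bits to every MSR, one each for the read
 * and the write intercept, so ORing the two bitmaps word by word intercepts
 * an access whenever *either* level asked for it:
 *
 *	merged[i] = host[i] | nested[i];
 *
 * That union is computed for every u32 of the
 * PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) bitmap, and the hardware is then
 * pointed at the merged copy.
 */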
1579
1580 static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
1581                             void *arg2, void *opaque)
1582 {
1583         struct vmcb *nested_vmcb = (struct vmcb *)arg1;
1584         struct vmcb *hsave = svm->hsave;
1585
1586         /* nested_vmcb is our indicator of whether nested SVM is active */
1587         svm->nested_vmcb = svm->vmcb->save.rax;
1588
1589         /* Clear internal status */
1590         svm->vcpu.arch.exception.pending = false;
1591
1592         /* Save the current vmcb wholesale, so we don't have to pick
1593            individual fields; everything can be restored on #VMEXIT */
1594         memcpy(hsave, svm->vmcb, sizeof(struct vmcb));
1595         /* We need to remember the original CR3 in the shadow page table case */
1596         if (!npt_enabled)
1597                 hsave->save.cr3 = svm->vcpu.arch.cr3;
1598         hsave->save.cr4 = svm->vcpu.arch.cr4;
1599         hsave->save.rip = svm->next_rip;
1600
1601         if (svm->vmcb->save.rflags & X86_EFLAGS_IF)
1602                 svm->vcpu.arch.hflags |= HF_HIF_MASK;
1603         else
1604                 svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
1605
1606         /* Load the nested guest state */
1607         svm->vmcb->save.es = nested_vmcb->save.es;
1608         svm->vmcb->save.cs = nested_vmcb->save.cs;
1609         svm->vmcb->save.ss = nested_vmcb->save.ss;
1610         svm->vmcb->save.ds = nested_vmcb->save.ds;
1611         svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
1612         svm->vmcb->save.idtr = nested_vmcb->save.idtr;
1613         svm->vmcb->save.rflags = nested_vmcb->save.rflags;
1614         svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
1615         svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
1616         svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
1617         if (npt_enabled) {
1618                 svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
1619                 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
1620         } else {
1621                 kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
1622                 kvm_mmu_reset_context(&svm->vcpu);
1623         }
1624         svm->vmcb->save.cr2 = nested_vmcb->save.cr2;
1625         kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
1626         kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
1627         kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
1628         /* Update the vmcb fields directly too, in case we never reach vcpu_run */
1629         svm->vmcb->save.rax = nested_vmcb->save.rax;
1630         svm->vmcb->save.rsp = nested_vmcb->save.rsp;
1631         svm->vmcb->save.rip = nested_vmcb->save.rip;
1632         svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
1633         svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
1634         svm->vmcb->save.cpl = nested_vmcb->save.cpl;
1635
1636         /* The nested guest must not be more powerful than the guest it
1637            runs under, so the two intercept sets are ORed together */
1638         svm->vmcb->control.intercept_cr_read |=
1639                 nested_vmcb->control.intercept_cr_read;
1640         svm->vmcb->control.intercept_cr_write |=
1641                 nested_vmcb->control.intercept_cr_write;
1642         svm->vmcb->control.intercept_dr_read |=
1643                 nested_vmcb->control.intercept_dr_read;
1644         svm->vmcb->control.intercept_dr_write |=
1645                 nested_vmcb->control.intercept_dr_write;
1646         svm->vmcb->control.intercept_exceptions |=
1647                 nested_vmcb->control.intercept_exceptions;
1648
1649         svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
1650
1651         svm->nested_vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;
1652
1653         force_new_asid(&svm->vcpu);
1654         svm->vmcb->control.exit_int_info = nested_vmcb->control.exit_int_info;
1655         svm->vmcb->control.exit_int_info_err = nested_vmcb->control.exit_int_info_err;
1656         svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
1657         if (nested_vmcb->control.int_ctl & V_IRQ_MASK) {
1658                 nsvm_printk("nSVM Injecting Interrupt: 0x%x\n",
1659                                 nested_vmcb->control.int_ctl);
1660         }
1661         if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
1662                 svm->vcpu.arch.hflags |= HF_VINTR_MASK;
1663         else
1664                 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
1665
1666         nsvm_printk("nSVM exit_int_info: 0x%x | int_state: 0x%x\n",
1667                         nested_vmcb->control.exit_int_info,
1668                         nested_vmcb->control.int_state);
1669
1670         svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
1671         svm->vmcb->control.int_state = nested_vmcb->control.int_state;
1672         svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
1673         if (nested_vmcb->control.event_inj & SVM_EVTINJ_VALID)
1674                 nsvm_printk("Injecting Event: 0x%x\n",
1675                                 nested_vmcb->control.event_inj);
1676         svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
1677         svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
1678
1679         svm->vcpu.arch.hflags |= HF_GIF_MASK;
1680
1681         return 0;
1682 }
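/*
 * In short, the VMRUN emulation above is a full world switch: the current
 * vmcb is copied into hsave, the nested guest's save state is loaded, the
 * intercept bitmaps are ORed, and interrupt control is rewritten so that
 * V_INTR_MASKING is always set while the guest's own choice is remembered
 * in hflags. A rough sketch of that last step:
 *
 *	hflags.HF_VINTR = guest int_ctl has V_INTR_MASKING ? 1 : 0;
 *	int_ctl = guest int_ctl | V_INTR_MASKING_MASK;
 *
 * Setting HF_GIF_MASK at the very end is what finally allows events to be
 * delivered to the nested guest.
 */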
1683
1684 static int nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
1685 {
1686         to_vmcb->save.fs = from_vmcb->save.fs;
1687         to_vmcb->save.gs = from_vmcb->save.gs;
1688         to_vmcb->save.tr = from_vmcb->save.tr;
1689         to_vmcb->save.ldtr = from_vmcb->save.ldtr;
1690         to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
1691         to_vmcb->save.star = from_vmcb->save.star;
1692         to_vmcb->save.lstar = from_vmcb->save.lstar;
1693         to_vmcb->save.cstar = from_vmcb->save.cstar;
1694         to_vmcb->save.sfmask = from_vmcb->save.sfmask;
1695         to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
1696         to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
1697         to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
1698
1699         return 1;
1700 }
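/*
 * The fields copied above are exactly the "hidden" state moved by the
 * hardware VMLOAD/VMSAVE instructions: FS, GS, TR, LDTR, KERNEL_GS_BASE and
 * the SYSCALL/SYSENTER MSRs. One helper serves both directions by swapping
 * its arguments, as the two wrappers below show (guest_vmcb stands for the
 * mapped nested vmcb):
 *
 *	nested_svm_vmloadsave(guest_vmcb, svm->vmcb);	VMLOAD emulation
 *	nested_svm_vmloadsave(svm->vmcb, guest_vmcb);	VMSAVE emulation
 */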
1701
1702 static int nested_svm_vmload(struct vcpu_svm *svm, void *nested_vmcb,
1703                              void *arg2, void *opaque)
1704 {
1705         return nested_svm_vmloadsave((struct vmcb *)nested_vmcb, svm->vmcb);
1706 }
1707
1708 static int nested_svm_vmsave(struct vcpu_svm *svm, void *nested_vmcb,
1709                              void *arg2, void *opaque)
1710 {
1711         return nested_svm_vmloadsave(svm->vmcb, (struct vmcb *)nested_vmcb);
1712 }
1713
1714 static int vmload_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1715 {
1716         if (nested_svm_check_permissions(svm))
1717                 return 1;
1718
1719         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1720         skip_emulated_instruction(&svm->vcpu);
1721
1722         nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmload);
1723
1724         return 1;
1725 }
1726
1727 static int vmsave_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1728 {
1729         if (nested_svm_check_permissions(svm))
1730                 return 1;
1731
1732         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1733         skip_emulated_instruction(&svm->vcpu);
1734
1735         nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmsave);
1736
1737         return 1;
1738 }
1739
1740 static int vmrun_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1741 {
1742         nsvm_printk("VMrun\n");
1743         if (nested_svm_check_permissions(svm))
1744                 return 1;
1745
1746         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1747         skip_emulated_instruction(&svm->vcpu);
1748
1749         if (nested_svm_do(svm, svm->vmcb->save.rax, 0,
1750                           NULL, nested_svm_vmrun))
1751                 return 1;
1752
1753         if (nested_svm_do(svm, svm->nested_vmcb_msrpm, 0,
1754                       NULL, nested_svm_vmrun_msrpm))
1755                 return 1;
1756
1757         return 1;
1758 }
1759
1760 static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1761 {
1762         if (nested_svm_check_permissions(svm))
1763                 return 1;
1764
1765         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1766         skip_emulated_instruction(&svm->vcpu);
1767
1768         svm->vcpu.arch.hflags |= HF_GIF_MASK;
1769
1770         return 1;
1771 }
1772
1773 static int clgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1774 {
1775         if (nested_svm_check_permissions(svm))
1776                 return 1;
1777
1778         svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
1779         skip_emulated_instruction(&svm->vcpu);
1780
1781         svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
1782
1783         /* After a CLGI no interrupts should be delivered */
1784         svm_clear_vintr(svm);
1785         svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
1786
1787         return 1;
1788 }
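/*
 * STGI and CLGI toggle the Global Interrupt Flag, which is tracked in
 * hflags rather than in hardware while the nested guest is emulated,
 * roughly:
 *
 *	STGI:	hflags |= HF_GIF_MASK;		events may be delivered
 *	CLGI:	hflags &= ~HF_GIF_MASK;		all delivery is gated off
 *
 * Because nothing can be delivered after CLGI, the handler also tears down
 * any pending virtual interrupt request right away.
 */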
1789
1790 static int invalid_op_interception(struct vcpu_svm *svm,
1791                                    struct kvm_run *kvm_run)
1792 {
1793         kvm_queue_exception(&svm->vcpu, UD_VECTOR);
1794         return 1;
1795 }
1796
1797 static int task_switch_interception(struct vcpu_svm *svm,
1798                                     struct kvm_run *kvm_run)
1799 {
1800         u16 tss_selector;
1801         int reason;
1802         int int_type = svm->vmcb->control.exit_int_info &
1803                 SVM_EXITINTINFO_TYPE_MASK;
1804         int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
1805
1806         tss_selector = (u16)svm->vmcb->control.exit_info_1;
1807
1808         if (svm->vmcb->control.exit_info_2 &
1809             (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
1810                 reason = TASK_SWITCH_IRET;
1811         else if (svm->vmcb->control.exit_info_2 &
1812                  (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
1813                 reason = TASK_SWITCH_JMP;
1814         else if (svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID)
1815                 reason = TASK_SWITCH_GATE;
1816         else
1817                 reason = TASK_SWITCH_CALL;
1818
1819
1820         if (reason != TASK_SWITCH_GATE ||
1821             int_type == SVM_EXITINTINFO_TYPE_SOFT ||
1822             (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
1823              (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) {
1824                 if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0,
1825                                         EMULTYPE_SKIP) != EMULATE_DONE)
1826                         return 0;
1827         }
1828
1829         return kvm_task_switch(&svm->vcpu, tss_selector, reason);
1830 }
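/*
 * The guest RIP is not advanced past the instruction that triggered a
 * task-switch exit, so for switches caused by an instruction, or by a
 * software INT3/INTO reported in exit_int_info, the handler has to skip it
 * manually with EMULTYPE_SKIP before kvm_task_switch() runs.
 * Gate-initiated switches from hardware events need no such fixup.
 */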
1831
1832 static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1833 {
1834         svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
1835         kvm_emulate_cpuid(&svm->vcpu);
1836         return 1;
1837 }
1838
1839 static int iret_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1840 {
1841         ++svm->vcpu.stat.nmi_window_exits;
1842         svm->vmcb->control.intercept &= ~(1UL << INTERCEPT_IRET);
1843         svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
1844         return 1;
1845 }
1846
1847 static int invlpg_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1848 {
1849         if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0) != EMULATE_DONE)
1850                 pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
1851         return 1;
1852 }
1853
1854 static int emulate_on_interception(struct vcpu_svm *svm,
1855                                    struct kvm_run *kvm_run)
1856 {
1857         if (emulate_instruction(&svm->vcpu, NULL, 0, 0, 0) != EMULATE_DONE)
1858                 pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
1859         return 1;
1860 }
1861
1862 static int cr8_write_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1863 {
1864         u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
1865         /* instruction emulation calls kvm_set_cr8() */
1866         emulate_instruction(&svm->vcpu, NULL, 0, 0, 0);
1867         if (irqchip_in_kernel(svm->vcpu.kvm)) {
1868                 svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
1869                 return 1;
1870         }
1871         if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
1872                 return 1;
1873         kvm_run->exit_reason = KVM_EXIT_SET_TPR;
1874         return 0;
1875 }
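/*
 * A CR8 write changes the TPR, so after emulating it the handler only
 * drops to userspace when that could unmask a pending interrupt, i.e. when
 * the priority was lowered and no in-kernel irqchip is present. Roughly:
 *
 *	if (new_cr8 < old_cr8)			TPR lowered
 *		exit_reason = KVM_EXIT_SET_TPR;
 *
 * With the in-kernel irqchip the CR8-write intercept is dropped instead;
 * update_cr8_intercept() re-arms it when needed.
 */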
1876
1877 static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
1878 {
1879         struct vcpu_svm *svm = to_svm(vcpu);
1880
1881         switch (ecx) {
1882         case MSR_IA32_TIME_STAMP_COUNTER: {
1883                 u64 tsc;
1884
1885                 rdtscll(tsc);
1886                 *data = svm->vmcb->control.tsc_offset + tsc;
1887                 break;
1888         }
1889         case MSR_K6_STAR:
1890                 *data = svm->vmcb->save.star;
1891                 break;
1892 #ifdef CONFIG_X86_64
1893         case MSR_LSTAR:
1894                 *data = svm->vmcb->save.lstar;
1895                 break;
1896         case MSR_CSTAR:
1897                 *data = svm->vmcb->save.cstar;
1898                 break;
1899         case MSR_KERNEL_GS_BASE:
1900                 *data = svm->vmcb->save.kernel_gs_base;
1901                 break;
1902         case MSR_SYSCALL_MASK:
1903                 *data = svm->vmcb->save.sfmask;
1904                 break;
1905 #endif
1906         case MSR_IA32_SYSENTER_CS:
1907                 *data = svm->vmcb->save.sysenter_cs;
1908                 break;
1909         case MSR_IA32_SYSENTER_EIP:
1910                 *data = svm->vmcb->save.sysenter_eip;
1911                 break;
1912         case MSR_IA32_SYSENTER_ESP:
1913                 *data = svm->vmcb->save.sysenter_esp;
1914                 break;
1915         /* Nobody will change the following 5 values in the VMCB, so
1916            we can safely return them on rdmsr. They will always be 0
1917            until LBRV is implemented. */
1918         case MSR_IA32_DEBUGCTLMSR:
1919                 *data = svm->vmcb->save.dbgctl;
1920                 break;
1921         case MSR_IA32_LASTBRANCHFROMIP:
1922                 *data = svm->vmcb->save.br_from;
1923                 break;
1924         case MSR_IA32_LASTBRANCHTOIP:
1925                 *data = svm->vmcb->save.br_to;
1926                 break;
1927         case MSR_IA32_LASTINTFROMIP:
1928                 *data = svm->vmcb->save.last_excp_from;
1929                 break;
1930         case MSR_IA32_LASTINTTOIP:
1931                 *data = svm->vmcb->save.last_excp_to;
1932                 break;
1933         case MSR_VM_HSAVE_PA:
1934                 *data = svm->hsave_msr;
1935                 break;
1936         case MSR_VM_CR:
1937                 *data = 0;
1938                 break;
1939         case MSR_IA32_UCODE_REV:
1940                 *data = 0x01000065;
1941                 break;
1942         default:
1943                 return kvm_get_msr_common(vcpu, ecx, data);
1944         }
1945         return 0;
1946 }
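/*
 * A sketch of the TSC arithmetic used above and in svm_set_msr() below:
 * the vmcb stores a signed offset that the hardware adds to the host TSC,
 * so (names illustrative)
 *
 *	guest_tsc  = host_tsc + tsc_offset;		rdmsr path
 *	tsc_offset = wanted_guest_tsc - host_tsc;	wrmsr path
 */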
1947
1948 static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
1949 {
1950         u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
1951         u64 data;
1952
1953         if (svm_get_msr(&svm->vcpu, ecx, &data))
1954                 kvm_inject_gp(&svm->vcpu, 0);
1955         else {
1956                 KVMTRACE_3D(MSR_READ, &svm->vcpu, ecx, (u32)data,
1957                             (u32)(data >> 32), handler);
1958
1959                 svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
1960                 svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
1961                 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
1962                 skip_emulated_instruction(&svm->vcpu);
1963         }
1964         return 1;
1965 }
1966
1967 static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
1968 {
1969         struct vcpu_svm *svm = to_svm(vcpu);
1970
1971         switch (ecx) {
1972         case MSR_IA32_TIME_STAMP_COUNTER: {
1973                 u64 tsc;
1974
1975                 rdtscll(tsc);
1976                 svm->vmcb->control.tsc_offset = data - tsc;
1977                 break;
1978         }
1979         case MSR_K6_STAR:
1980                 svm->vmcb->save.star = data;
1981                 break;
1982 #ifdef CONFIG_X86_64
1983         case MSR_LSTAR:
1984                 svm->vmcb->save.lstar = data;
1985                 break;
1986         case MSR_CSTAR:
1987                 svm->vmcb->save.cstar = data;
1988                 break;
1989         case MSR_KERNEL_GS_BASE:
1990                 svm->vmcb->save.kernel_gs_base = data;
1991                 break;
1992         case MSR_SYSCALL_MASK:
1993                 svm->vmcb->save.sfmask = data;
1994                 break;
1995 #endif
1996         case MSR_IA32_SYSENTER_CS:
1997                 svm->vmcb->save.sysenter_cs = data;
1998                 break;
1999         case MSR_IA32_SYSENTER_EIP:
2000                 svm->vmcb->save.sysenter_eip = data;
2001                 break;
2002         case MSR_IA32_SYSENTER_ESP:
2003                 svm->vmcb->save.sysenter_esp = data;
2004                 break;
2005         case MSR_IA32_DEBUGCTLMSR:
2006                 if (!svm_has(SVM_FEATURE_LBRV)) {
2007                         pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
2008                                         __func__, data);
2009                         break;
2010                 }
2011                 if (data & DEBUGCTL_RESERVED_BITS)
2012                         return 1;
2013
2014                 svm->vmcb->save.dbgctl = data;
2015                 if (data & (1ULL << 0))
2016                         svm_enable_lbrv(svm);
2017                 else
2018                         svm_disable_lbrv(svm);
2019                 break;
2020         case MSR_K7_EVNTSEL0:
2021         case MSR_K7_EVNTSEL1:
2022         case MSR_K7_EVNTSEL2:
2023         case MSR_K7_EVNTSEL3:
2024         case MSR_K7_PERFCTR0:
2025         case MSR_K7_PERFCTR1:
2026         case MSR_K7_PERFCTR2:
2027         case MSR_K7_PERFCTR3:
2028                 /*
2029                  * Just discard all writes to the performance counters; this
2030                  * should keep both older Linux and 64-bit Windows guests
2031                  * happy.
2032                  */
2033                 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", ecx, data);
2034
2035                 break;
2036         case MSR_VM_HSAVE_PA:
2037                 svm->hsave_msr = data;
2038                 break;
2039         default:
2040                 return kvm_set_msr_common(vcpu, ecx, data);
2041         }
2042         return 0;
2043 }
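/*
 * The MSR_IA32_DEBUGCTLMSR case above gates everything on the LBRV
 * feature, rejects the reserved bits in DEBUGCTL_RESERVED_BITS (the
 * nonzero return makes the wrmsr path inject #GP) and treats bit 0 as the
 * LBR-enable switch for the LBR virtualization machinery.
 */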
2044
2045 static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
2046 {
2047         u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
2048         u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
2049                 | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
2050
2051         KVMTRACE_3D(MSR_WRITE, &svm->vcpu, ecx, (u32)data, (u32)(data >> 32),
2052                     handler);
2053
2054         svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
2055         if (svm_set_msr(&svm->vcpu, ecx, data))
2056                 kvm_inject_gp(&svm->vcpu, 0);
2057         else
2058                 skip_emulated_instruction(&svm->vcpu);
2059         return 1;
2060 }
2061
2062 static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
2063 {
2064         if (svm->vmcb->control.exit_info_1)
2065                 return wrmsr_interception(svm, kvm_run);
2066         else
2067                 return rdmsr_interception(svm, kvm_run);
2068 }
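/*
 * For SVM_EXIT_MSR the hardware sets exit_info_1 to 1 for WRMSR and to 0
 * for RDMSR, which is all the discrimination the dispatcher above needs.
 */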
2069
2070 static int interrupt_window_interception(struct vcpu_svm *svm,
2071                                    struct kvm_run *kvm_run)
2072 {
2073         KVMTRACE_0D(PEND_INTR, &svm->vcpu, handler);
2074
2075         svm_clear_vintr(svm);
2076         svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
2077         /*
2078          * If userspace is waiting to inject interrupts, exit as soon as
2079          * possible.
2080          */
2081         if (!irqchip_in_kernel(svm->vcpu.kvm) &&
2082             kvm_run->request_interrupt_window &&
2083             !kvm_cpu_has_interrupt(&svm->vcpu)) {
2084                 ++svm->vcpu.stat.irq_window_exits;
2085                 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
2086                 return 0;
2087         }
2088
2089         return 1;
2090 }
2091
2092 static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
2093                                       struct kvm_run *kvm_run) = {
2094         [SVM_EXIT_READ_CR0]                     = emulate_on_interception,
2095         [SVM_EXIT_READ_CR3]                     = emulate_on_interception,
2096         [SVM_EXIT_READ_CR4]                     = emulate_on_interception,
2097         [SVM_EXIT_READ_CR8]                     = emulate_on_interception,
2098         /* for now: */
2099         [SVM_EXIT_WRITE_CR0]                    = emulate_on_interception,
2100         [SVM_EXIT_WRITE_CR3]                    = emulate_on_interception,
2101         [SVM_EXIT_WRITE_CR4]                    = emulate_on_interception,
2102         [SVM_EXIT_WRITE_CR8]                    = cr8_write_interception,
2103         [SVM_EXIT_READ_DR0]                     = emulate_on_interception,
2104         [SVM_EXIT_READ_DR1]                     = emulate_on_interception,
2105         [SVM_EXIT_READ_DR2]                     = emulate_on_interception,
2106         [SVM_EXIT_READ_DR3]                     = emulate_on_interception,
2107         [SVM_EXIT_WRITE_DR0]                    = emulate_on_interception,
2108         [SVM_EXIT_WRITE_DR1]                    = emulate_on_interception,
2109         [SVM_EXIT_WRITE_DR2]                    = emulate_on_interception,
2110         [SVM_EXIT_WRITE_DR3]                    = emulate_on_interception,
2111         [SVM_EXIT_WRITE_DR5]                    = emulate_on_interception,
2112         [SVM_EXIT_WRITE_DR7]                    = emulate_on_interception,
2113         [SVM_EXIT_EXCP_BASE + DB_VECTOR]        = db_interception,
2114         [SVM_EXIT_EXCP_BASE + BP_VECTOR]        = bp_interception,
2115         [SVM_EXIT_EXCP_BASE + UD_VECTOR]        = ud_interception,
2116         [SVM_EXIT_EXCP_BASE + PF_VECTOR]        = pf_interception,
2117         [SVM_EXIT_EXCP_BASE + NM_VECTOR]        = nm_interception,
2118         [SVM_EXIT_EXCP_BASE + MC_VECTOR]        = mc_interception,
2119         [SVM_EXIT_INTR]                         = intr_interception,
2120         [SVM_EXIT_NMI]                          = nmi_interception,
2121         [SVM_EXIT_SMI]                          = nop_on_interception,
2122         [SVM_EXIT_INIT]                         = nop_on_interception,
2123         [SVM_EXIT_VINTR]                        = interrupt_window_interception,
2124         /* [SVM_EXIT_CR0_SEL_WRITE]             = emulate_on_interception, */
2125         [SVM_EXIT_CPUID]                        = cpuid_interception,
2126         [SVM_EXIT_IRET]                         = iret_interception,
2127         [SVM_EXIT_INVD]                         = emulate_on_interception,
2128         [SVM_EXIT_HLT]                          = halt_interception,
2129         [SVM_EXIT_INVLPG]                       = invlpg_interception,
2130         [SVM_EXIT_INVLPGA]                      = invalid_op_interception,
2131         [SVM_EXIT_IOIO]                         = io_interception,
2132         [SVM_EXIT_MSR]                          = msr_interception,
2133         [SVM_EXIT_TASK_SWITCH]                  = task_switch_interception,
2134         [SVM_EXIT_SHUTDOWN]                     = shutdown_interception,
2135         [SVM_EXIT_VMRUN]                        = vmrun_interception,
2136         [SVM_EXIT_VMMCALL]                      = vmmcall_interception,
2137         [SVM_EXIT_VMLOAD]                       = vmload_interception,
2138         [SVM_EXIT_VMSAVE]                       = vmsave_interception,
2139         [SVM_EXIT_STGI]                         = stgi_interception,
2140         [SVM_EXIT_CLGI]                         = clgi_interception,
2141         [SVM_EXIT_SKINIT]                       = invalid_op_interception,
2142         [SVM_EXIT_WBINVD]                       = emulate_on_interception,
2143         [SVM_EXIT_MONITOR]                      = invalid_op_interception,
2144         [SVM_EXIT_MWAIT]                        = invalid_op_interception,
2145         [SVM_EXIT_NPF]                          = pf_interception,
2146 };
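/*
 * The table is indexed directly by the hardware exit code; any code
 * outside the array, or any NULL slot inside it, falls through to the
 * KVM_EXIT_UNKNOWN path in handle_exit() below instead of dereferencing a
 * wild pointer.
 */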
2147
2148 static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
2149 {
2150         struct vcpu_svm *svm = to_svm(vcpu);
2151         u32 exit_code = svm->vmcb->control.exit_code;
2152
2153         KVMTRACE_3D(VMEXIT, vcpu, exit_code, (u32)svm->vmcb->save.rip,
2154                     (u32)((u64)svm->vmcb->save.rip >> 32), entryexit);
2155
2156         if (is_nested(svm)) {
2157                 nsvm_printk("nested handle_exit: 0x%x | 0x%lx | 0x%lx | 0x%lx\n",
2158                             exit_code, svm->vmcb->control.exit_info_1,
2159                             svm->vmcb->control.exit_info_2, svm->vmcb->save.rip);
2160                 if (nested_svm_exit_handled(svm, true)) {
2161                         nested_svm_vmexit(svm);
2162                         nsvm_printk("-> #VMEXIT\n");
2163                         return 1;
2164                 }
2165         }
2166
2167         if (npt_enabled) {
2168                 int mmu_reload = 0;
2169                 if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
2170                         svm_set_cr0(vcpu, svm->vmcb->save.cr0);
2171                         mmu_reload = 1;
2172                 }
2173                 vcpu->arch.cr0 = svm->vmcb->save.cr0;
2174                 vcpu->arch.cr3 = svm->vmcb->save.cr3;
2175                 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
2176                         if (!load_pdptrs(vcpu, vcpu->arch.cr3)) {
2177                                 kvm_inject_gp(vcpu, 0);
2178                                 return 1;
2179                         }
2180                 }
2181                 if (mmu_reload) {
2182                         kvm_mmu_reset_context(vcpu);
2183                         kvm_mmu_load(vcpu);
2184                 }
2185         }
2186
2187
2188         if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
2189                 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
2190                 kvm_run->fail_entry.hardware_entry_failure_reason
2191                         = svm->vmcb->control.exit_code;
2192                 return 0;
2193         }
2194
2195         if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
2196             exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
2197             exit_code != SVM_EXIT_NPF)
2198                 printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
2199                        "exit_code 0x%x\n",
2200                        __func__, svm->vmcb->control.exit_int_info,
2201                        exit_code);
2202
2203         if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
2204             || !svm_exit_handlers[exit_code]) {
2205                 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
2206                 kvm_run->hw.hardware_exit_reason = exit_code;
2207                 return 0;
2208         }
2209
2210         return svm_exit_handlers[exit_code](svm, kvm_run);
2211 }
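/*
 * The NPT block above exists because CR0 and CR3 accesses are not
 * intercepted when nested paging is enabled: handle_exit() pulls the
 * current values out of the vmcb on every exit and revalidates the PDPTRs
 * in PAE mode before dispatching to the handler table.
 */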
2212
2213 static void reload_tss(struct kvm_vcpu *vcpu)
2214 {
2215         int cpu = raw_smp_processor_id();
2216
2217         struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
2218         svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
2219         load_TR_desc();
2220 }
2221
2222 static void pre_svm_run(struct vcpu_svm *svm)
2223 {
2224         int cpu = raw_smp_processor_id();
2225
2226         struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
2227
2228         svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
2229         if (svm->vcpu.cpu != cpu ||
2230             svm->asid_generation != svm_data->asid_generation)
2231                 new_asid(svm, svm_data);
2232 }
2233
2234 static void svm_drop_interrupt_shadow(struct kvm_vcpu *vcpu)
2235 {
2236         struct vcpu_svm *svm = to_svm(vcpu);
2237         svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
2238 }
2239
2240 static void svm_inject_nmi(struct kvm_vcpu *vcpu)
2241 {
2242         struct vcpu_svm *svm = to_svm(vcpu);
2243
2244         svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
2245         vcpu->arch.hflags |= HF_NMI_MASK;
2246         svm->vmcb->control.intercept |= (1UL << INTERCEPT_IRET);
2247         ++vcpu->stat.nmi_injections;
2248 }
2249
2250 static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
2251 {
2252         struct vmcb_control_area *control;
2253
2254         KVMTRACE_1D(INJ_VIRQ, &svm->vcpu, (u32)irq, handler);
2255
2256         ++svm->vcpu.stat.irq_injections;
2257         control = &svm->vmcb->control;
2258         control->int_vector = irq;
2259         control->int_ctl &= ~V_INTR_PRIO_MASK;
2260         control->int_ctl |= V_IRQ_MASK |
2261                 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
2262 }
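/*
 * The priority field is pinned to 0xf, the highest value, instead of the
 * architected vector >> 4 kept in the commented-out expression, so the
 * virtual interrupt request can never be masked by the guest's current
 * V_TPR.
 */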
2263
2264 static void svm_queue_irq(struct kvm_vcpu *vcpu, unsigned nr)
2265 {
2266         struct vcpu_svm *svm = to_svm(vcpu);
2267
2268         svm->vmcb->control.event_inj = nr |
2269                 SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
2270 }
2271
2272 static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
2273 {
2274         struct vcpu_svm *svm = to_svm(vcpu);
2275
2276         nested_svm_intr(svm);
2277
2278         svm_queue_irq(vcpu, irq);
2279 }
2280
2281 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
2282 {
2283         struct vcpu_svm *svm = to_svm(vcpu);
2284
2285         if (irr == -1)
2286                 return;
2287
2288         if (tpr >= irr)
2289                 svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
2290 }
2291
2292 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
2293 {
2294         struct vcpu_svm *svm = to_svm(vcpu);
2295         struct vmcb *vmcb = svm->vmcb;
2296         return !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
2297                 !(svm->vcpu.arch.hflags & HF_NMI_MASK);
2298 }
2299
2300 static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
2301 {
2302         struct vcpu_svm *svm = to_svm(vcpu);
2303         struct vmcb *vmcb = svm->vmcb;
2304         return (vmcb->save.rflags & X86_EFLAGS_IF) &&
2305                 !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
2306                 (svm->vcpu.arch.hflags & HF_GIF_MASK);
2307 }
2308
2309 static void enable_irq_window(struct kvm_vcpu *vcpu)
2310 {
2311         svm_set_vintr(to_svm(vcpu));
2312         svm_inject_irq(to_svm(vcpu), 0x0);
2313 }
2314
2315 static void enable_nmi_window(struct kvm_vcpu *vcpu)
2316 {
2317         struct vcpu_svm *svm = to_svm(vcpu);
2318
2319         if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
2320                 enable_irq_window(vcpu);
2321 }
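/*
 * There is no dedicated NMI-window exit used here: when the vcpu sits in
 * an interrupt shadow, the code piggybacks on the virtual interrupt window
 * to get a #VMEXIT as soon as the shadow lifts. Completion of the NMI
 * handler itself is tracked through the IRET intercept armed in
 * svm_inject_nmi().
 */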
2322
2323 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
2324 {
2325         return 0;
2326 }
2327
2328 static void svm_flush_tlb(struct kvm_vcpu *vcpu)
2329 {
2330         force_new_asid(vcpu);
2331 }
2332
2333 static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
2334 {
2335 }
2336
2337 static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
2338 {
2339         struct vcpu_svm *svm = to_svm(vcpu);
2340
2341         if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
2342                 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
2343                 kvm_set_cr8(vcpu, cr8);
2344         }
2345 }
2346
2347 static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
2348 {
2349         struct vcpu_svm *svm = to_svm(vcpu);
2350         u64 cr8;
2351
2352         cr8 = kvm_get_cr8(vcpu);
2353         svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
2354         svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
2355 }
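/*
 * A sketch of the TPR plumbing (pseudo-code): the low four bits of int_ctl
 * mirror CR8, so the two helpers above copy in opposite directions around
 * a guest run:
 *
 *	entry:	int_ctl.V_TPR = kvm_get_cr8(vcpu) & 0x0f;
 *	exit:	kvm_set_cr8(vcpu, int_ctl & V_TPR_MASK);
 *
 * The exit direction is skipped while CR8 writes are intercepted, since
 * the guest cannot have changed the TPR behind KVM's back in that case.
 */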
2356
2357 static void svm_complete_interrupts(struct vcpu_svm *svm)
2358 {
2359         u8 vector;
2360         int type;
2361         u32 exitintinfo = svm->vmcb->control.exit_int_info;
2362
2363         svm->vcpu.arch.nmi_injected = false;
2364         kvm_clear_exception_queue(&svm->vcpu);
2365         kvm_clear_interrupt_queue(&svm->vcpu);
2366
2367         if (!(exitintinfo & SVM_EXITINTINFO_VALID))
2368                 return;
2369
2370         vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
2371         type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
2372
2373         switch (type) {
2374         case SVM_EXITINTINFO_TYPE_NMI:
2375                 svm->vcpu.arch.nmi_injected = true;
2376                 break;
2377         case SVM_EXITINTINFO_TYPE_EXEPT:
2378                 /* In case of a software exception do not reinject the
2379                    exception vector, but re-execute the instruction instead */
2380                 if (vector == BP_VECTOR || vector == OF_VECTOR)
2381                         break;
2382                 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
2383                         u32 err = svm->vmcb->control.exit_int_info_err;
2384                         kvm_queue_exception_e(&svm->vcpu, vector, err);
2385
2386                 } else
2387                         kvm_queue_exception(&svm->vcpu, vector);
2388                 break;
2389         case SVM_EXITINTINFO_TYPE_INTR:
2390                 kvm_queue_interrupt(&svm->vcpu, vector);
2391                 break;
2392         default:
2393                 break;
2394         }
2395 }
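/*
 * exit_int_info describes an event whose delivery was interrupted by the
 * #VMEXIT, so it is requeued here to be injected again on the next entry.
 * Software exceptions (#BP, #OF) are deliberately left out: re-executing
 * their instruction regenerates the event naturally.
 */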
2396
2397 #ifdef CONFIG_X86_64
2398 #define R "r"
2399 #else
2400 #define R "e"
2401 #endif
2402
2403 static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2404 {
2405         struct vcpu_svm *svm = to_svm(vcpu);
2406         u16 fs_selector;
2407         u16 gs_selector;
2408         u16 ldt_selector;
2409
2410         svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
2411         svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
2412         svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
2413
2414         pre_svm_run(svm);
2415
2416         sync_lapic_to_cr8(vcpu);
2417
2418         save_host_msrs(vcpu);
2419         fs_selector = kvm_read_fs();
2420         gs_selector = kvm_read_gs();
2421         ldt_selector = kvm_read_ldt();
2422         svm->host_cr2 = kvm_read_cr2();
2423         if (!is_nested(svm))
2424                 svm->vmcb->save.cr2 = vcpu->arch.cr2;
2425         /* required for live migration with NPT */
2426         if (npt_enabled)
2427                 svm->vmcb->save.cr3 = vcpu->arch.cr3;
2428
2429         clgi();
2430
2431         local_irq_enable();
2432
2433         asm volatile (
2434                 "push %%"R"bp; \n\t"
2435                 "mov %c[rbx](%[svm]), %%"R"bx \n\t"
2436                 "mov %c[rcx](%[svm]), %%"R"cx \n\t"
2437                 "mov %c[rdx](%[svm]), %%"R"dx \n\t"
2438                 "mov %c[rsi](%[svm]), %%"R"si \n\t"
2439                 "mov %c[rdi](%[svm]), %%"R"di \n\t"
2440                 "mov %c[rbp](%[svm]), %%"R"bp \n\t"
2441 #ifdef CONFIG_X86_64
2442                 "mov %c[r8](%[svm]),  %%r8  \n\t"
2443                 "mov %c[r9](%[svm]),  %%r9  \n\t"
2444                 "mov %c[r10](%[svm]), %%r10 \n\t"
2445                 "mov %c[r11](%[svm]), %%r11 \n\t"
2446                 "mov %c[r12](%[svm]), %%r12 \n\t"
2447                 "mov %c[r13](%[svm]), %%r13 \n\t"
2448                 "mov %c[r14](%[svm]), %%r14 \n\t"
2449                 "mov %c[r15](%[svm]), %%r15 \n\t"
2450 #endif
2451
2452                 /* Enter guest mode */
2453                 "push %%"R"ax \n\t"
2454                 "mov %c[vmcb](%[svm]), %%"R"ax \n\t"
2455                 __ex(SVM_VMLOAD) "\n\t"
2456                 __ex(SVM_VMRUN) "\n\t"
2457                 __ex(SVM_VMSAVE) "\n\t"
2458                 "pop %%"R"ax \n\t"
2459
2460                 /* Save guest registers, load host registers */
2461                 "mov %%"R"bx, %c[rbx](%[svm]) \n\t"
2462                 "mov %%"R"cx, %c[rcx](%[svm]) \n\t"
2463                 "mov %%"R"dx, %c[rdx](%[svm]) \n\t"
2464                 "mov %%"R"si, %c[rsi](%[svm]) \n\t"
2465                 "mov %%"R"di, %c[rdi](%[svm]) \n\t"
2466                 "mov %%"R"bp, %c[rbp](%[svm]) \n\t"
2467 #ifdef CONFIG_X86_64
2468                 "mov %%r8,  %c[r8](%[svm]) \n\t"
2469                 "mov %%r9,  %c[r9](%[svm]) \n\t"
2470                 "mov %%r10, %c[r10](%[svm]) \n\t"
2471                 "mov %%r11, %c[r11](%[svm]) \n\t"
2472                 "mov %%r12, %c[r12](%[svm]) \n\t"
2473                 "mov %%r13, %c[r13](%[svm]) \n\t"
2474                 "mov %%r14, %c[r14](%[svm]) \n\t"
2475                 "mov %%r15, %c[r15](%[svm]) \n\t"
2476 #endif
2477                 "pop %%"R"bp"
2478                 :
2479                 : [svm]"a"(svm),
2480                   [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
2481                   [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
2482                   [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
2483                   [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
2484                   [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
2485                   [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
2486                   [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
2487 #ifdef CONFIG_X86_64
2488                   , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
2489                   [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
2490                   [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
2491                   [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
2492                   [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
2493                   [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
2494                   [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
2495                   [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
2496 #endif
2497                 : "cc", "memory"
2498                 , R"bx", R"cx", R"dx", R"si", R"di"
2499 #ifdef CONFIG_X86_64
2500                 , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
2501 #endif
2502                 );
2503
2504         vcpu->arch.cr2 = svm->vmcb->save.cr2;
2505         vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
2506         vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
2507         vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
2508
2509         kvm_write_cr2(svm->host_cr2);
2510
2511         kvm_load_fs(fs_selector);
2512         kvm_load_gs(gs_selector);
2513         kvm_load_ldt(ldt_selector);
2514         load_host_msrs(vcpu);
2515
2516         reload_tss(vcpu);
2517
2518         local_irq_disable();
2519
2520         stgi();
2521
2522         sync_cr8_to_lapic(vcpu);
2523
2524         svm->next_rip = 0;
2525
2526         svm_complete_interrupts(svm);
2527 }
2528
2529 #undef R
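/*
 * Note on the run loop above: GIF is cleared with clgi() before host IRQs
 * are re-enabled, so physical interrupts arriving while the guest runs
 * stay pending until stgi() sets GIF again, after the host segments, MSRs,
 * TSS and CR2 have been restored. The VMLOAD/VMRUN/VMSAVE triple in the
 * asm block swaps the remaining hidden processor state around guest
 * execution itself.
 */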
2530
2531 static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
2532 {
2533         struct vcpu_svm *svm = to_svm(vcpu);
2534
2535         if (npt_enabled) {
2536                 svm->vmcb->control.nested_cr3 = root;
2537                 force_new_asid(vcpu);
2538                 return;
2539         }
2540
2541         svm->vmcb->save.cr3 = root;
2542         force_new_asid(vcpu);
2543
2544         if (vcpu->fpu_active) {
2545                 svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
2546                 svm->vmcb->save.cr0 |= X86_CR0_TS;
2547                 vcpu->fpu_active = 0;
2548         }
2549 }
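/*
 * With NPT the new root goes into nested_cr3 and the guest keeps full
 * control of its own CR3; without it the shadow root lands in save.cr3.
 * The TS/NM handling at the end is the lazy-FPU scheme: setting CR0.TS and
 * intercepting #NM defers reloading FPU state until the guest actually
 * touches the FPU.
 */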
2550
2551 static int is_disabled(void)
2552 {
2553         u64 vm_cr;
2554
2555         rdmsrl(MSR_VM_CR, vm_cr);
2556         if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
2557                 return 1;
2558
2559         return 0;
2560 }
2561
2562 static void
2563 svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
2564 {
2565         /*
2566          * Patch in the VMMCALL instruction:
2567          */
2568         hypercall[0] = 0x0f;
2569         hypercall[1] = 0x01;
2570         hypercall[2] = 0xd9;
2571 }
2572
2573 static void svm_check_processor_compat(void *rtn)
2574 {
2575         *(int *)rtn = 0;
2576 }
2577
2578 static bool svm_cpu_has_accelerated_tpr(void)
2579 {
2580         return false;
2581 }
2582
2583 static int get_npt_level(void)
2584 {
2585 #ifdef CONFIG_X86_64
2586         return PT64_ROOT_LEVEL;
2587 #else
2588         return PT32E_ROOT_LEVEL;
2589 #endif
2590 }
2591
2592 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
2593 {
2594         return 0;
2595 }
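/*
 * This is the SVM half of the get_mt_mask operation named in the commit
 * subject: SVM page table entries carry no EPT-style memory-type field, so
 * the callback contributes no extra PTE bits and simply returns 0. The
 * interesting implementation lives on the VMX side.
 */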
2596
2597 static struct kvm_x86_ops svm_x86_ops = {
2598         .cpu_has_kvm_support = has_svm,
2599         .disabled_by_bios = is_disabled,
2600         .hardware_setup = svm_hardware_setup,
2601         .hardware_unsetup = svm_hardware_unsetup,
2602         .check_processor_compatibility = svm_check_processor_compat,
2603         .hardware_enable = svm_hardware_enable,
2604         .hardware_disable = svm_hardware_disable,
2605         .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
2606
2607         .vcpu_create = svm_create_vcpu,
2608         .vcpu_free = svm_free_vcpu,
2609         .vcpu_reset = svm_vcpu_reset,
2610
2611         .prepare_guest_switch = svm_prepare_guest_switch,
2612         .vcpu_load = svm_vcpu_load,
2613         .vcpu_put = svm_vcpu_put,
2614
2615         .set_guest_debug = svm_guest_debug,
2616         .get_msr = svm_get_msr,
2617         .set_msr = svm_set_msr,
2618         .get_segment_base = svm_get_segment_base,
2619         .get_segment = svm_get_segment,
2620         .set_segment = svm_set_segment,
2621         .get_cpl = svm_get_cpl,
2622         .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
2623         .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
2624         .set_cr0 = svm_set_cr0,
2625         .set_cr3 = svm_set_cr3,
2626         .set_cr4 = svm_set_cr4,
2627         .set_efer = svm_set_efer,
2628         .get_idt = svm_get_idt,
2629         .set_idt = svm_set_idt,
2630         .get_gdt = svm_get_gdt,
2631         .set_gdt = svm_set_gdt,
2632         .get_dr = svm_get_dr,
2633         .set_dr = svm_set_dr,
2634         .get_rflags = svm_get_rflags,
2635         .set_rflags = svm_set_rflags,
2636
2637         .tlb_flush = svm_flush_tlb,
2638
2639         .run = svm_vcpu_run,
2640         .handle_exit = handle_exit,
2641         .skip_emulated_instruction = skip_emulated_instruction,
2642         .patch_hypercall = svm_patch_hypercall,
2643         .set_irq = svm_set_irq,
2644         .set_nmi = svm_inject_nmi,
2645         .queue_exception = svm_queue_exception,
2646         .interrupt_allowed = svm_interrupt_allowed,
2647         .nmi_allowed = svm_nmi_allowed,
2648         .enable_nmi_window = enable_nmi_window,
2649         .enable_irq_window = enable_irq_window,
2650         .update_cr8_intercept = update_cr8_intercept,
2651         .drop_interrupt_shadow = svm_drop_interrupt_shadow,
2652
2653         .set_tss_addr = svm_set_tss_addr,
2654         .get_tdp_level = get_npt_level,
2655         .get_mt_mask = svm_get_mt_mask,
2656 };
2657
2658 static int __init svm_init(void)
2659 {
2660         return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
2661                               THIS_MODULE);
2662 }
2663
2664 static void __exit svm_exit(void)
2665 {
2666         kvm_exit();
2667 }
2668
2669 module_init(svm_init)
2670 module_exit(svm_exit)