KVM: VMX: Zero ept module parameter if ept is not present
arch/x86/kvm/vmx.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  *
9  * Authors:
10  *   Avi Kivity   <avi@qumranet.com>
11  *   Yaniv Kamay  <yaniv@qumranet.com>
12  *
13  * This work is licensed under the terms of the GNU GPL, version 2.  See
14  * the COPYING file in the top-level directory.
15  *
16  */
17
18 #include "irq.h"
19 #include "mmu.h"
20
21 #include <linux/kvm_host.h>
22 #include <linux/module.h>
23 #include <linux/kernel.h>
24 #include <linux/mm.h>
25 #include <linux/highmem.h>
26 #include <linux/sched.h>
27 #include <linux/moduleparam.h>
28 #include "kvm_cache_regs.h"
29 #include "x86.h"
30
31 #include <asm/io.h>
32 #include <asm/desc.h>
33 #include <asm/vmx.h>
34 #include <asm/virtext.h>
35
36 #define __ex(x) __kvm_handle_fault_on_reboot(x)
37
38 MODULE_AUTHOR("Qumranet");
39 MODULE_LICENSE("GPL");
40
41 static int __read_mostly bypass_guest_pf = 1;
42 module_param(bypass_guest_pf, bool, S_IRUGO);
43
44 static int __read_mostly enable_vpid = 1;
45 module_param_named(vpid, enable_vpid, bool, 0444);
46
47 static int __read_mostly flexpriority_enabled = 1;
48 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
49
50 static int __read_mostly enable_ept = 1;
51 module_param_named(ept, enable_ept, bool, S_IRUGO);
52
53 static int __read_mostly emulate_invalid_guest_state = 0;
54 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
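/*
 * Note: the knobs above are read-only module parameters (S_IRUGO / 0444);
 * once kvm-intel.ko is loaded they can be inspected, but not changed, under
 * /sys/module/kvm_intel/parameters/ (e.g. ".../ept").
 */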
55
56 struct vmcs {
57         u32 revision_id;
58         u32 abort;
59         char data[0];
60 };
61
62 struct vcpu_vmx {
63         struct kvm_vcpu       vcpu;
64         struct list_head      local_vcpus_link;
65         unsigned long         host_rsp;
66         int                   launched;
67         u8                    fail;
68         u32                   idt_vectoring_info;
69         struct kvm_msr_entry *guest_msrs;
70         struct kvm_msr_entry *host_msrs;
71         int                   nmsrs;
72         int                   save_nmsrs;
73         int                   msr_offset_efer;
74 #ifdef CONFIG_X86_64
75         int                   msr_offset_kernel_gs_base;
76 #endif
77         struct vmcs          *vmcs;
78         struct {
79                 int           loaded;
80                 u16           fs_sel, gs_sel, ldt_sel;
81                 int           gs_ldt_reload_needed;
82                 int           fs_reload_needed;
83                 int           guest_efer_loaded;
84         } host_state;
85         struct {
86                 struct {
87                         bool pending;
88                         u8 vector;
89                         unsigned rip;
90                 } irq;
91         } rmode;
92         int vpid;
93         bool emulation_required;
94         enum emulation_result invalid_state_emulation_result;
95
96         /* Support for vnmi-less CPUs */
97         int soft_vnmi_blocked;
98         ktime_t entry_time;
99         s64 vnmi_blocked_time;
100 };
101
102 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
103 {
104         return container_of(vcpu, struct vcpu_vmx, vcpu);
105 }
106
107 static int init_rmode(struct kvm *kvm);
108 static u64 construct_eptp(unsigned long root_hpa);
109
110 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
111 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
112 static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu);
113
114 static unsigned long *vmx_io_bitmap_a;
115 static unsigned long *vmx_io_bitmap_b;
116 static unsigned long *vmx_msr_bitmap_legacy;
117 static unsigned long *vmx_msr_bitmap_longmode;
118
119 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
120 static DEFINE_SPINLOCK(vmx_vpid_lock);
121
122 static struct vmcs_config {
123         int size;
124         int order;
125         u32 revision_id;
126         u32 pin_based_exec_ctrl;
127         u32 cpu_based_exec_ctrl;
128         u32 cpu_based_2nd_exec_ctrl;
129         u32 vmexit_ctrl;
130         u32 vmentry_ctrl;
131 } vmcs_config;
132
133 static struct vmx_capability {
134         u32 ept;
135         u32 vpid;
136 } vmx_capability;
137
138 #define VMX_SEGMENT_FIELD(seg)                                  \
139         [VCPU_SREG_##seg] = {                                   \
140                 .selector = GUEST_##seg##_SELECTOR,             \
141                 .base = GUEST_##seg##_BASE,                     \
142                 .limit = GUEST_##seg##_LIMIT,                   \
143                 .ar_bytes = GUEST_##seg##_AR_BYTES,             \
144         }
145
146 static struct kvm_vmx_segment_field {
147         unsigned selector;
148         unsigned base;
149         unsigned limit;
150         unsigned ar_bytes;
151 } kvm_vmx_segment_fields[] = {
152         VMX_SEGMENT_FIELD(CS),
153         VMX_SEGMENT_FIELD(DS),
154         VMX_SEGMENT_FIELD(ES),
155         VMX_SEGMENT_FIELD(FS),
156         VMX_SEGMENT_FIELD(GS),
157         VMX_SEGMENT_FIELD(SS),
158         VMX_SEGMENT_FIELD(TR),
159         VMX_SEGMENT_FIELD(LDTR),
160 };
161
162 /*
163  * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
164  * away by decrementing the array size.
165  */
166 static const u32 vmx_msr_index[] = {
167 #ifdef CONFIG_X86_64
168         MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
169 #endif
170         MSR_EFER, MSR_K6_STAR,
171 };
172 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
173
174 static void load_msrs(struct kvm_msr_entry *e, int n)
175 {
176         int i;
177
178         for (i = 0; i < n; ++i)
179                 wrmsrl(e[i].index, e[i].data);
180 }
181
182 static void save_msrs(struct kvm_msr_entry *e, int n)
183 {
184         int i;
185
186         for (i = 0; i < n; ++i)
187                 rdmsrl(e[i].index, e[i].data);
188 }
189
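/*
 * The predicates below decode the VM-exit interruption-information field:
 * bits 7:0 hold the vector, bits 10:8 the event type and bit 31 the valid
 * flag, so masking with type | vector | valid matches exactly one kind of
 * hardware exception.
 */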
190 static inline int is_page_fault(u32 intr_info)
191 {
192         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
193                              INTR_INFO_VALID_MASK)) ==
194                 (INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
195 }
196
197 static inline int is_no_device(u32 intr_info)
198 {
199         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
200                              INTR_INFO_VALID_MASK)) ==
201                 (INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
202 }
203
204 static inline int is_invalid_opcode(u32 intr_info)
205 {
206         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
207                              INTR_INFO_VALID_MASK)) ==
208                 (INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
209 }
210
211 static inline int is_external_interrupt(u32 intr_info)
212 {
213         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
214                 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
215 }
216
217 static inline int cpu_has_vmx_msr_bitmap(void)
218 {
219         return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS);
220 }
221
222 static inline int cpu_has_vmx_tpr_shadow(void)
223 {
224         return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW);
225 }
226
227 static inline int vm_need_tpr_shadow(struct kvm *kvm)
228 {
229         return ((cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm)));
230 }
231
232 static inline int cpu_has_secondary_exec_ctrls(void)
233 {
234         return (vmcs_config.cpu_based_exec_ctrl &
235                 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
236 }
237
238 static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
239 {
240         return flexpriority_enabled
241                 && (vmcs_config.cpu_based_2nd_exec_ctrl &
242                     SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
243 }
244
245 static inline int cpu_has_vmx_invept_individual_addr(void)
246 {
247         return (!!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT));
248 }
249
250 static inline int cpu_has_vmx_invept_context(void)
251 {
252         return (!!(vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT));
253 }
254
255 static inline int cpu_has_vmx_invept_global(void)
256 {
257         return (!!(vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT));
258 }
259
260 static inline int cpu_has_vmx_ept(void)
261 {
262         return (vmcs_config.cpu_based_2nd_exec_ctrl &
263                 SECONDARY_EXEC_ENABLE_EPT);
264 }
265
266 static inline int vm_need_ept(void)
267 {
268         return enable_ept;
269 }
270
271 static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
272 {
273         return ((cpu_has_vmx_virtualize_apic_accesses()) &&
274                 (irqchip_in_kernel(kvm)));
275 }
276
277 static inline int cpu_has_vmx_vpid(void)
278 {
279         return (vmcs_config.cpu_based_2nd_exec_ctrl &
280                 SECONDARY_EXEC_ENABLE_VPID);
281 }
282
283 static inline int cpu_has_virtual_nmis(void)
284 {
285         return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
286 }
287
288 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
289 {
290         int i;
291
292         for (i = 0; i < vmx->nmsrs; ++i)
293                 if (vmx->guest_msrs[i].index == msr)
294                         return i;
295         return -1;
296 }
297
298 static inline void __invvpid(int ext, u16 vpid, gva_t gva)
299 {
300         struct {
301                 u64 vpid : 16;
302                 u64 rsvd : 48;
303                 u64 gva;
304         } operand = { vpid, 0, gva };
305
306         asm volatile (__ex(ASM_VMX_INVVPID)
307                       /* CF==1 or ZF==1 --> rc = -1 */
308                       "; ja 1f ; ud2 ; 1:"
309                       : : "a"(&operand), "c"(ext) : "cc", "memory");
310 }
311
312 static inline void __invept(int ext, u64 eptp, gpa_t gpa)
313 {
314         struct {
315                 u64 eptp, gpa;
316         } operand = {eptp, gpa};
317
318         asm volatile (__ex(ASM_VMX_INVEPT)
319                         /* CF==1 or ZF==1 --> rc = -1 */
320                         "; ja 1f ; ud2 ; 1:\n"
321                         : : "a" (&operand), "c" (ext) : "cc", "memory");
322 }
323
324 static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
325 {
326         int i;
327
328         i = __find_msr_index(vmx, msr);
329         if (i >= 0)
330                 return &vmx->guest_msrs[i];
331         return NULL;
332 }
333
334 static void vmcs_clear(struct vmcs *vmcs)
335 {
336         u64 phys_addr = __pa(vmcs);
337         u8 error;
338
339         asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
340                       : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
341                       : "cc", "memory");
342         if (error)
343                 printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
344                        vmcs, phys_addr);
345 }
346
347 static void __vcpu_clear(void *arg)
348 {
349         struct vcpu_vmx *vmx = arg;
350         int cpu = raw_smp_processor_id();
351
352         if (vmx->vcpu.cpu == cpu)
353                 vmcs_clear(vmx->vmcs);
354         if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
355                 per_cpu(current_vmcs, cpu) = NULL;
356         rdtscll(vmx->vcpu.arch.host_tsc);
357         list_del(&vmx->local_vcpus_link);
358         vmx->vcpu.cpu = -1;
359         vmx->launched = 0;
360 }
361
362 static void vcpu_clear(struct vcpu_vmx *vmx)
363 {
364         if (vmx->vcpu.cpu == -1)
365                 return;
366         smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1);
367 }
368
369 static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx)
370 {
371         if (vmx->vpid == 0)
372                 return;
373
374         __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
375 }
376
377 static inline void ept_sync_global(void)
378 {
379         if (cpu_has_vmx_invept_global())
380                 __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
381 }
382
383 static inline void ept_sync_context(u64 eptp)
384 {
385         if (vm_need_ept()) {
386                 if (cpu_has_vmx_invept_context())
387                         __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
388                 else
389                         ept_sync_global();
390         }
391 }
392
393 static inline void ept_sync_individual_addr(u64 eptp, gpa_t gpa)
394 {
395         if (vm_need_ept()) {
396                 if (cpu_has_vmx_invept_individual_addr())
397                         __invept(VMX_EPT_EXTENT_INDIVIDUAL_ADDR,
398                                         eptp, gpa);
399                 else
400                         ept_sync_context(eptp);
401         }
402 }
403
404 static unsigned long vmcs_readl(unsigned long field)
405 {
406         unsigned long value;
407
408         asm volatile (__ex(ASM_VMX_VMREAD_RDX_RAX)
409                       : "=a"(value) : "d"(field) : "cc");
410         return value;
411 }
412
413 static u16 vmcs_read16(unsigned long field)
414 {
415         return vmcs_readl(field);
416 }
417
418 static u32 vmcs_read32(unsigned long field)
419 {
420         return vmcs_readl(field);
421 }
422
423 static u64 vmcs_read64(unsigned long field)
424 {
425 #ifdef CONFIG_X86_64
426         return vmcs_readl(field);
427 #else
428         return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
429 #endif
430 }
431
432 static noinline void vmwrite_error(unsigned long field, unsigned long value)
433 {
434         printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
435                field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
436         dump_stack();
437 }
438
439 static void vmcs_writel(unsigned long field, unsigned long value)
440 {
441         u8 error;
442
443         asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
444                        : "=q"(error) : "a"(value), "d"(field) : "cc");
445         if (unlikely(error))
446                 vmwrite_error(field, value);
447 }
448
449 static void vmcs_write16(unsigned long field, u16 value)
450 {
451         vmcs_writel(field, value);
452 }
453
454 static void vmcs_write32(unsigned long field, u32 value)
455 {
456         vmcs_writel(field, value);
457 }
458
459 static void vmcs_write64(unsigned long field, u64 value)
460 {
461         vmcs_writel(field, value);
462 #ifndef CONFIG_X86_64
463         asm volatile ("");
464         vmcs_writel(field+1, value >> 32);
465 #endif
466 }
467
468 static void vmcs_clear_bits(unsigned long field, u32 mask)
469 {
470         vmcs_writel(field, vmcs_readl(field) & ~mask);
471 }
472
473 static void vmcs_set_bits(unsigned long field, u32 mask)
474 {
475         vmcs_writel(field, vmcs_readl(field) | mask);
476 }
477
478 static void update_exception_bitmap(struct kvm_vcpu *vcpu)
479 {
480         u32 eb;
481
482         eb = (1u << PF_VECTOR) | (1u << UD_VECTOR);
483         if (!vcpu->fpu_active)
484                 eb |= 1u << NM_VECTOR;
485         if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
486                 if (vcpu->guest_debug &
487                     (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
488                         eb |= 1u << DB_VECTOR;
489                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
490                         eb |= 1u << BP_VECTOR;
491         }
492         if (vcpu->arch.rmode.active)
493                 eb = ~0;
494         if (vm_need_ept())
495                 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
496         vmcs_write32(EXCEPTION_BITMAP, eb);
497 }
498
499 static void reload_tss(void)
500 {
501         /*
502          * VT restores TR but not its size.  Useless.
503          */
504         struct descriptor_table gdt;
505         struct desc_struct *descs;
506
507         kvm_get_gdt(&gdt);
508         descs = (void *)gdt.base;
509         descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
510         load_TR_desc();
511 }
512
513 static void load_transition_efer(struct vcpu_vmx *vmx)
514 {
515         int efer_offset = vmx->msr_offset_efer;
516         u64 host_efer = vmx->host_msrs[efer_offset].data;
517         u64 guest_efer = vmx->guest_msrs[efer_offset].data;
518         u64 ignore_bits;
519
520         if (efer_offset < 0)
521                 return;
522         /*
523          * NX is emulated; LMA and LME handled by hardware; SCE meaningless
524          * outside long mode
525          */
526         ignore_bits = EFER_NX | EFER_SCE;
527 #ifdef CONFIG_X86_64
528         ignore_bits |= EFER_LMA | EFER_LME;
529         /* SCE is meaningful only in long mode on Intel */
530         if (guest_efer & EFER_LMA)
531                 ignore_bits &= ~(u64)EFER_SCE;
532 #endif
533         if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits))
534                 return;
535
536         vmx->host_state.guest_efer_loaded = 1;
537         guest_efer &= ~ignore_bits;
538         guest_efer |= host_efer & ignore_bits;
539         wrmsrl(MSR_EFER, guest_efer);
540         vmx->vcpu.stat.efer_reload++;
541 }
542
543 static void reload_host_efer(struct vcpu_vmx *vmx)
544 {
545         if (vmx->host_state.guest_efer_loaded) {
546                 vmx->host_state.guest_efer_loaded = 0;
547                 load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
548         }
549 }
550
551 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
552 {
553         struct vcpu_vmx *vmx = to_vmx(vcpu);
554
555         if (vmx->host_state.loaded)
556                 return;
557
558         vmx->host_state.loaded = 1;
559         /*
560          * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
561          * allow segment selectors with cpl > 0 or ti == 1.
562          */
563         vmx->host_state.ldt_sel = kvm_read_ldt();
564         vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
565         vmx->host_state.fs_sel = kvm_read_fs();
566         if (!(vmx->host_state.fs_sel & 7)) {
567                 vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
568                 vmx->host_state.fs_reload_needed = 0;
569         } else {
570                 vmcs_write16(HOST_FS_SELECTOR, 0);
571                 vmx->host_state.fs_reload_needed = 1;
572         }
573         vmx->host_state.gs_sel = kvm_read_gs();
574         if (!(vmx->host_state.gs_sel & 7))
575                 vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
576         else {
577                 vmcs_write16(HOST_GS_SELECTOR, 0);
578                 vmx->host_state.gs_ldt_reload_needed = 1;
579         }
580
581 #ifdef CONFIG_X86_64
582         vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
583         vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
584 #else
585         vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
586         vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
587 #endif
588
589 #ifdef CONFIG_X86_64
590         if (is_long_mode(&vmx->vcpu))
591                 save_msrs(vmx->host_msrs +
592                           vmx->msr_offset_kernel_gs_base, 1);
593
594 #endif
595         load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
596         load_transition_efer(vmx);
597 }
598
599 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
600 {
601         unsigned long flags;
602
603         if (!vmx->host_state.loaded)
604                 return;
605
606         ++vmx->vcpu.stat.host_state_reload;
607         vmx->host_state.loaded = 0;
608         if (vmx->host_state.fs_reload_needed)
609                 kvm_load_fs(vmx->host_state.fs_sel);
610         if (vmx->host_state.gs_ldt_reload_needed) {
611                 kvm_load_ldt(vmx->host_state.ldt_sel);
612                 /*
613                  * If we have to reload gs, we must take care to
614                  * preserve our gs base.
615                  */
616                 local_irq_save(flags);
617                 kvm_load_gs(vmx->host_state.gs_sel);
618 #ifdef CONFIG_X86_64
619                 wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
620 #endif
621                 local_irq_restore(flags);
622         }
623         reload_tss();
624         save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
625         load_msrs(vmx->host_msrs, vmx->save_nmsrs);
626         reload_host_efer(vmx);
627 }
628
629 static void vmx_load_host_state(struct vcpu_vmx *vmx)
630 {
631         preempt_disable();
632         __vmx_load_host_state(vmx);
633         preempt_enable();
634 }
635
636 /*
637  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
638  * vcpu mutex is already taken.
639  */
640 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
641 {
642         struct vcpu_vmx *vmx = to_vmx(vcpu);
643         u64 phys_addr = __pa(vmx->vmcs);
644         u64 tsc_this, delta, new_offset;
645
646         if (vcpu->cpu != cpu) {
647                 vcpu_clear(vmx);
648                 kvm_migrate_timers(vcpu);
649                 vpid_sync_vcpu_all(vmx);
650                 local_irq_disable();
651                 list_add(&vmx->local_vcpus_link,
652                          &per_cpu(vcpus_on_cpu, cpu));
653                 local_irq_enable();
654         }
655
656         if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
657                 u8 error;
658
659                 per_cpu(current_vmcs, cpu) = vmx->vmcs;
660                 asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
661                               : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
662                               : "cc");
663                 if (error)
664                         printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
665                                vmx->vmcs, phys_addr);
666         }
667
668         if (vcpu->cpu != cpu) {
669                 struct descriptor_table dt;
670                 unsigned long sysenter_esp;
671
672                 vcpu->cpu = cpu;
673                 /*
674                  * Linux uses per-cpu TSS and GDT, so set these when switching
675                  * processors.
676                  */
677                 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
678                 kvm_get_gdt(&dt);
679                 vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */
680
681                 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
682                 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
683
684                 /*
685                  * Make sure the time stamp counter is monotonic.
686                  */
687                 rdtscll(tsc_this);
688                 if (tsc_this < vcpu->arch.host_tsc) {
689                         delta = vcpu->arch.host_tsc - tsc_this;
690                         new_offset = vmcs_read64(TSC_OFFSET) + delta;
691                         vmcs_write64(TSC_OFFSET, new_offset);
692                 }
693         }
694 }
695
696 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
697 {
698         __vmx_load_host_state(to_vmx(vcpu));
699 }
700
701 static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
702 {
703         if (vcpu->fpu_active)
704                 return;
705         vcpu->fpu_active = 1;
706         vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
707         if (vcpu->arch.cr0 & X86_CR0_TS)
708                 vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
709         update_exception_bitmap(vcpu);
710 }
711
712 static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
713 {
714         if (!vcpu->fpu_active)
715                 return;
716         vcpu->fpu_active = 0;
717         vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
718         update_exception_bitmap(vcpu);
719 }
720
721 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
722 {
723         return vmcs_readl(GUEST_RFLAGS);
724 }
725
726 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
727 {
728         if (vcpu->arch.rmode.active)
729                 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
730         vmcs_writel(GUEST_RFLAGS, rflags);
731 }
732
733 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
734 {
735         unsigned long rip;
736         u32 interruptibility;
737
738         rip = kvm_rip_read(vcpu);
739         rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
740         kvm_rip_write(vcpu, rip);
741
742         /*
743          * We emulated an instruction, so temporary interrupt blocking
744          * should be removed, if set.
745          */
746         interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
747         if (interruptibility & 3)
748                 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
749                              interruptibility & ~3);
750         vcpu->arch.interrupt_window_open = 1;
751 }
752
753 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
754                                 bool has_error_code, u32 error_code)
755 {
756         struct vcpu_vmx *vmx = to_vmx(vcpu);
757         u32 intr_info = nr | INTR_INFO_VALID_MASK;
758
759         if (has_error_code) {
760                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
761                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
762         }
763
764         if (vcpu->arch.rmode.active) {
765                 vmx->rmode.irq.pending = true;
766                 vmx->rmode.irq.vector = nr;
767                 vmx->rmode.irq.rip = kvm_rip_read(vcpu);
768                 if (nr == BP_VECTOR || nr == OF_VECTOR)
769                         vmx->rmode.irq.rip++;
770                 intr_info |= INTR_TYPE_SOFT_INTR;
771                 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
772                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
773                 kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
774                 return;
775         }
776
777         if (nr == BP_VECTOR || nr == OF_VECTOR) {
778                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
779                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
780         } else
781                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
782
783         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
784 }
785
786 static bool vmx_exception_injected(struct kvm_vcpu *vcpu)
787 {
788         return false;
789 }
790
791 /*
792  * Swap MSR entry in host/guest MSR entry array.
793  */
794 #ifdef CONFIG_X86_64
795 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
796 {
797         struct kvm_msr_entry tmp;
798
799         tmp = vmx->guest_msrs[to];
800         vmx->guest_msrs[to] = vmx->guest_msrs[from];
801         vmx->guest_msrs[from] = tmp;
802         tmp = vmx->host_msrs[to];
803         vmx->host_msrs[to] = vmx->host_msrs[from];
804         vmx->host_msrs[from] = tmp;
805 }
806 #endif
807
808 /*
809  * Set up the vmcs to automatically save and restore system
810  * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
811  * mode, as fiddling with msrs is very expensive.
812  */
813 static void setup_msrs(struct vcpu_vmx *vmx)
814 {
815         int save_nmsrs;
816         unsigned long *msr_bitmap;
817
818         vmx_load_host_state(vmx);
819         save_nmsrs = 0;
820 #ifdef CONFIG_X86_64
821         if (is_long_mode(&vmx->vcpu)) {
822                 int index;
823
824                 index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
825                 if (index >= 0)
826                         move_msr_up(vmx, index, save_nmsrs++);
827                 index = __find_msr_index(vmx, MSR_LSTAR);
828                 if (index >= 0)
829                         move_msr_up(vmx, index, save_nmsrs++);
830                 index = __find_msr_index(vmx, MSR_CSTAR);
831                 if (index >= 0)
832                         move_msr_up(vmx, index, save_nmsrs++);
833                 index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
834                 if (index >= 0)
835                         move_msr_up(vmx, index, save_nmsrs++);
836                 /*
837                  * MSR_K6_STAR is only needed on long mode guests, and only
838                  * if efer.sce is enabled.
839                  */
840                 index = __find_msr_index(vmx, MSR_K6_STAR);
841                 if ((index >= 0) && (vmx->vcpu.arch.shadow_efer & EFER_SCE))
842                         move_msr_up(vmx, index, save_nmsrs++);
843         }
844 #endif
845         vmx->save_nmsrs = save_nmsrs;
846
847 #ifdef CONFIG_X86_64
848         vmx->msr_offset_kernel_gs_base =
849                 __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
850 #endif
851         vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
852
853         if (cpu_has_vmx_msr_bitmap()) {
854                 if (is_long_mode(&vmx->vcpu))
855                         msr_bitmap = vmx_msr_bitmap_longmode;
856                 else
857                         msr_bitmap = vmx_msr_bitmap_legacy;
858
859                 vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
860         }
861 }
862
863 /*
864  * reads and returns guest's timestamp counter "register"
865  * guest_tsc = host_tsc + tsc_offset    -- 21.3
866  */
867 static u64 guest_read_tsc(void)
868 {
869         u64 host_tsc, tsc_offset;
870
871         rdtscll(host_tsc);
872         tsc_offset = vmcs_read64(TSC_OFFSET);
873         return host_tsc + tsc_offset;
874 }
875
876 /*
877  * writes 'guest_tsc' into guest's timestamp counter "register"
878  * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
879  */
880 static void guest_write_tsc(u64 guest_tsc, u64 host_tsc)
881 {
882         vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
883 }
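/*
 * Example: if the host TSC currently reads 1000 and the guest writes 400,
 * the offset becomes -600; a later guest_read_tsc() at host TSC 1500 then
 * returns 1500 + (-600) = 900.
 */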
884
885 /*
886  * Reads an msr value (of 'msr_index') into 'pdata'.
887  * Returns 0 on success, non-0 otherwise.
888  * Assumes vcpu_load() was already called.
889  */
890 static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
891 {
892         u64 data;
893         struct kvm_msr_entry *msr;
894
895         if (!pdata) {
896                 printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
897                 return -EINVAL;
898         }
899
900         switch (msr_index) {
901 #ifdef CONFIG_X86_64
902         case MSR_FS_BASE:
903                 data = vmcs_readl(GUEST_FS_BASE);
904                 break;
905         case MSR_GS_BASE:
906                 data = vmcs_readl(GUEST_GS_BASE);
907                 break;
908         case MSR_EFER:
909                 return kvm_get_msr_common(vcpu, msr_index, pdata);
910 #endif
911         case MSR_IA32_TIME_STAMP_COUNTER:
912                 data = guest_read_tsc();
913                 break;
914         case MSR_IA32_SYSENTER_CS:
915                 data = vmcs_read32(GUEST_SYSENTER_CS);
916                 break;
917         case MSR_IA32_SYSENTER_EIP:
918                 data = vmcs_readl(GUEST_SYSENTER_EIP);
919                 break;
920         case MSR_IA32_SYSENTER_ESP:
921                 data = vmcs_readl(GUEST_SYSENTER_ESP);
922                 break;
923         default:
924                 vmx_load_host_state(to_vmx(vcpu));
925                 msr = find_msr_entry(to_vmx(vcpu), msr_index);
926                 if (msr) {
927                         data = msr->data;
928                         break;
929                 }
930                 return kvm_get_msr_common(vcpu, msr_index, pdata);
931         }
932
933         *pdata = data;
934         return 0;
935 }
936
937 /*
938  * Writes msr value into the appropriate "register".
939  * Returns 0 on success, non-0 otherwise.
940  * Assumes vcpu_load() was already called.
941  */
942 static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
943 {
944         struct vcpu_vmx *vmx = to_vmx(vcpu);
945         struct kvm_msr_entry *msr;
946         u64 host_tsc;
947         int ret = 0;
948
949         switch (msr_index) {
950         case MSR_EFER:
951                 vmx_load_host_state(vmx);
952                 ret = kvm_set_msr_common(vcpu, msr_index, data);
953                 break;
954 #ifdef CONFIG_X86_64
955         case MSR_FS_BASE:
956                 vmcs_writel(GUEST_FS_BASE, data);
957                 break;
958         case MSR_GS_BASE:
959                 vmcs_writel(GUEST_GS_BASE, data);
960                 break;
961 #endif
962         case MSR_IA32_SYSENTER_CS:
963                 vmcs_write32(GUEST_SYSENTER_CS, data);
964                 break;
965         case MSR_IA32_SYSENTER_EIP:
966                 vmcs_writel(GUEST_SYSENTER_EIP, data);
967                 break;
968         case MSR_IA32_SYSENTER_ESP:
969                 vmcs_writel(GUEST_SYSENTER_ESP, data);
970                 break;
971         case MSR_IA32_TIME_STAMP_COUNTER:
972                 rdtscll(host_tsc);
973                 guest_write_tsc(data, host_tsc);
974                 break;
975         case MSR_P6_PERFCTR0:
976         case MSR_P6_PERFCTR1:
977         case MSR_P6_EVNTSEL0:
978         case MSR_P6_EVNTSEL1:
979                 /*
980                  * Just discard all writes to the performance counters; this
981                  * should keep both older linux and windows 64-bit guests
982                  * happy
983                  */
984                 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", msr_index, data);
985
986                 break;
987         case MSR_IA32_CR_PAT:
988                 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
989                         vmcs_write64(GUEST_IA32_PAT, data);
990                         vcpu->arch.pat = data;
991                         break;
992                 }
993                 /* Otherwise falls through to kvm_set_msr_common */
994         default:
995                 vmx_load_host_state(vmx);
996                 msr = find_msr_entry(vmx, msr_index);
997                 if (msr) {
998                         msr->data = data;
999                         break;
1000                 }
1001                 ret = kvm_set_msr_common(vcpu, msr_index, data);
1002         }
1003
1004         return ret;
1005 }
1006
1007 static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
1008 {
1009         __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
1010         switch (reg) {
1011         case VCPU_REGS_RSP:
1012                 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
1013                 break;
1014         case VCPU_REGS_RIP:
1015                 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
1016                 break;
1017         default:
1018                 break;
1019         }
1020 }
1021
1022 static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
1023 {
1024         int old_debug = vcpu->guest_debug;
1025         unsigned long flags;
1026
1027         vcpu->guest_debug = dbg->control;
1028         if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
1029                 vcpu->guest_debug = 0;
1030
1031         if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
1032                 vmcs_writel(GUEST_DR7, dbg->arch.debugreg[7]);
1033         else
1034                 vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
1035
1036         flags = vmcs_readl(GUEST_RFLAGS);
1037         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
1038                 flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
1039         else if (old_debug & KVM_GUESTDBG_SINGLESTEP)
1040                 flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
1041         vmcs_writel(GUEST_RFLAGS, flags);
1042
1043         update_exception_bitmap(vcpu);
1044
1045         return 0;
1046 }
1047
1048 static int vmx_get_irq(struct kvm_vcpu *vcpu)
1049 {
1050         if (!vcpu->arch.interrupt.pending)
1051                 return -1;
1052         return vcpu->arch.interrupt.nr;
1053 }
1054
1055 static __init int cpu_has_kvm_support(void)
1056 {
1057         return cpu_has_vmx();
1058 }
1059
1060 static __init int vmx_disabled_by_bios(void)
1061 {
1062         u64 msr;
1063
1064         rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
1065         return (msr & (FEATURE_CONTROL_LOCKED |
1066                        FEATURE_CONTROL_VMXON_ENABLED))
1067             == FEATURE_CONTROL_LOCKED;
1068         /* locked but not enabled */
1069 }
1070
1071 static void hardware_enable(void *garbage)
1072 {
1073         int cpu = raw_smp_processor_id();
1074         u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
1075         u64 old;
1076
1077         INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
1078         rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
1079         if ((old & (FEATURE_CONTROL_LOCKED |
1080                     FEATURE_CONTROL_VMXON_ENABLED))
1081             != (FEATURE_CONTROL_LOCKED |
1082                 FEATURE_CONTROL_VMXON_ENABLED))
1083                 /* enable and lock */
1084                 wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
1085                        FEATURE_CONTROL_LOCKED |
1086                        FEATURE_CONTROL_VMXON_ENABLED);
1087         write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
1088         asm volatile (ASM_VMX_VMXON_RAX
1089                       : : "a"(&phys_addr), "m"(phys_addr)
1090                       : "memory", "cc");
1091 }
1092
1093 static void vmclear_local_vcpus(void)
1094 {
1095         int cpu = raw_smp_processor_id();
1096         struct vcpu_vmx *vmx, *n;
1097
1098         list_for_each_entry_safe(vmx, n, &per_cpu(vcpus_on_cpu, cpu),
1099                                  local_vcpus_link)
1100                 __vcpu_clear(vmx);
1101 }
1102
1103
1104 /* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
1105  * tricks.
1106  */
1107 static void kvm_cpu_vmxoff(void)
1108 {
1109         asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
1110         write_cr4(read_cr4() & ~X86_CR4_VMXE);
1111 }
1112
1113 static void hardware_disable(void *garbage)
1114 {
1115         vmclear_local_vcpus();
1116         kvm_cpu_vmxoff();
1117 }
1118
1119 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
1120                                       u32 msr, u32 *result)
1121 {
1122         u32 vmx_msr_low, vmx_msr_high;
1123         u32 ctl = ctl_min | ctl_opt;
1124
1125         rdmsr(msr, vmx_msr_low, vmx_msr_high);
1126
1127         ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
1128         ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */
1129
1130         /* Ensure minimum (required) set of control bits are supported. */
1131         if (ctl_min & ~ctl)
1132                 return -EIO;
1133
1134         *result = ctl;
1135         return 0;
1136 }
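/*
 * Worked example with made-up MSR values: ctl_min = 0x06 and ctl_opt = 0x18
 * give ctl = 0x1e; if the capability MSR reports low = 0x06, high = 0x1a,
 * the adjusted value is (0x1e & 0x1a) | 0x06 = 0x1e.  Every ctl_min bit
 * survives, so 0x1e is stored in *result and 0 is returned.
 */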
1137
1138 static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
1139 {
1140         u32 vmx_msr_low, vmx_msr_high;
1141         u32 min, opt, min2, opt2;
1142         u32 _pin_based_exec_control = 0;
1143         u32 _cpu_based_exec_control = 0;
1144         u32 _cpu_based_2nd_exec_control = 0;
1145         u32 _vmexit_control = 0;
1146         u32 _vmentry_control = 0;
1147
1148         min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
1149         opt = PIN_BASED_VIRTUAL_NMIS;
1150         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
1151                                 &_pin_based_exec_control) < 0)
1152                 return -EIO;
1153
1154         min = CPU_BASED_HLT_EXITING |
1155 #ifdef CONFIG_X86_64
1156               CPU_BASED_CR8_LOAD_EXITING |
1157               CPU_BASED_CR8_STORE_EXITING |
1158 #endif
1159               CPU_BASED_CR3_LOAD_EXITING |
1160               CPU_BASED_CR3_STORE_EXITING |
1161               CPU_BASED_USE_IO_BITMAPS |
1162               CPU_BASED_MOV_DR_EXITING |
1163               CPU_BASED_USE_TSC_OFFSETING |
1164               CPU_BASED_INVLPG_EXITING;
1165         opt = CPU_BASED_TPR_SHADOW |
1166               CPU_BASED_USE_MSR_BITMAPS |
1167               CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
1168         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
1169                                 &_cpu_based_exec_control) < 0)
1170                 return -EIO;
1171 #ifdef CONFIG_X86_64
1172         if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
1173                 _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
1174                                            ~CPU_BASED_CR8_STORE_EXITING;
1175 #endif
1176         if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
1177                 min2 = 0;
1178                 opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
1179                         SECONDARY_EXEC_WBINVD_EXITING |
1180                         SECONDARY_EXEC_ENABLE_VPID |
1181                         SECONDARY_EXEC_ENABLE_EPT;
1182                 if (adjust_vmx_controls(min2, opt2,
1183                                         MSR_IA32_VMX_PROCBASED_CTLS2,
1184                                         &_cpu_based_2nd_exec_control) < 0)
1185                         return -EIO;
1186         }
1187 #ifndef CONFIG_X86_64
1188         if (!(_cpu_based_2nd_exec_control &
1189                                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
1190                 _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
1191 #endif
1192         if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
1193                 /* CR3 accesses and invlpg don't need to cause VM Exits when EPT
1194                    is enabled */
1195                 min &= ~(CPU_BASED_CR3_LOAD_EXITING |
1196                          CPU_BASED_CR3_STORE_EXITING |
1197                          CPU_BASED_INVLPG_EXITING);
1198                 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
1199                                         &_cpu_based_exec_control) < 0)
1200                         return -EIO;
1201                 rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
1202                       vmx_capability.ept, vmx_capability.vpid);
1203         }
1204
1205         if (!cpu_has_vmx_vpid())
1206                 enable_vpid = 0;
1207
1208         if (!cpu_has_vmx_ept())
1209                 enable_ept = 0;
1210
1211         min = 0;
1212 #ifdef CONFIG_X86_64
1213         min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
1214 #endif
1215         opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT;
1216         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
1217                                 &_vmexit_control) < 0)
1218                 return -EIO;
1219
1220         min = 0;
1221         opt = VM_ENTRY_LOAD_IA32_PAT;
1222         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
1223                                 &_vmentry_control) < 0)
1224                 return -EIO;
1225
1226         rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
1227
1228         /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
1229         if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
1230                 return -EIO;
1231
1232 #ifdef CONFIG_X86_64
1233         /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
1234         if (vmx_msr_high & (1u<<16))
1235                 return -EIO;
1236 #endif
1237
1238         /* Require Write-Back (WB) memory type for VMCS accesses. */
1239         if (((vmx_msr_high >> 18) & 15) != 6)
1240                 return -EIO;
1241
1242         vmcs_conf->size = vmx_msr_high & 0x1fff;
1243         vmcs_conf->order = get_order(vmcs_config.size);
1244         vmcs_conf->revision_id = vmx_msr_low;
1245
1246         vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
1247         vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
1248         vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
1249         vmcs_conf->vmexit_ctrl         = _vmexit_control;
1250         vmcs_conf->vmentry_ctrl        = _vmentry_control;
1251
1252         return 0;
1253 }
1254
1255 static struct vmcs *alloc_vmcs_cpu(int cpu)
1256 {
1257         int node = cpu_to_node(cpu);
1258         struct page *pages;
1259         struct vmcs *vmcs;
1260
1261         pages = alloc_pages_node(node, GFP_KERNEL, vmcs_config.order);
1262         if (!pages)
1263                 return NULL;
1264         vmcs = page_address(pages);
1265         memset(vmcs, 0, vmcs_config.size);
1266         vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
1267         return vmcs;
1268 }
1269
1270 static struct vmcs *alloc_vmcs(void)
1271 {
1272         return alloc_vmcs_cpu(raw_smp_processor_id());
1273 }
1274
1275 static void free_vmcs(struct vmcs *vmcs)
1276 {
1277         free_pages((unsigned long)vmcs, vmcs_config.order);
1278 }
1279
1280 static void free_kvm_area(void)
1281 {
1282         int cpu;
1283
1284         for_each_online_cpu(cpu)
1285                 free_vmcs(per_cpu(vmxarea, cpu));
1286 }
1287
1288 static __init int alloc_kvm_area(void)
1289 {
1290         int cpu;
1291
1292         for_each_online_cpu(cpu) {
1293                 struct vmcs *vmcs;
1294
1295                 vmcs = alloc_vmcs_cpu(cpu);
1296                 if (!vmcs) {
1297                         free_kvm_area();
1298                         return -ENOMEM;
1299                 }
1300
1301                 per_cpu(vmxarea, cpu) = vmcs;
1302         }
1303         return 0;
1304 }
1305
1306 static __init int hardware_setup(void)
1307 {
1308         if (setup_vmcs_config(&vmcs_config) < 0)
1309                 return -EIO;
1310
1311         if (boot_cpu_has(X86_FEATURE_NX))
1312                 kvm_enable_efer_bits(EFER_NX);
1313
1314         return alloc_kvm_area();
1315 }
1316
1317 static __exit void hardware_unsetup(void)
1318 {
1319         free_kvm_area();
1320 }
1321
1322 static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
1323 {
1324         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1325
1326         if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) {
1327                 vmcs_write16(sf->selector, save->selector);
1328                 vmcs_writel(sf->base, save->base);
1329                 vmcs_write32(sf->limit, save->limit);
1330                 vmcs_write32(sf->ar_bytes, save->ar);
1331         } else {
1332                 u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
1333                         << AR_DPL_SHIFT;
1334                 vmcs_write32(sf->ar_bytes, 0x93 | dpl);
1335         }
1336 }
1337
1338 static void enter_pmode(struct kvm_vcpu *vcpu)
1339 {
1340         unsigned long flags;
1341         struct vcpu_vmx *vmx = to_vmx(vcpu);
1342
1343         vmx->emulation_required = 1;
1344         vcpu->arch.rmode.active = 0;
1345
1346         vmcs_writel(GUEST_TR_BASE, vcpu->arch.rmode.tr.base);
1347         vmcs_write32(GUEST_TR_LIMIT, vcpu->arch.rmode.tr.limit);
1348         vmcs_write32(GUEST_TR_AR_BYTES, vcpu->arch.rmode.tr.ar);
1349
1350         flags = vmcs_readl(GUEST_RFLAGS);
1351         flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
1352         flags |= (vcpu->arch.rmode.save_iopl << IOPL_SHIFT);
1353         vmcs_writel(GUEST_RFLAGS, flags);
1354
1355         vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
1356                         (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
1357
1358         update_exception_bitmap(vcpu);
1359
1360         if (emulate_invalid_guest_state)
1361                 return;
1362
1363         fix_pmode_dataseg(VCPU_SREG_ES, &vcpu->arch.rmode.es);
1364         fix_pmode_dataseg(VCPU_SREG_DS, &vcpu->arch.rmode.ds);
1365         fix_pmode_dataseg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
1366         fix_pmode_dataseg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);
1367
1368         vmcs_write16(GUEST_SS_SELECTOR, 0);
1369         vmcs_write32(GUEST_SS_AR_BYTES, 0x93);
1370
1371         vmcs_write16(GUEST_CS_SELECTOR,
1372                      vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
1373         vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
1374 }
1375
1376 static gva_t rmode_tss_base(struct kvm *kvm)
1377 {
1378         if (!kvm->arch.tss_addr) {
1379                 gfn_t base_gfn = kvm->memslots[0].base_gfn +
1380                                  kvm->memslots[0].npages - 3;
1381                 return base_gfn << PAGE_SHIFT;
1382         }
1383         return kvm->arch.tss_addr;
1384 }
1385
1386 static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
1387 {
1388         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1389
1390         save->selector = vmcs_read16(sf->selector);
1391         save->base = vmcs_readl(sf->base);
1392         save->limit = vmcs_read32(sf->limit);
1393         save->ar = vmcs_read32(sf->ar_bytes);
1394         vmcs_write16(sf->selector, save->base >> 4);
1395         vmcs_write32(sf->base, save->base & 0xfffff);
1396         vmcs_write32(sf->limit, 0xffff);
1397         vmcs_write32(sf->ar_bytes, 0xf3);
1398 }
1399
1400 static void enter_rmode(struct kvm_vcpu *vcpu)
1401 {
1402         unsigned long flags;
1403         struct vcpu_vmx *vmx = to_vmx(vcpu);
1404
1405         vmx->emulation_required = 1;
1406         vcpu->arch.rmode.active = 1;
1407
1408         vcpu->arch.rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
1409         vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
1410
1411         vcpu->arch.rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
1412         vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
1413
1414         vcpu->arch.rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
1415         vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
1416
1417         flags = vmcs_readl(GUEST_RFLAGS);
1418         vcpu->arch.rmode.save_iopl
1419                 = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1420
1421         flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
1422
1423         vmcs_writel(GUEST_RFLAGS, flags);
1424         vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
1425         update_exception_bitmap(vcpu);
1426
1427         if (emulate_invalid_guest_state)
1428                 goto continue_rmode;
1429
1430         vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
1431         vmcs_write32(GUEST_SS_LIMIT, 0xffff);
1432         vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);
1433
1434         vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
1435         vmcs_write32(GUEST_CS_LIMIT, 0xffff);
1436         if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
1437                 vmcs_writel(GUEST_CS_BASE, 0xf0000);
1438         vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);
1439
1440         fix_rmode_seg(VCPU_SREG_ES, &vcpu->arch.rmode.es);
1441         fix_rmode_seg(VCPU_SREG_DS, &vcpu->arch.rmode.ds);
1442         fix_rmode_seg(VCPU_SREG_GS, &vcpu->arch.rmode.gs);
1443         fix_rmode_seg(VCPU_SREG_FS, &vcpu->arch.rmode.fs);
1444
1445 continue_rmode:
1446         kvm_mmu_reset_context(vcpu);
1447         init_rmode(vcpu->kvm);
1448 }
1449
1450 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
1451 {
1452         struct vcpu_vmx *vmx = to_vmx(vcpu);
1453         struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
1454
1455         vcpu->arch.shadow_efer = efer;
1456         if (!msr)
1457                 return;
1458         if (efer & EFER_LMA) {
1459                 vmcs_write32(VM_ENTRY_CONTROLS,
1460                              vmcs_read32(VM_ENTRY_CONTROLS) |
1461                              VM_ENTRY_IA32E_MODE);
1462                 msr->data = efer;
1463         } else {
1464                 vmcs_write32(VM_ENTRY_CONTROLS,
1465                              vmcs_read32(VM_ENTRY_CONTROLS) &
1466                              ~VM_ENTRY_IA32E_MODE);
1467
1468                 msr->data = efer & ~EFER_LME;
1469         }
1470         setup_msrs(vmx);
1471 }
1472
1473 #ifdef CONFIG_X86_64
1474
1475 static void enter_lmode(struct kvm_vcpu *vcpu)
1476 {
1477         u32 guest_tr_ar;
1478
1479         guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
1480         if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
1481                 printk(KERN_DEBUG "%s: tss fixup for long mode. \n",
1482                        __func__);
1483                 vmcs_write32(GUEST_TR_AR_BYTES,
1484                              (guest_tr_ar & ~AR_TYPE_MASK)
1485                              | AR_TYPE_BUSY_64_TSS);
1486         }
1487         vcpu->arch.shadow_efer |= EFER_LMA;
1488         vmx_set_efer(vcpu, vcpu->arch.shadow_efer);
1489 }
1490
1491 static void exit_lmode(struct kvm_vcpu *vcpu)
1492 {
1493         vcpu->arch.shadow_efer &= ~EFER_LMA;
1494
1495         vmcs_write32(VM_ENTRY_CONTROLS,
1496                      vmcs_read32(VM_ENTRY_CONTROLS)
1497                      & ~VM_ENTRY_IA32E_MODE);
1498 }
1499
1500 #endif
1501
1502 static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
1503 {
1504         vpid_sync_vcpu_all(to_vmx(vcpu));
1505         if (vm_need_ept())
1506                 ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
1507 }
1508
1509 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
1510 {
1511         vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK;
1512         vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
1513 }
1514
1515 static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
1516 {
1517         if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
1518                 if (!load_pdptrs(vcpu, vcpu->arch.cr3)) {
1519                         printk(KERN_ERR "EPT: Fail to load pdptrs!\n");
1520                         return;
1521                 }
1522                 vmcs_write64(GUEST_PDPTR0, vcpu->arch.pdptrs[0]);
1523                 vmcs_write64(GUEST_PDPTR1, vcpu->arch.pdptrs[1]);
1524                 vmcs_write64(GUEST_PDPTR2, vcpu->arch.pdptrs[2]);
1525                 vmcs_write64(GUEST_PDPTR3, vcpu->arch.pdptrs[3]);
1526         }
1527 }
1528
1529 static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
1530
1531 static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
1532                                         unsigned long cr0,
1533                                         struct kvm_vcpu *vcpu)
1534 {
1535         if (!(cr0 & X86_CR0_PG)) {
1536                 /* From paging/starting to nonpaging */
1537                 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
1538                              vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
1539                              (CPU_BASED_CR3_LOAD_EXITING |
1540                               CPU_BASED_CR3_STORE_EXITING));
1541                 vcpu->arch.cr0 = cr0;
1542                 vmx_set_cr4(vcpu, vcpu->arch.cr4);
1543                 *hw_cr0 |= X86_CR0_PE | X86_CR0_PG;
1544                 *hw_cr0 &= ~X86_CR0_WP;
1545         } else if (!is_paging(vcpu)) {
1546                 /* From nonpaging to paging */
1547                 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
1548                              vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
1549                              ~(CPU_BASED_CR3_LOAD_EXITING |
1550                                CPU_BASED_CR3_STORE_EXITING));
1551                 vcpu->arch.cr0 = cr0;
1552                 vmx_set_cr4(vcpu, vcpu->arch.cr4);
1553                 if (!(vcpu->arch.cr0 & X86_CR0_WP))
1554                         *hw_cr0 &= ~X86_CR0_WP;
1555         }
1556 }
1557
1558 static void ept_update_paging_mode_cr4(unsigned long *hw_cr4,
1559                                         struct kvm_vcpu *vcpu)
1560 {
1561         if (!is_paging(vcpu)) {
1562                 *hw_cr4 &= ~X86_CR4_PAE;
1563                 *hw_cr4 |= X86_CR4_PSE;
1564         } else if (!(vcpu->arch.cr4 & X86_CR4_PAE))
1565                 *hw_cr4 &= ~X86_CR4_PAE;
1566 }
1567
1568 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1569 {
1570         unsigned long hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) |
1571                                 KVM_VM_CR0_ALWAYS_ON;
1572
1573         vmx_fpu_deactivate(vcpu);
1574
1575         if (vcpu->arch.rmode.active && (cr0 & X86_CR0_PE))
1576                 enter_pmode(vcpu);
1577
1578         if (!vcpu->arch.rmode.active && !(cr0 & X86_CR0_PE))
1579                 enter_rmode(vcpu);
1580
1581 #ifdef CONFIG_X86_64
1582         if (vcpu->arch.shadow_efer & EFER_LME) {
1583                 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
1584                         enter_lmode(vcpu);
1585                 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
1586                         exit_lmode(vcpu);
1587         }
1588 #endif
1589
1590         if (vm_need_ept())
1591                 ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
1592
1593         vmcs_writel(CR0_READ_SHADOW, cr0);
1594         vmcs_writel(GUEST_CR0, hw_cr0);
1595         vcpu->arch.cr0 = cr0;
1596
1597         if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
1598                 vmx_fpu_activate(vcpu);
1599 }
1600
1601 static u64 construct_eptp(unsigned long root_hpa)
1602 {
1603         u64 eptp;
1604
1605         /* TODO write the value reading from MSR */
1606         eptp = VMX_EPT_DEFAULT_MT |
1607                 VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
1608         eptp |= (root_hpa & PAGE_MASK);
1609
1610         return eptp;
1611 }
1612
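/*
 * With EPT enabled, GUEST_CR3 still has to point at something the guest
 * can use: the guest's own cr3 once it enables paging, or the identity
 * map built by init_rmode_identity_map() while it is still unpaged.
 */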
1613 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
1614 {
1615         unsigned long guest_cr3;
1616         u64 eptp;
1617
1618         guest_cr3 = cr3;
1619         if (vm_need_ept()) {
1620                 eptp = construct_eptp(cr3);
1621                 vmcs_write64(EPT_POINTER, eptp);
1622                 ept_sync_context(eptp);
1623                 ept_load_pdptrs(vcpu);
1624                 guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 :
1625                         VMX_EPT_IDENTITY_PAGETABLE_ADDR;
1626         }
1627
1628         vmx_flush_tlb(vcpu);
1629         vmcs_writel(GUEST_CR3, guest_cr3);
1630         if (vcpu->arch.cr0 & X86_CR0_PE)
1631                 vmx_fpu_deactivate(vcpu);
1632 }
1633
1634 static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1635 {
1636         unsigned long hw_cr4 = cr4 | (vcpu->arch.rmode.active ?
1637                     KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
1638
1639         vcpu->arch.cr4 = cr4;
1640         if (vm_need_ept())
1641                 ept_update_paging_mode_cr4(&hw_cr4, vcpu);
1642
1643         vmcs_writel(CR4_READ_SHADOW, cr4);
1644         vmcs_writel(GUEST_CR4, hw_cr4);
1645 }
1646
1647 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
1648 {
1649         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1650
1651         return vmcs_readl(sf->base);
1652 }
1653
1654 static void vmx_get_segment(struct kvm_vcpu *vcpu,
1655                             struct kvm_segment *var, int seg)
1656 {
1657         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1658         u32 ar;
1659
1660         var->base = vmcs_readl(sf->base);
1661         var->limit = vmcs_read32(sf->limit);
1662         var->selector = vmcs_read16(sf->selector);
1663         ar = vmcs_read32(sf->ar_bytes);
1664         if ((ar & AR_UNUSABLE_MASK) && !emulate_invalid_guest_state)
1665                 ar = 0;
1666         var->type = ar & 15;
1667         var->s = (ar >> 4) & 1;
1668         var->dpl = (ar >> 5) & 3;
1669         var->present = (ar >> 7) & 1;
1670         var->avl = (ar >> 12) & 1;
1671         var->l = (ar >> 13) & 1;
1672         var->db = (ar >> 14) & 1;
1673         var->g = (ar >> 15) & 1;
1674         var->unusable = (ar >> 16) & 1;
1675 }
1676
1677 static int vmx_get_cpl(struct kvm_vcpu *vcpu)
1678 {
1679         struct kvm_segment kvm_seg;
1680
1681         if (!(vcpu->arch.cr0 & X86_CR0_PE)) /* if real mode */
1682                 return 0;
1683
1684         if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */
1685                 return 3;
1686
1687         vmx_get_segment(vcpu, &kvm_seg, VCPU_SREG_CS);
1688         return kvm_seg.selector & 3;
1689 }
1690
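/*
 * VMX access-rights byte layout used here and in vmx_get_segment():
 *   bits 3:0  type            bit 12  AVL
 *   bit  4    S (code/data)   bit 13  L (64-bit code)
 *   bits 6:5  DPL             bit 14  D/B
 *   bit  7    present         bit 15  granularity
 *   bit  16   unusable
 */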
1691 static u32 vmx_segment_access_rights(struct kvm_segment *var)
1692 {
1693         u32 ar;
1694
1695         if (var->unusable)
1696                 ar = 1 << 16;
1697         else {
1698                 ar = var->type & 15;
1699                 ar |= (var->s & 1) << 4;
1700                 ar |= (var->dpl & 3) << 5;
1701                 ar |= (var->present & 1) << 7;
1702                 ar |= (var->avl & 1) << 12;
1703                 ar |= (var->l & 1) << 13;
1704                 ar |= (var->db & 1) << 14;
1705                 ar |= (var->g & 1) << 15;
1706         }
1707         if (ar == 0) /* a 0 value means unusable */
1708                 ar = AR_UNUSABLE_MASK;
1709
1710         return ar;
1711 }
1712
1713 static void vmx_set_segment(struct kvm_vcpu *vcpu,
1714                             struct kvm_segment *var, int seg)
1715 {
1716         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1717         u32 ar;
1718
1719         if (vcpu->arch.rmode.active && seg == VCPU_SREG_TR) {
1720                 vcpu->arch.rmode.tr.selector = var->selector;
1721                 vcpu->arch.rmode.tr.base = var->base;
1722                 vcpu->arch.rmode.tr.limit = var->limit;
1723                 vcpu->arch.rmode.tr.ar = vmx_segment_access_rights(var);
1724                 return;
1725         }
1726         vmcs_writel(sf->base, var->base);
1727         vmcs_write32(sf->limit, var->limit);
1728         vmcs_write16(sf->selector, var->selector);
1729         if (vcpu->arch.rmode.active && var->s) {
1730                 /*
1731                  * Hack real-mode segments into vm86 compatibility.
1732                  */
1733                 if (var->base == 0xffff0000 && var->selector == 0xf000)
1734                         vmcs_writel(sf->base, 0xf0000);
1735                 ar = 0xf3;
1736         } else
1737                 ar = vmx_segment_access_rights(var);
1738         vmcs_write32(sf->ar_bytes, ar);
1739 }
1740
1741 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
1742 {
1743         u32 ar = vmcs_read32(GUEST_CS_AR_BYTES);
1744
1745         *db = (ar >> 14) & 1;
1746         *l = (ar >> 13) & 1;
1747 }
1748
1749 static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1750 {
1751         dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
1752         dt->base = vmcs_readl(GUEST_IDTR_BASE);
1753 }
1754
1755 static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1756 {
1757         vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
1758         vmcs_writel(GUEST_IDTR_BASE, dt->base);
1759 }
1760
1761 static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1762 {
1763         dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
1764         dt->base = vmcs_readl(GUEST_GDTR_BASE);
1765 }
1766
1767 static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1768 {
1769         vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
1770         vmcs_writel(GUEST_GDTR_BASE, dt->base);
1771 }
1772
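/*
 * A segment only counts as a valid real-mode (vm86-compatible) segment
 * if base == selector << 4, the limit is 64KB and the access-rights byte
 * is 0xf3, matching what the vm86 real-mode hack expects.
 */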
1773 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
1774 {
1775         struct kvm_segment var;
1776         u32 ar;
1777
1778         vmx_get_segment(vcpu, &var, seg);
1779         ar = vmx_segment_access_rights(&var);
1780
1781         if (var.base != (var.selector << 4))
1782                 return false;
1783         if (var.limit != 0xffff)
1784                 return false;
1785         if (ar != 0xf3)
1786                 return false;
1787
1788         return true;
1789 }
1790
1791 static bool code_segment_valid(struct kvm_vcpu *vcpu)
1792 {
1793         struct kvm_segment cs;
1794         unsigned int cs_rpl;
1795
1796         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
1797         cs_rpl = cs.selector & SELECTOR_RPL_MASK;
1798
1799         if (cs.unusable)
1800                 return false;
1801         if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK))
1802                 return false;
1803         if (!cs.s)
1804                 return false;
1805         if (cs.type & AR_TYPE_WRITEABLE_MASK) {
1806                 if (cs.dpl > cs_rpl)
1807                         return false;
1808         } else {
1809                 if (cs.dpl != cs_rpl)
1810                         return false;
1811         }
1812         if (!cs.present)
1813                 return false;
1814
1815         /* TODO: Add a Reserved field check; this will require a new member in the kvm_segment_field structure */
1816         return true;
1817 }
1818
1819 static bool stack_segment_valid(struct kvm_vcpu *vcpu)
1820 {
1821         struct kvm_segment ss;
1822         unsigned int ss_rpl;
1823
1824         vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
1825         ss_rpl = ss.selector & SELECTOR_RPL_MASK;
1826
1827         if (ss.unusable)
1828                 return true;
1829         if (ss.type != 3 && ss.type != 7)
1830                 return false;
1831         if (!ss.s)
1832                 return false;
1833         if (ss.dpl != ss_rpl) /* DPL != RPL */
1834                 return false;
1835         if (!ss.present)
1836                 return false;
1837
1838         return true;
1839 }
1840
1841 static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
1842 {
1843         struct kvm_segment var;
1844         unsigned int rpl;
1845
1846         vmx_get_segment(vcpu, &var, seg);
1847         rpl = var.selector & SELECTOR_RPL_MASK;
1848
1849         if (var.unusable)
1850                 return true;
1851         if (!var.s)
1852                 return false;
1853         if (!var.present)
1854                 return false;
1855         if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) {
1856                 if (var.dpl < rpl) /* DPL < RPL */
1857                         return false;
1858         }
1859
1860         /* TODO: Add other members to kvm_segment_field to allow
1861          * checking for other access-rights flags.
1862          */
1863         return true;
1864 }
1865
1866 static bool tr_valid(struct kvm_vcpu *vcpu)
1867 {
1868         struct kvm_segment tr;
1869
1870         vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
1871
1872         if (tr.unusable)
1873                 return false;
1874         if (tr.selector & SELECTOR_TI_MASK)     /* TI = 1 */
1875                 return false;
1876         if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
1877                 return false;
1878         if (!tr.present)
1879                 return false;
1880
1881         return true;
1882 }
1883
1884 static bool ldtr_valid(struct kvm_vcpu *vcpu)
1885 {
1886         struct kvm_segment ldtr;
1887
1888         vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
1889
1890         if (ldtr.unusable)
1891                 return true;
1892         if (ldtr.selector & SELECTOR_TI_MASK)   /* TI = 1 */
1893                 return false;
1894         if (ldtr.type != 2)
1895                 return false;
1896         if (!ldtr.present)
1897                 return false;
1898
1899         return true;
1900 }
1901
1902 static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
1903 {
1904         struct kvm_segment cs, ss;
1905
1906         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
1907         vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
1908
1909         return ((cs.selector & SELECTOR_RPL_MASK) ==
1910                  (ss.selector & SELECTOR_RPL_MASK));
1911 }
1912
1913 /*
1914  * Check if guest state is valid. Returns true if valid, false if
1915  * not.
1916  * We assume that the segment registers are always usable.
1917  */
1918 static bool guest_state_valid(struct kvm_vcpu *vcpu)
1919 {
1920         /* real mode guest state checks */
1921         if (!(vcpu->arch.cr0 & X86_CR0_PE)) {
1922                 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
1923                         return false;
1924                 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
1925                         return false;
1926                 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
1927                         return false;
1928                 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
1929                         return false;
1930                 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
1931                         return false;
1932                 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
1933                         return false;
1934         } else {
1935         /* protected mode guest state checks */
1936                 if (!cs_ss_rpl_check(vcpu))
1937                         return false;
1938                 if (!code_segment_valid(vcpu))
1939                         return false;
1940                 if (!stack_segment_valid(vcpu))
1941                         return false;
1942                 if (!data_segment_valid(vcpu, VCPU_SREG_DS))
1943                         return false;
1944                 if (!data_segment_valid(vcpu, VCPU_SREG_ES))
1945                         return false;
1946                 if (!data_segment_valid(vcpu, VCPU_SREG_FS))
1947                         return false;
1948                 if (!data_segment_valid(vcpu, VCPU_SREG_GS))
1949                         return false;
1950                 if (!tr_valid(vcpu))
1951                         return false;
1952                 if (!ldtr_valid(vcpu))
1953                         return false;
1954         }
1955         /* TODO:
1956          * - Add checks on RIP
1957          * - Add checks on RFLAGS
1958          */
1959
1960         return true;
1961 }
1962
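/*
 * Rough sketch of the writes below: clear the three TSS pages, point the
 * I/O permission bitmap just past the base TSS and interrupt-redirection
 * map, and set the last byte of the TSS to 0xff so the I/O bitmap is
 * properly terminated.
 */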
1963 static int init_rmode_tss(struct kvm *kvm)
1964 {
1965         gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
1966         u16 data = 0;
1967         int ret = 0;
1968         int r;
1969
1970         r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
1971         if (r < 0)
1972                 goto out;
1973         data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
1974         r = kvm_write_guest_page(kvm, fn++, &data,
1975                         TSS_IOPB_BASE_OFFSET, sizeof(u16));
1976         if (r < 0)
1977                 goto out;
1978         r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
1979         if (r < 0)
1980                 goto out;
1981         r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
1982         if (r < 0)
1983                 goto out;
1984         data = ~0;
1985         r = kvm_write_guest_page(kvm, fn, &data,
1986                                  RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
1987                                  sizeof(u8));
1988         if (r < 0)
1989                 goto out;
1990
1991         ret = 1;
1992 out:
1993         return ret;
1994 }
1995
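/*
 * The identity map is a single 32-bit page directory whose 1024 entries
 * each map a 4MB page (PSE) onto itself, i.e. entry i covers the region
 * starting at i << 22.  It gives the guest something to run on under EPT
 * while it is still unpaged, before it builds its own page tables.
 */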
1996 static int init_rmode_identity_map(struct kvm *kvm)
1997 {
1998         int i, r, ret;
1999         pfn_t identity_map_pfn;
2000         u32 tmp;
2001
2002         if (!vm_need_ept())
2003                 return 1;
2004         if (unlikely(!kvm->arch.ept_identity_pagetable)) {
2005                 printk(KERN_ERR "EPT: identity-mapping pagetable "
2006                         "has not been allocated!\n");
2007                 return 0;
2008         }
2009         if (likely(kvm->arch.ept_identity_pagetable_done))
2010                 return 1;
2011         ret = 0;
2012         identity_map_pfn = VMX_EPT_IDENTITY_PAGETABLE_ADDR >> PAGE_SHIFT;
2013         r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
2014         if (r < 0)
2015                 goto out;
2016         /* Set up identity-mapping pagetable for EPT in real mode */
2017         for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
2018                 tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
2019                         _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
2020                 r = kvm_write_guest_page(kvm, identity_map_pfn,
2021                                 &tmp, i * sizeof(tmp), sizeof(tmp));
2022                 if (r < 0)
2023                         goto out;
2024         }
2025         kvm->arch.ept_identity_pagetable_done = true;
2026         ret = 1;
2027 out:
2028         return ret;
2029 }
2030
2031 static void seg_setup(int seg)
2032 {
2033         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
2034
2035         vmcs_write16(sf->selector, 0);
2036         vmcs_writel(sf->base, 0);
2037         vmcs_write32(sf->limit, 0xffff);
2038         vmcs_write32(sf->ar_bytes, 0xf3);
2039 }
2040
2041 static int alloc_apic_access_page(struct kvm *kvm)
2042 {
2043         struct kvm_userspace_memory_region kvm_userspace_mem;
2044         int r = 0;
2045
2046         down_write(&kvm->slots_lock);
2047         if (kvm->arch.apic_access_page)
2048                 goto out;
2049         kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
2050         kvm_userspace_mem.flags = 0;
2051         kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
2052         kvm_userspace_mem.memory_size = PAGE_SIZE;
2053         r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
2054         if (r)
2055                 goto out;
2056
2057         kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
2058 out:
2059         up_write(&kvm->slots_lock);
2060         return r;
2061 }
2062
2063 static int alloc_identity_pagetable(struct kvm *kvm)
2064 {
2065         struct kvm_userspace_memory_region kvm_userspace_mem;
2066         int r = 0;
2067
2068         down_write(&kvm->slots_lock);
2069         if (kvm->arch.ept_identity_pagetable)
2070                 goto out;
2071         kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
2072         kvm_userspace_mem.flags = 0;
2073         kvm_userspace_mem.guest_phys_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR;
2074         kvm_userspace_mem.memory_size = PAGE_SIZE;
2075         r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
2076         if (r)
2077                 goto out;
2078
2079         kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
2080                         VMX_EPT_IDENTITY_PAGETABLE_ADDR >> PAGE_SHIFT);
2081 out:
2082         up_write(&kvm->slots_lock);
2083         return r;
2084 }
2085
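/*
 * VPID 0 is reserved for the host and doubles as "no VPID": a vcpu only
 * gets a non-zero tag if the vpid module parameter is set and a free bit
 * is found in vmx_vpid_bitmap.
 */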
2086 static void allocate_vpid(struct vcpu_vmx *vmx)
2087 {
2088         int vpid;
2089
2090         vmx->vpid = 0;
2091         if (!enable_vpid)
2092                 return;
2093         spin_lock(&vmx_vpid_lock);
2094         vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
2095         if (vpid < VMX_NR_VPIDS) {
2096                 vmx->vpid = vpid;
2097                 __set_bit(vpid, vmx_vpid_bitmap);
2098         }
2099         spin_unlock(&vmx_vpid_lock);
2100 }
2101
2102 static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
2103 {
2104         int f = sizeof(unsigned long);
2105
2106         if (!cpu_has_vmx_msr_bitmap())
2107                 return;
2108
2109         /*
2110          * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
2111          * have the write-low and read-high bitmap offsets the wrong way round.
2112          * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
2113          */
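        /*
         * Offsets within the 4KB bitmap page, one bit per MSR (1 = intercept):
         *   0x000  read  bitmap for MSRs 0x00000000-0x00001fff
         *   0x400  read  bitmap for MSRs 0xc0000000-0xc0001fff
         *   0x800  write bitmap for MSRs 0x00000000-0x00001fff
         *   0xc00  write bitmap for MSRs 0xc0000000-0xc0001fff
         */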
2114         if (msr <= 0x1fff) {
2115                 __clear_bit(msr, msr_bitmap + 0x000 / f); /* read-low */
2116                 __clear_bit(msr, msr_bitmap + 0x800 / f); /* write-low */
2117         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
2118                 msr &= 0x1fff;
2119                 __clear_bit(msr, msr_bitmap + 0x400 / f); /* read-high */
2120                 __clear_bit(msr, msr_bitmap + 0xc00 / f); /* write-high */
2121         }
2122 }
2123
2124 static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
2125 {
2126         if (!longmode_only)
2127                 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, msr);
2128         __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, msr);
2129 }
2130
2131 /*
2132  * Sets up the vmcs for emulated real mode.
2133  */
2134 static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
2135 {
2136         u32 host_sysenter_cs, msr_low, msr_high;
2137         u32 junk;
2138         u64 host_pat, tsc_this, tsc_base;
2139         unsigned long a;
2140         struct descriptor_table dt;
2141         int i;
2142         unsigned long kvm_vmx_return;
2143         u32 exec_control;
2144
2145         /* I/O */
2146         vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
2147         vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));
2148
2149         if (cpu_has_vmx_msr_bitmap())
2150                 vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));
2151
2152         vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
2153
2154         /* Control */
2155         vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
2156                 vmcs_config.pin_based_exec_ctrl);
2157
2158         exec_control = vmcs_config.cpu_based_exec_ctrl;
2159         if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
2160                 exec_control &= ~CPU_BASED_TPR_SHADOW;
2161 #ifdef CONFIG_X86_64
2162                 exec_control |= CPU_BASED_CR8_STORE_EXITING |
2163                                 CPU_BASED_CR8_LOAD_EXITING;
2164 #endif
2165         }
2166         if (!vm_need_ept())
2167                 exec_control |= CPU_BASED_CR3_STORE_EXITING |
2168                                 CPU_BASED_CR3_LOAD_EXITING  |
2169                                 CPU_BASED_INVLPG_EXITING;
2170         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
2171
2172         if (cpu_has_secondary_exec_ctrls()) {
2173                 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
2174                 if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
2175                         exec_control &=
2176                                 ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
2177                 if (vmx->vpid == 0)
2178                         exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
2179                 if (!vm_need_ept())
2180                         exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
2181                 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
2182         }
2183
2184         vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf);
2185         vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
2186         vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */
2187
2188         vmcs_writel(HOST_CR0, read_cr0());  /* 22.2.3 */
2189         vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
2190         vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
2191
2192         vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
2193         vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
2194         vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
2195         vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs());    /* 22.2.4 */
2196         vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs());    /* 22.2.4 */
2197         vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
2198 #ifdef CONFIG_X86_64
2199         rdmsrl(MSR_FS_BASE, a);
2200         vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
2201         rdmsrl(MSR_GS_BASE, a);
2202         vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
2203 #else
2204         vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
2205         vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
2206 #endif
2207
2208         vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
2209
2210         kvm_get_idt(&dt);
2211         vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */
2212
2213         asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
2214         vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
2215         vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
2216         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
2217         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
2218
2219         rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
2220         vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
2221         rdmsrl(MSR_IA32_SYSENTER_ESP, a);
2222         vmcs_writel(HOST_IA32_SYSENTER_ESP, a);   /* 22.2.3 */
2223         rdmsrl(MSR_IA32_SYSENTER_EIP, a);
2224         vmcs_writel(HOST_IA32_SYSENTER_EIP, a);   /* 22.2.3 */
2225
2226         if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
2227                 rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
2228                 host_pat = msr_low | ((u64) msr_high << 32);
2229                 vmcs_write64(HOST_IA32_PAT, host_pat);
2230         }
2231         if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2232                 rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
2233                 host_pat = msr_low | ((u64) msr_high << 32);
2234                 /* Write the default value, following the host PAT */
2235                 vmcs_write64(GUEST_IA32_PAT, host_pat);
2236                 /* Keep arch.pat in sync with GUEST_IA32_PAT */
2237                 vmx->vcpu.arch.pat = host_pat;
2238         }
2239
2240         for (i = 0; i < NR_VMX_MSR; ++i) {
2241                 u32 index = vmx_msr_index[i];
2242                 u32 data_low, data_high;
2243                 u64 data;
2244                 int j = vmx->nmsrs;
2245
2246                 if (rdmsr_safe(index, &data_low, &data_high) < 0)
2247                         continue;
2248                 if (wrmsr_safe(index, data_low, data_high) < 0)
2249                         continue;
2250                 data = data_low | ((u64)data_high << 32);
2251                 vmx->host_msrs[j].index = index;
2252                 vmx->host_msrs[j].reserved = 0;
2253                 vmx->host_msrs[j].data = data;
2254                 vmx->guest_msrs[j] = vmx->host_msrs[j];
2255                 ++vmx->nmsrs;
2256         }
2257
2258         vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
2259
2260         /* 22.2.1, 20.8.1 */
2261         vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
2262
2263         vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
2264         vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
2265
2266         tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc;
2267         rdtscll(tsc_this);
2268         if (tsc_this < vmx->vcpu.kvm->arch.vm_init_tsc)
2269                 tsc_base = tsc_this;
2270
2271         guest_write_tsc(0, tsc_base);
2272
2273         return 0;
2274 }
2275
2276 static int init_rmode(struct kvm *kvm)
2277 {
2278         if (!init_rmode_tss(kvm))
2279                 return 0;
2280         if (!init_rmode_identity_map(kvm))
2281                 return 0;
2282         return 1;
2283 }
2284
2285 static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
2286 {
2287         struct vcpu_vmx *vmx = to_vmx(vcpu);
2288         u64 msr;
2289         int ret;
2290
2291         vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
2292         down_read(&vcpu->kvm->slots_lock);
2293         if (!init_rmode(vmx->vcpu.kvm)) {
2294                 ret = -ENOMEM;
2295                 goto out;
2296         }
2297
2298         vmx->vcpu.arch.rmode.active = 0;
2299
2300         vmx->soft_vnmi_blocked = 0;
2301
2302         vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
2303         kvm_set_cr8(&vmx->vcpu, 0);
2304         msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
2305         if (vmx->vcpu.vcpu_id == 0)
2306                 msr |= MSR_IA32_APICBASE_BSP;
2307         kvm_set_apic_base(&vmx->vcpu, msr);
2308
2309         fx_init(&vmx->vcpu);
2310
2311         seg_setup(VCPU_SREG_CS);
2312         /*
2313          * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
2314          * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4.  Sigh.
2315          */
2316         if (vmx->vcpu.vcpu_id == 0) {
2317                 vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
2318                 vmcs_writel(GUEST_CS_BASE, 0x000f0000);
2319         } else {
2320                 vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8);
2321                 vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12);
2322         }
2323
2324         seg_setup(VCPU_SREG_DS);
2325         seg_setup(VCPU_SREG_ES);
2326         seg_setup(VCPU_SREG_FS);
2327         seg_setup(VCPU_SREG_GS);
2328         seg_setup(VCPU_SREG_SS);
2329
2330         vmcs_write16(GUEST_TR_SELECTOR, 0);
2331         vmcs_writel(GUEST_TR_BASE, 0);
2332         vmcs_write32(GUEST_TR_LIMIT, 0xffff);
2333         vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
2334
2335         vmcs_write16(GUEST_LDTR_SELECTOR, 0);
2336         vmcs_writel(GUEST_LDTR_BASE, 0);
2337         vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
2338         vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
2339
2340         vmcs_write32(GUEST_SYSENTER_CS, 0);
2341         vmcs_writel(GUEST_SYSENTER_ESP, 0);
2342         vmcs_writel(GUEST_SYSENTER_EIP, 0);
2343
2344         vmcs_writel(GUEST_RFLAGS, 0x02);
2345         if (vmx->vcpu.vcpu_id == 0)
2346                 kvm_rip_write(vcpu, 0xfff0);
2347         else
2348                 kvm_rip_write(vcpu, 0);
2349         kvm_register_write(vcpu, VCPU_REGS_RSP, 0);
2350
2351         vmcs_writel(GUEST_DR7, 0x400);
2352
2353         vmcs_writel(GUEST_GDTR_BASE, 0);
2354         vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
2355
2356         vmcs_writel(GUEST_IDTR_BASE, 0);
2357         vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
2358
2359         vmcs_write32(GUEST_ACTIVITY_STATE, 0);
2360         vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
2361         vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
2362
2363         /* Special registers */
2364         vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
2365
2366         setup_msrs(vmx);
2367
2368         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */
2369
2370         if (cpu_has_vmx_tpr_shadow()) {
2371                 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
2372                 if (vm_need_tpr_shadow(vmx->vcpu.kvm))
2373                         vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
2374                                 page_to_phys(vmx->vcpu.arch.apic->regs_page));
2375                 vmcs_write32(TPR_THRESHOLD, 0);
2376         }
2377
2378         if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
2379                 vmcs_write64(APIC_ACCESS_ADDR,
2380                              page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
2381
2382         if (vmx->vpid != 0)
2383                 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
2384
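        /* 0x60000010 is the architectural CR0 value after RESET (CD | NW | ET) */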
2385         vmx->vcpu.arch.cr0 = 0x60000010;
2386         vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
2387         vmx_set_cr4(&vmx->vcpu, 0);
2388         vmx_set_efer(&vmx->vcpu, 0);
2389         vmx_fpu_activate(&vmx->vcpu);
2390         update_exception_bitmap(&vmx->vcpu);
2391
2392         vpid_sync_vcpu_all(vmx);
2393
2394         ret = 0;
2395
2396         /* HACK: Don't enable emulation on guest boot/reset */
2397         vmx->emulation_required = 0;
2398
2399 out:
2400         up_read(&vcpu->kvm->slots_lock);
2401         return ret;
2402 }
2403
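/*
 * Setting the interrupt-window exiting control (VIRTUAL_INTR_PENDING)
 * forces a VM exit as soon as the guest can accept interrupts, i.e. when
 * RFLAGS.IF is set and no STI/MOV SS shadow is active, so a pending
 * interrupt can be injected on the next entry.
 */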
2404 static void enable_irq_window(struct kvm_vcpu *vcpu)
2405 {
2406         u32 cpu_based_vm_exec_control;
2407
2408         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
2409         cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
2410         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
2411 }
2412
2413 static void enable_nmi_window(struct kvm_vcpu *vcpu)
2414 {
2415         u32 cpu_based_vm_exec_control;
2416
2417         if (!cpu_has_virtual_nmis()) {
2418                 enable_irq_window(vcpu);
2419                 return;
2420         }
2421
2422         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
2423         cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
2424         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
2425 }
2426
2427 static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq)
2428 {
2429         struct vcpu_vmx *vmx = to_vmx(vcpu);
2430
2431         KVMTRACE_1D(INJ_VIRQ, vcpu, (u32)irq, handler);
2432
2433         ++vcpu->stat.irq_injections;
2434         if (vcpu->arch.rmode.active) {
2435                 vmx->rmode.irq.pending = true;
2436                 vmx->rmode.irq.vector = irq;
2437                 vmx->rmode.irq.rip = kvm_rip_read(vcpu);
2438                 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2439                              irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK);
2440                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
2441                 kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
2442                 return;
2443         }
2444         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2445                         irq | INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
2446 }
2447
2448 static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
2449 {
2450         struct vcpu_vmx *vmx = to_vmx(vcpu);
2451
2452         if (!cpu_has_virtual_nmis()) {
2453                 /*
2454                  * Tracking the NMI-blocked state in software is built upon
2455                  * finding the next open IRQ window. This, in turn, depends on
2456                  * well-behaving guests: They have to keep IRQs disabled at
2457                  * least as long as the NMI handler runs. Otherwise we may
2458                  * cause NMI nesting, maybe breaking the guest. But as this is
2459                  * highly unlikely, we can live with the residual risk.
2460                  */
2461                 vmx->soft_vnmi_blocked = 1;
2462                 vmx->vnmi_blocked_time = 0;
2463         }
2464
2465         ++vcpu->stat.nmi_injections;
2466         if (vcpu->arch.rmode.active) {
2467                 vmx->rmode.irq.pending = true;
2468                 vmx->rmode.irq.vector = NMI_VECTOR;
2469                 vmx->rmode.irq.rip = kvm_rip_read(vcpu);
2470                 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2471                              NMI_VECTOR | INTR_TYPE_SOFT_INTR |
2472                              INTR_INFO_VALID_MASK);
2473                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
2474                 kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
2475                 return;
2476         }
2477         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2478                         INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
2479 }
2480
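/*
 * Recompute the cached window state: the NMI window is treated as closed
 * while the guest is in an STI or MOV SS shadow or still handling an NMI,
 * and the interrupt window is open only when RFLAGS.IF is set and no
 * STI/MOV SS shadow is active.
 */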
2481 static void vmx_update_window_states(struct kvm_vcpu *vcpu)
2482 {
2483         u32 guest_intr = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
2484
2485         vcpu->arch.nmi_window_open =
2486                 !(guest_intr & (GUEST_INTR_STATE_STI |
2487                                 GUEST_INTR_STATE_MOV_SS |
2488                                 GUEST_INTR_STATE_NMI));
2489         if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
2490                 vcpu->arch.nmi_window_open = 0;
2491
2492         vcpu->arch.interrupt_window_open =
2493                 ((vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
2494                  !(guest_intr & (GUEST_INTR_STATE_STI |
2495                                  GUEST_INTR_STATE_MOV_SS)));
2496 }
2497
2498 static void do_interrupt_requests(struct kvm_vcpu *vcpu,
2499                                        struct kvm_run *kvm_run)
2500 {
2501         vmx_update_window_states(vcpu);
2502
2503         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
2504                 vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
2505                                 GUEST_INTR_STATE_STI |
2506                                 GUEST_INTR_STATE_MOV_SS);
2507
2508         if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
2509                 if (vcpu->arch.interrupt.pending) {
2510                         enable_nmi_window(vcpu);
2511                 } else if (vcpu->arch.nmi_window_open) {
2512                         vcpu->arch.nmi_pending = false;
2513                         vcpu->arch.nmi_injected = true;
2514                 } else {
2515                         enable_nmi_window(vcpu);
2516                         return;
2517                 }
2518         }
2519         if (vcpu->arch.nmi_injected) {
2520                 vmx_inject_nmi(vcpu);
2521                 if (vcpu->arch.nmi_pending)
2522                         enable_nmi_window(vcpu);
2523                 else if (vcpu->arch.irq_summary
2524                          || kvm_run->request_interrupt_window)
2525                         enable_irq_window(vcpu);
2526                 return;
2527         }
2528
2529         if (vcpu->arch.interrupt_window_open) {
2530                 if (vcpu->arch.irq_summary && !vcpu->arch.interrupt.pending)
2531                         kvm_queue_interrupt(vcpu, kvm_pop_irq(vcpu));
2532
2533                 if (vcpu->arch.interrupt.pending)
2534                         vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
2535         }
2536         if (!vcpu->arch.interrupt_window_open &&
2537             (vcpu->arch.irq_summary || kvm_run->request_interrupt_window))
2538                 enable_irq_window(vcpu);
2539 }
2540
2541 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
2542 {
2543         int ret;
2544         struct kvm_userspace_memory_region tss_mem = {
2545                 .slot = TSS_PRIVATE_MEMSLOT,
2546                 .guest_phys_addr = addr,
2547                 .memory_size = PAGE_SIZE * 3,
2548                 .flags = 0,
2549         };
2550
2551         ret = kvm_set_memory_region(kvm, &tss_mem, 0);
2552         if (ret)
2553                 return ret;
2554         kvm->arch.tss_addr = addr;
2555         return 0;
2556 }
2557
2558 static int handle_rmode_exception(struct kvm_vcpu *vcpu,
2559                                   int vec, u32 err_code)
2560 {
2561         /*
2562          * Instructions with the address size override prefix (opcode 0x67)
2563          * cause a #SS fault with error code 0 in VM86 mode.
2564          */
2565         if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
2566                 if (emulate_instruction(vcpu, NULL, 0, 0, 0) == EMULATE_DONE)
2567                         return 1;
2568         /*
2569          * Forward all other exceptions that are valid in real mode.
2570          * FIXME: Breaks guest debugging in real mode, needs to be fixed with
2571          *        the required debugging infrastructure rework.
2572          */
2573         switch (vec) {
2574         case DB_VECTOR:
2575                 if (vcpu->guest_debug &
2576                     (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
2577                         return 0;
2578                 kvm_queue_exception(vcpu, vec);
2579                 return 1;
2580         case BP_VECTOR:
2581                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
2582                         return 0;
2583                 /* fall through */
2584         case DE_VECTOR:
2585         case OF_VECTOR:
2586         case BR_VECTOR:
2587         case UD_VECTOR:
2588         case DF_VECTOR:
2589         case SS_VECTOR:
2590         case GP_VECTOR:
2591         case MF_VECTOR:
2592                 kvm_queue_exception(vcpu, vec);
2593                 return 1;
2594         }
2595         return 0;
2596 }
2597
2598 static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2599 {
2600         struct vcpu_vmx *vmx = to_vmx(vcpu);
2601         u32 intr_info, ex_no, error_code;
2602         unsigned long cr2, rip, dr6;
2603         u32 vect_info;
2604         enum emulation_result er;
2605
2606         vect_info = vmx->idt_vectoring_info;
2607         intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
2608
2609         if ((vect_info & VECTORING_INFO_VALID_MASK) &&
2610                                                 !is_page_fault(intr_info))
2611                 printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
2612                        "intr info 0x%x\n", __func__, vect_info, intr_info);
2613
2614         if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) {
2615                 int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
2616                 kvm_push_irq(vcpu, irq);
2617         }
2618
2619         if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
2620                 return 1;  /* already handled by vmx_vcpu_run() */
2621
2622         if (is_no_device(intr_info)) {
2623                 vmx_fpu_activate(vcpu);
2624                 return 1;
2625         }
2626
2627         if (is_invalid_opcode(intr_info)) {
2628                 er = emulate_instruction(vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD);
2629                 if (er != EMULATE_DONE)
2630                         kvm_queue_exception(vcpu, UD_VECTOR);
2631                 return 1;
2632         }
2633
2634         error_code = 0;
2635         rip = kvm_rip_read(vcpu);
2636         if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
2637                 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
2638         if (is_page_fault(intr_info)) {
2639                 /* EPT won't cause page fault directly */
2640                 if (vm_need_ept())
2641                         BUG();
2642                 cr2 = vmcs_readl(EXIT_QUALIFICATION);
2643                 KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2,
2644                             (u32)((u64)cr2 >> 32), handler);
2645                 if (vcpu->arch.interrupt.pending || vcpu->arch.exception.pending)
2646                         kvm_mmu_unprotect_page_virt(vcpu, cr2);
2647                 return kvm_mmu_page_fault(vcpu, cr2, error_code);
2648         }
2649
2650         if (vcpu->arch.rmode.active &&
2651             handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
2652                                                                 error_code)) {
2653                 if (vcpu->arch.halt_request) {
2654                         vcpu->arch.halt_request = 0;
2655                         return kvm_emulate_halt(vcpu);
2656                 }
2657                 return 1;
2658         }
2659
2660         ex_no = intr_info & INTR_INFO_VECTOR_MASK;
2661         switch (ex_no) {
2662         case DB_VECTOR:
2663                 dr6 = vmcs_readl(EXIT_QUALIFICATION);
2664                 if (!(vcpu->guest_debug &
2665                       (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
2666                         vcpu->arch.dr6 = dr6 | DR6_FIXED_1;
2667                         kvm_queue_exception(vcpu, DB_VECTOR);
2668                         return 1;
2669                 }
2670                 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
2671                 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
2672                 /* fall through */
2673         case BP_VECTOR:
2674                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2675                 kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
2676                 kvm_run->debug.arch.exception = ex_no;
2677                 break;
2678         default:
2679                 kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
2680                 kvm_run->ex.exception = ex_no;
2681                 kvm_run->ex.error_code = error_code;
2682                 break;
2683         }
2684         return 0;
2685 }
2686
2687 static int handle_external_interrupt(struct kvm_vcpu *vcpu,
2688                                      struct kvm_run *kvm_run)
2689 {
2690         ++vcpu->stat.irq_exits;
2691         KVMTRACE_1D(INTR, vcpu, vmcs_read32(VM_EXIT_INTR_INFO), handler);
2692         return 1;
2693 }
2694
2695 static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2696 {
2697         kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2698         return 0;
2699 }
2700
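/*
 * I/O exit qualification layout used below: bits 2:0 hold the access
 * size minus one, bit 3 is set for IN, bit 4 for a string instruction,
 * and bits 31:16 hold the port number.
 */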
2701 static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2702 {
2703         unsigned long exit_qualification;
2704         int size, in, string;
2705         unsigned port;
2706
2707         ++vcpu->stat.io_exits;
2708         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
2709         string = (exit_qualification & 16) != 0;
2710
2711         if (string) {
2712                 if (emulate_instruction(vcpu,
2713                                         kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
2714                         return 0;
2715                 return 1;
2716         }
2717
2718         size = (exit_qualification & 7) + 1;
2719         in = (exit_qualification & 8) != 0;
2720         port = exit_qualification >> 16;
2721
2722         skip_emulated_instruction(vcpu);
2723         return kvm_emulate_pio(vcpu, kvm_run, in, size, port);
2724 }
2725
2726 static void
2727 vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
2728 {
2729         /*
2730          * Patch in the VMCALL instruction:
2731          */
2732         hypercall[0] = 0x0f;
2733         hypercall[1] = 0x01;
2734         hypercall[2] = 0xc1;
2735 }
2736
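/*
 * CR-access exit qualification layout used below: bits 3:0 give the
 * control register number, bits 5:4 the access type (0 = mov to cr,
 * 1 = mov from cr, 2 = clts, 3 = lmsw) and bits 11:8 the GPR involved.
 */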
2737 static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2738 {
2739         unsigned long exit_qualification;
2740         int cr;
2741         int reg;
2742
2743         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
2744         cr = exit_qualification & 15;
2745         reg = (exit_qualification >> 8) & 15;
2746         switch ((exit_qualification >> 4) & 3) {
2747         case 0: /* mov to cr */
2748                 KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr,
2749                             (u32)kvm_register_read(vcpu, reg),
2750                             (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
2751                             handler);
2752                 switch (cr) {
2753                 case 0:
2754                         kvm_set_cr0(vcpu, kvm_register_read(vcpu, reg));
2755                         skip_emulated_instruction(vcpu);
2756                         return 1;
2757                 case 3:
2758                         kvm_set_cr3(vcpu, kvm_register_read(vcpu, reg));
2759                         skip_emulated_instruction(vcpu);
2760                         return 1;
2761                 case 4:
2762                         kvm_set_cr4(vcpu, kvm_register_read(vcpu, reg));
2763                         skip_emulated_instruction(vcpu);
2764                         return 1;
2765                 case 8:
2766                         kvm_set_cr8(vcpu, kvm_register_read(vcpu, reg));
2767                         skip_emulated_instruction(vcpu);
2768                         if (irqchip_in_kernel(vcpu->kvm))
2769                                 return 1;
2770                         kvm_run->exit_reason = KVM_EXIT_SET_TPR;
2771                         return 0;
2772                 }
2773                 break;
2774         case 2: /* clts */
2775                 vmx_fpu_deactivate(vcpu);
2776                 vcpu->arch.cr0 &= ~X86_CR0_TS;
2777                 vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
2778                 vmx_fpu_activate(vcpu);
2779                 KVMTRACE_0D(CLTS, vcpu, handler);
2780                 skip_emulated_instruction(vcpu);
2781                 return 1;
2782         case 1: /* mov from cr */
2783                 switch (cr) {
2784                 case 3:
2785                         kvm_register_write(vcpu, reg, vcpu->arch.cr3);
2786                         KVMTRACE_3D(CR_READ, vcpu, (u32)cr,
2787                                     (u32)kvm_register_read(vcpu, reg),
2788                                     (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
2789                                     handler);
2790                         skip_emulated_instruction(vcpu);
2791                         return 1;
2792                 case 8:
2793                         kvm_register_write(vcpu, reg, kvm_get_cr8(vcpu));
2794                         KVMTRACE_2D(CR_READ, vcpu, (u32)cr,
2795                                     (u32)kvm_register_read(vcpu, reg), handler);
2796                         skip_emulated_instruction(vcpu);
2797                         return 1;
2798                 }
2799                 break;
2800         case 3: /* lmsw */
2801                 kvm_lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
2802
2803                 skip_emulated_instruction(vcpu);
2804                 return 1;
2805         default:
2806                 break;
2807         }
2808         kvm_run->exit_reason = 0;
2809         pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
2810                (int)(exit_qualification >> 4) & 3, cr);
2811         return 0;
2812 }
2813
2814 static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2815 {
2816         unsigned long exit_qualification;
2817         unsigned long val;
2818         int dr, reg;
2819
2820         dr = vmcs_readl(GUEST_DR7);
2821         if (dr & DR7_GD) {
2822                 /*
2823                  * As the vm-exit takes precedence over the debug trap, we
2824                  * need to emulate the latter, either for the host or the
2825                  * guest debugging itself.
2826                  */
2827                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
2828                         kvm_run->debug.arch.dr6 = vcpu->arch.dr6;
2829                         kvm_run->debug.arch.dr7 = dr;
2830                         kvm_run->debug.arch.pc =
2831                                 vmcs_readl(GUEST_CS_BASE) +
2832                                 vmcs_readl(GUEST_RIP);
2833                         kvm_run->debug.arch.exception = DB_VECTOR;
2834                         kvm_run->exit_reason = KVM_EXIT_DEBUG;
2835                         return 0;
2836                 } else {
2837                         vcpu->arch.dr7 &= ~DR7_GD;
2838                         vcpu->arch.dr6 |= DR6_BD;
2839                         vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
2840                         kvm_queue_exception(vcpu, DB_VECTOR);
2841                         return 1;
2842                 }
2843         }
2844
2845         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
2846         dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
2847         reg = DEBUG_REG_ACCESS_REG(exit_qualification);
2848         if (exit_qualification & TYPE_MOV_FROM_DR) {
2849                 switch (dr) {
2850                 case 0 ... 3:
2851                         val = vcpu->arch.db[dr];
2852                         break;
2853                 case 6:
2854                         val = vcpu->arch.dr6;
2855                         break;
2856                 case 7:
2857                         val = vcpu->arch.dr7;
2858                         break;
2859                 default:
2860                         val = 0;
2861                 }
2862                 kvm_register_write(vcpu, reg, val);
2863                 KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
2864         } else {
2865                 val = vcpu->arch.regs[reg];
2866                 switch (dr) {
2867                 case 0 ... 3:
2868                         vcpu->arch.db[dr] = val;
2869                         if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
2870                                 vcpu->arch.eff_db[dr] = val;
2871                         break;
2872                 case 4 ... 5:
2873                         if (vcpu->arch.cr4 & X86_CR4_DE)
2874                                 kvm_queue_exception(vcpu, UD_VECTOR);
2875                         break;
2876                 case 6:
2877                         if (val & 0xffffffff00000000ULL) {
2878                                 kvm_queue_exception(vcpu, GP_VECTOR);
2879                                 break;
2880                         }
2881                         vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
2882                         break;
2883                 case 7:
2884                         if (val & 0xffffffff00000000ULL) {
2885                                 kvm_queue_exception(vcpu, GP_VECTOR);
2886                                 break;
2887                         }
2888                         vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
2889                         if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
2890                                 vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
2891                                 vcpu->arch.switch_db_regs =
2892                                         (val & DR7_BP_EN_MASK);
2893                         }
2894                         break;
2895                 }
2896                 KVMTRACE_2D(DR_WRITE, vcpu, (u32)dr, (u32)val, handler);
2897         }
2898         skip_emulated_instruction(vcpu);
2899         return 1;
2900 }
2901
2902 static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2903 {
2904         kvm_emulate_cpuid(vcpu);
2905         return 1;
2906 }
2907
2908 static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2909 {
2910         u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
2911         u64 data;
2912
2913         if (vmx_get_msr(vcpu, ecx, &data)) {
2914                 kvm_inject_gp(vcpu, 0);
2915                 return 1;
2916         }
2917
2918         KVMTRACE_3D(MSR_READ, vcpu, ecx, (u32)data, (u32)(data >> 32),
2919                     handler);
2920
2921         /* FIXME: handling of bits 32:63 of rax, rdx */
2922         vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
2923         vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
2924         skip_emulated_instruction(vcpu);
2925         return 1;
2926 }
2927
2928 static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2929 {
2930         u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
2931         u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
2932                 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
2933
2934         KVMTRACE_3D(MSR_WRITE, vcpu, ecx, (u32)data, (u32)(data >> 32),
2935                     handler);
2936
2937         if (vmx_set_msr(vcpu, ecx, data) != 0) {
2938                 kvm_inject_gp(vcpu, 0);
2939                 return 1;
2940         }
2941
2942         skip_emulated_instruction(vcpu);
2943         return 1;
2944 }
2945
2946 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu,
2947                                       struct kvm_run *kvm_run)
2948 {
2949         return 1;
2950 }
2951
2952 static int handle_interrupt_window(struct kvm_vcpu *vcpu,
2953                                    struct kvm_run *kvm_run)
2954 {
2955         u32 cpu_based_vm_exec_control;
2956
2957         /* clear pending irq */
2958         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
2959         cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
2960         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
2961
2962         KVMTRACE_0D(PEND_INTR, vcpu, handler);
2963         ++vcpu->stat.irq_window_exits;
2964
2965         /*
2966          * If userspace is waiting to inject interrupts, exit as soon as
2967          * possible.
2968          */
2969         if (kvm_run->request_interrupt_window &&
2970             !vcpu->arch.irq_summary) {
2971                 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
2972                 return 0;
2973         }
2974         return 1;
2975 }
2976
2977 static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2978 {
2979         skip_emulated_instruction(vcpu);
2980         return kvm_emulate_halt(vcpu);
2981 }
2982
2983 static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2984 {
2985         skip_emulated_instruction(vcpu);
2986         kvm_emulate_hypercall(vcpu);
2987         return 1;
2988 }
2989
2990 static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2991 {
2992         u64 exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
2993
2994         kvm_mmu_invlpg(vcpu, exit_qualification);
2995         skip_emulated_instruction(vcpu);
2996         return 1;
2997 }
2998
2999 static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3000 {
3001         skip_emulated_instruction(vcpu);
3002         /* TODO: Add support for VT-d/pass-through device */
3003         return 1;
3004 }
3005
3006 static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3007 {
3008         u64 exit_qualification;
3009         enum emulation_result er;
3010         unsigned long offset;
3011
3012         exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
3013         offset = exit_qualification & 0xffful;
3014
3015         er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
3016
3017         if (er !=  EMULATE_DONE) {
3018                 printk(KERN_ERR
3019                        "Failed to handle APIC access vmexit! Offset is 0x%lx\n",
3020                        offset);
3021                 return -ENOTSUPP;
3022         }
3023         return 1;
3024 }
3025
3026 static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3027 {
3028         struct vcpu_vmx *vmx = to_vmx(vcpu);
3029         unsigned long exit_qualification;
3030         u16 tss_selector;
3031         int reason;
3032
3033         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
3034
3035         reason = (u32)exit_qualification >> 30;
3036         if (reason == TASK_SWITCH_GATE && vmx->vcpu.arch.nmi_injected &&
3037             (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
3038             (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK)
3039             == INTR_TYPE_NMI_INTR) {
3040                 vcpu->arch.nmi_injected = false;
3041                 if (cpu_has_virtual_nmis())
3042                         vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
3043                                       GUEST_INTR_STATE_NMI);
3044         }
3045         tss_selector = exit_qualification;
3046
3047         if (!kvm_task_switch(vcpu, tss_selector, reason))
3048                 return 0;
3049
3050         /* clear all local breakpoint enable flags (L0-L3: bits 0, 2, 4 and 6) */
3051         vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~0x55);
3052
3053         /*
3054          * TODO: What about debug traps on tss switch?
3055          *       Are we supposed to inject them and update dr6?
3056          */
3057
3058         return 1;
3059 }
3060
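/*
 * EPT violation: the faulting guest-physical address is reported in
 * GUEST_PHYSICAL_ADDRESS and the access details in the exit qualification.
 * After sanity-checking the qualification, hand the GPA to
 * kvm_mmu_page_fault() so the MMU can build or repair the mapping.
 */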
3061 static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3062 {
3063         u64 exit_qualification;
3064         gpa_t gpa;
3065         int gla_validity;
3066
3067         exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
3068
3069         if (exit_qualification & (1 << 6)) {
3070                 printk(KERN_ERR "EPT: GPA exceeds GAW!\n");
3071                 return -ENOTSUPP;
3072         }
3073
3074         gla_validity = (exit_qualification >> 7) & 0x3;
3075         if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) {
3076                 printk(KERN_ERR "EPT: Handling EPT violation failed!\n");
3077                 printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
3078                         (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
3079                         (long unsigned int)vmcs_read64(GUEST_LINEAR_ADDRESS));
3080                 printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
3081                         (long unsigned int)exit_qualification);
3082                 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
3083                 kvm_run->hw.hardware_exit_reason = 0;
3084                 return -ENOTSUPP;
3085         }
3086
3087         gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
3088         return kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0);
3089 }
3090
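/*
 * NMI-window exit: the CPU signalled that NMI injection is now possible.
 * Clear the NMI-window-exiting control; the actual injection happens on the
 * next entry via vmx_intr_assist().
 */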
3091 static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3092 {
3093         u32 cpu_based_vm_exec_control;
3094
3095         /* clear pending NMI */
3096         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
3097         cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
3098         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
3099         ++vcpu->stat.nmi_window_exits;
3100
3101         return 1;
3102 }
3103
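/*
 * With emulate_invalid_guest_state, a guest whose segment state cannot be
 * entered directly is run by emulating one instruction at a time until its
 * state becomes valid again.  Emulation may sleep, so preemption and
 * interrupts are re-enabled around the loop, and the result is recorded for
 * vmx_handle_exit().
 */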
3104 static void handle_invalid_guest_state(struct kvm_vcpu *vcpu,
3105                                 struct kvm_run *kvm_run)
3106 {
3107         struct vcpu_vmx *vmx = to_vmx(vcpu);
3108         enum emulation_result err = EMULATE_DONE;
3109
3110         preempt_enable();
3111         local_irq_enable();
3112
3113         while (!guest_state_valid(vcpu)) {
3114                 err = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
3115
3116                 if (err == EMULATE_DO_MMIO)
3117                         break;
3118
3119                 if (err != EMULATE_DONE) {
3120                         kvm_report_emulation_failure(vcpu, "emulation failure");
3121                         return;
3122                 }
3123
3124                 if (signal_pending(current))
3125                         break;
3126                 if (need_resched())
3127                         schedule();
3128         }
3129
3130         local_irq_disable();
3131         preempt_disable();
3132
3133         vmx->invalid_state_emulation_result = err;
3134 }
3135
3136 /*
3137  * The exit handlers return 1 if the exit was handled fully and guest execution
3138  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
3139  * to be done to userspace and return 0.
3140  */
3141 static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
3142                                       struct kvm_run *kvm_run) = {
3143         [EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
3144         [EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
3145         [EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
3146         [EXIT_REASON_NMI_WINDOW]              = handle_nmi_window,
3147         [EXIT_REASON_IO_INSTRUCTION]          = handle_io,
3148         [EXIT_REASON_CR_ACCESS]               = handle_cr,
3149         [EXIT_REASON_DR_ACCESS]               = handle_dr,
3150         [EXIT_REASON_CPUID]                   = handle_cpuid,
3151         [EXIT_REASON_MSR_READ]                = handle_rdmsr,
3152         [EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
3153         [EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
3154         [EXIT_REASON_HLT]                     = handle_halt,
3155         [EXIT_REASON_INVLPG]                  = handle_invlpg,
3156         [EXIT_REASON_VMCALL]                  = handle_vmcall,
3157         [EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
3158         [EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
3159         [EXIT_REASON_WBINVD]                  = handle_wbinvd,
3160         [EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
3161         [EXIT_REASON_EPT_VIOLATION]           = handle_ept_violation,
3162 };
3163
3164 static const int kvm_vmx_max_exit_handlers =
3165         ARRAY_SIZE(kvm_vmx_exit_handlers);
3166
3167 /*
3168  * The guest has exited.  See if we can fix it or if we need userspace
3169  * assistance.
3170  */
3171 static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
3172 {
3173         u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
3174         struct vcpu_vmx *vmx = to_vmx(vcpu);
3175         u32 vectoring_info = vmx->idt_vectoring_info;
3176
3177         KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)kvm_rip_read(vcpu),
3178                     (u32)((u64)kvm_rip_read(vcpu) >> 32), entryexit);
3179
3180         /* If handle_invalid_guest_state() stopped on an MMIO that userspace
3181          * must complete, just return 0. */
3182         if (vmx->emulation_required && emulate_invalid_guest_state) {
3183                 if (guest_state_valid(vcpu))
3184                         vmx->emulation_required = 0;
3185                 return vmx->invalid_state_emulation_result != EMULATE_DO_MMIO;
3186         }
3187
3188         /* CR3 accesses don't cause a VM exit in paging mode, so we need
3189          * to sync with the guest's real CR3. */
3190         if (vm_need_ept() && is_paging(vcpu)) {
3191                 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
3192                 ept_load_pdptrs(vcpu);
3193         }
3194
3195         if (unlikely(vmx->fail)) {
3196                 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
3197                 kvm_run->fail_entry.hardware_entry_failure_reason
3198                         = vmcs_read32(VM_INSTRUCTION_ERROR);
3199                 return 0;
3200         }
3201
3202         if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
3203                         (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
3204                         exit_reason != EXIT_REASON_EPT_VIOLATION &&
3205                         exit_reason != EXIT_REASON_TASK_SWITCH))
3206                 printk(KERN_WARNING "%s: unexpected, valid vectoring info "
3207                        "(0x%x) and exit reason is 0x%x\n",
3208                        __func__, vectoring_info, exit_reason);
3209
3210         if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) {
3211                 if (vcpu->arch.interrupt_window_open) {
3212                         vmx->soft_vnmi_blocked = 0;
3213                         vcpu->arch.nmi_window_open = 1;
3214                 } else if (vmx->vnmi_blocked_time > 1000000000LL &&
3215                            vcpu->arch.nmi_pending) {
3216                         /*
3217                          * This CPU can't help us find the end of an
3218                          * NMI-blocked window if the guest runs with IRQs
3219                          * disabled. So we pull the trigger after 1 s of
3220                          * futile waiting, but inform the user about this.
3221                          */
3222                         printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
3223                                "state on VCPU %d after 1 s timeout\n",
3224                                __func__, vcpu->vcpu_id);
3225                         vmx->soft_vnmi_blocked = 0;
3226                         vmx->vcpu.arch.nmi_window_open = 1;
3227                 }
3228         }
3229
3230         if (exit_reason < kvm_vmx_max_exit_handlers
3231             && kvm_vmx_exit_handlers[exit_reason])
3232                 return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
3233         else {
3234                 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
3235                 kvm_run->hw.hardware_exit_reason = exit_reason;
3236         }
3237         return 0;
3238 }
3239
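/*
 * Program TPR_THRESHOLD to the priority class of the highest pending
 * interrupt (capped at the current TPR) so that a guest CR8 write dropping
 * below it causes a TPR-below-threshold exit and lets KVM inject the
 * interrupt.
 */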
3240 static void update_tpr_threshold(struct kvm_vcpu *vcpu)
3241 {
3242         int max_irr, tpr;
3243
3244         if (!vm_need_tpr_shadow(vcpu->kvm))
3245                 return;
3246
3247         if (!kvm_lapic_enabled(vcpu) ||
3248             ((max_irr = kvm_lapic_find_highest_irr(vcpu)) == -1)) {
3249                 vmcs_write32(TPR_THRESHOLD, 0);
3250                 return;
3251         }
3252
3253         tpr = (kvm_lapic_get_cr8(vcpu) & 0x0f) << 4;
3254         vmcs_write32(TPR_THRESHOLD, (max_irr > tpr) ? tpr >> 4 : max_irr >> 4);
3255 }
3256
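/*
 * Run after every vmexit: keep the virtual-NMI blocking state consistent
 * and, based on IDT_VECTORING_INFO_FIELD, re-queue any NMI, exception or
 * external interrupt whose delivery was interrupted by the exit so it is
 * re-injected on the next entry.
 */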
3257 static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
3258 {
3259         u32 exit_intr_info;
3260         u32 idt_vectoring_info;
3261         bool unblock_nmi;
3262         u8 vector;
3263         int type;
3264         bool idtv_info_valid;
3265         u32 error;
3266
3267         exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
3268         if (cpu_has_virtual_nmis()) {
3269                 unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
3270                 vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
3271                 /*
3272                  * SDM 3: 25.7.1.2
3273                  * Re-set the "blocked by NMI" bit before VM entry if the vmexit
3274                  * was caused by a fault during a guest IRET.
3275                  */
3276                 if (unblock_nmi && vector != DF_VECTOR)
3277                         vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
3278                                       GUEST_INTR_STATE_NMI);
3279         } else if (unlikely(vmx->soft_vnmi_blocked))
3280                 vmx->vnmi_blocked_time +=
3281                         ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
3282
3283         idt_vectoring_info = vmx->idt_vectoring_info;
3284         idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
3285         vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
3286         type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
3287         if (vmx->vcpu.arch.nmi_injected) {
3288                 /*
3289                  * SDM 3: 25.7.1.2
3290                  * Clear bit "block by NMI" before VM entry if an NMI delivery
3291                  * faulted.
3292                  */
3293                 if (idtv_info_valid && type == INTR_TYPE_NMI_INTR)
3294                         vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
3295                                         GUEST_INTR_STATE_NMI);
3296                 else
3297                         vmx->vcpu.arch.nmi_injected = false;
3298         }
3299         kvm_clear_exception_queue(&vmx->vcpu);
3300         if (idtv_info_valid && (type == INTR_TYPE_HARD_EXCEPTION ||
3301                                 type == INTR_TYPE_SOFT_EXCEPTION)) {
3302                 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
3303                         error = vmcs_read32(IDT_VECTORING_ERROR_CODE);
3304                         kvm_queue_exception_e(&vmx->vcpu, vector, error);
3305                 } else
3306                         kvm_queue_exception(&vmx->vcpu, vector);
3307                 vmx->idt_vectoring_info = 0;
3308         }
3309         kvm_clear_interrupt_queue(&vmx->vcpu);
3310         if (idtv_info_valid && type == INTR_TYPE_EXT_INTR) {
3311                 kvm_queue_interrupt(&vmx->vcpu, vector);
3312                 vmx->idt_vectoring_info = 0;
3313         }
3314 }
3315
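/*
 * Decide what to inject before the next entry.  NMIs take priority over
 * external interrupts; whenever injection is not currently possible, the
 * corresponding NMI- or IRQ-window exit is requested instead so we are
 * called back as soon as the window opens.
 */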
3316 static void vmx_intr_assist(struct kvm_vcpu *vcpu)
3317 {
3318         update_tpr_threshold(vcpu);
3319
3320         vmx_update_window_states(vcpu);
3321
3322         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
3323                 vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
3324                                 GUEST_INTR_STATE_STI |
3325                                 GUEST_INTR_STATE_MOV_SS);
3326
3327         if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
3328                 if (vcpu->arch.interrupt.pending) {
3329                         enable_nmi_window(vcpu);
3330                 } else if (vcpu->arch.nmi_window_open) {
3331                         vcpu->arch.nmi_pending = false;
3332                         vcpu->arch.nmi_injected = true;
3333                 } else {
3334                         enable_nmi_window(vcpu);
3335                         return;
3336                 }
3337         }
3338         if (vcpu->arch.nmi_injected) {
3339                 vmx_inject_nmi(vcpu);
3340                 if (vcpu->arch.nmi_pending)
3341                         enable_nmi_window(vcpu);
3342                 else if (kvm_cpu_has_interrupt(vcpu))
3343                         enable_irq_window(vcpu);
3344                 return;
3345         }
3346         if (!vcpu->arch.interrupt.pending && kvm_cpu_has_interrupt(vcpu)) {
3347                 if (vcpu->arch.interrupt_window_open)
3348                         kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
3349                 else
3350                         enable_irq_window(vcpu);
3351         }
3352         if (vcpu->arch.interrupt.pending) {
3353                 vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
3354                 if (kvm_cpu_has_interrupt(vcpu))
3355                         enable_irq_window(vcpu);
3356         }
3357 }
3358
3359 /*
3360  * Failure to inject an interrupt should give us the information
3361  * in IDT_VECTORING_INFO_FIELD.  However, if the failure occurs
3362  * when fetching the interrupt redirection bitmap in the real-mode
3363  * tss, this doesn't happen.  So we do it ourselves.
3364  */
3365 static void fixup_rmode_irq(struct vcpu_vmx *vmx)
3366 {
3367         vmx->rmode.irq.pending = 0;
3368         if (kvm_rip_read(&vmx->vcpu) + 1 != vmx->rmode.irq.rip)
3369                 return;
3370         kvm_rip_write(&vmx->vcpu, vmx->rmode.irq.rip);
3371         if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) {
3372                 vmx->idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK;
3373                 vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR;
3374                 return;
3375         }
3376         vmx->idt_vectoring_info =
3377                 VECTORING_INFO_VALID_MASK
3378                 | INTR_TYPE_EXT_INTR
3379                 | vmx->rmode.irq.vector;
3380 }
3381
3382 #ifdef CONFIG_X86_64
3383 #define R "r"
3384 #define Q "q"
3385 #else
3386 #define R "e"
3387 #define Q "l"
3388 #endif
3389
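/*
 * The main entry path: flush dirty RSP/RIP into the VMCS, enter the guest
 * with VMLAUNCH/VMRESUME from inline assembly (saving and restoring the
 * guest's general-purpose registers by hand), then pull out the exit
 * information needed by the exit handlers.
 */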
3390 static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3391 {
3392         struct vcpu_vmx *vmx = to_vmx(vcpu);
3393         u32 intr_info;
3394
3395         /* Record the guest's net vcpu time for enforced NMI injections. */
3396         if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
3397                 vmx->entry_time = ktime_get();
3398
3399         /* Handle invalid guest state instead of entering VMX */
3400         if (vmx->emulation_required && emulate_invalid_guest_state) {
3401                 handle_invalid_guest_state(vcpu, kvm_run);
3402                 return;
3403         }
3404
3405         if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
3406                 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
3407         if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
3408                 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
3409
3410         /*
3411          * Loading guest fpu may have cleared host cr0.ts
3412          */
3413         vmcs_writel(HOST_CR0, read_cr0());
3414
3415         set_debugreg(vcpu->arch.dr6, 6);
3416
3417         asm(
3418                 /* Store host registers */
3419                 "push %%"R"dx; push %%"R"bp;"
3420                 "push %%"R"cx \n\t"
3421                 "cmp %%"R"sp, %c[host_rsp](%0) \n\t"
3422                 "je 1f \n\t"
3423                 "mov %%"R"sp, %c[host_rsp](%0) \n\t"
3424                 __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
3425                 "1: \n\t"
3426                 /* Check if vmlaunch or vmresume is needed */
3427                 "cmpl $0, %c[launched](%0) \n\t"
3428                 /* Load guest registers.  Don't clobber flags. */
3429                 "mov %c[cr2](%0), %%"R"ax \n\t"
3430                 "mov %%"R"ax, %%cr2 \n\t"
3431                 "mov %c[rax](%0), %%"R"ax \n\t"
3432                 "mov %c[rbx](%0), %%"R"bx \n\t"
3433                 "mov %c[rdx](%0), %%"R"dx \n\t"
3434                 "mov %c[rsi](%0), %%"R"si \n\t"
3435                 "mov %c[rdi](%0), %%"R"di \n\t"
3436                 "mov %c[rbp](%0), %%"R"bp \n\t"
3437 #ifdef CONFIG_X86_64
3438                 "mov %c[r8](%0),  %%r8  \n\t"
3439                 "mov %c[r9](%0),  %%r9  \n\t"
3440                 "mov %c[r10](%0), %%r10 \n\t"
3441                 "mov %c[r11](%0), %%r11 \n\t"
3442                 "mov %c[r12](%0), %%r12 \n\t"
3443                 "mov %c[r13](%0), %%r13 \n\t"
3444                 "mov %c[r14](%0), %%r14 \n\t"
3445                 "mov %c[r15](%0), %%r15 \n\t"
3446 #endif
3447                 "mov %c[rcx](%0), %%"R"cx \n\t" /* kills %0 (ecx) */
3448
3449                 /* Enter guest mode */
3450                 "jne .Llaunched \n\t"
3451                 __ex(ASM_VMX_VMLAUNCH) "\n\t"
3452                 "jmp .Lkvm_vmx_return \n\t"
3453                 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
3454                 ".Lkvm_vmx_return: "
3455                 /* Save guest registers, load host registers, keep flags */
3456                 "xchg %0,     (%%"R"sp) \n\t"
3457                 "mov %%"R"ax, %c[rax](%0) \n\t"
3458                 "mov %%"R"bx, %c[rbx](%0) \n\t"
3459                 "push"Q" (%%"R"sp); pop"Q" %c[rcx](%0) \n\t"
3460                 "mov %%"R"dx, %c[rdx](%0) \n\t"
3461                 "mov %%"R"si, %c[rsi](%0) \n\t"
3462                 "mov %%"R"di, %c[rdi](%0) \n\t"
3463                 "mov %%"R"bp, %c[rbp](%0) \n\t"
3464 #ifdef CONFIG_X86_64
3465                 "mov %%r8,  %c[r8](%0) \n\t"
3466                 "mov %%r9,  %c[r9](%0) \n\t"
3467                 "mov %%r10, %c[r10](%0) \n\t"
3468                 "mov %%r11, %c[r11](%0) \n\t"
3469                 "mov %%r12, %c[r12](%0) \n\t"
3470                 "mov %%r13, %c[r13](%0) \n\t"
3471                 "mov %%r14, %c[r14](%0) \n\t"
3472                 "mov %%r15, %c[r15](%0) \n\t"
3473 #endif
3474                 "mov %%cr2, %%"R"ax   \n\t"
3475                 "mov %%"R"ax, %c[cr2](%0) \n\t"
3476
3477                 "pop  %%"R"bp; pop  %%"R"bp; pop  %%"R"dx \n\t"
3478                 "setbe %c[fail](%0) \n\t"
3479               : : "c"(vmx), "d"((unsigned long)HOST_RSP),
3480                 [launched]"i"(offsetof(struct vcpu_vmx, launched)),
3481                 [fail]"i"(offsetof(struct vcpu_vmx, fail)),
3482                 [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
3483                 [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
3484                 [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
3485                 [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
3486                 [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
3487                 [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
3488                 [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
3489                 [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
3490 #ifdef CONFIG_X86_64
3491                 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
3492                 [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
3493                 [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
3494                 [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
3495                 [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
3496                 [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
3497                 [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
3498                 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
3499 #endif
3500                 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
3501               : "cc", "memory"
3502                 , R"bx", R"di", R"si"
3503 #ifdef CONFIG_X86_64
3504                 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
3505 #endif
3506               );
3507
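        /*
         * All general-purpose registers were saved by hand above; RIP and
         * RSP still live in the VMCS and are re-read lazily via cache_reg.
         */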
3508         vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
3509         vcpu->arch.regs_dirty = 0;
3510
3511         get_debugreg(vcpu->arch.dr6, 6);
3512
3513         vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
3514         if (vmx->rmode.irq.pending)
3515                 fixup_rmode_irq(vmx);
3516
3517         vmx_update_window_states(vcpu);
3518
3519         asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
3520         vmx->launched = 1;
3521
3522         intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
3523
3524         /* We need to handle NMIs before interrupts are enabled */
3525         if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
3526             (intr_info & INTR_INFO_VALID_MASK)) {
3527                 KVMTRACE_0D(NMI, vcpu, handler);
3528                 asm("int $2");
3529         }
3530
3531         vmx_complete_interrupts(vmx);
3532 }
3533
3534 #undef R
3535 #undef Q
3536
3537 static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
3538 {
3539         struct vcpu_vmx *vmx = to_vmx(vcpu);
3540
3541         if (vmx->vmcs) {
3542                 vcpu_clear(vmx);
3543                 free_vmcs(vmx->vmcs);
3544                 vmx->vmcs = NULL;
3545         }
3546 }
3547
3548 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
3549 {
3550         struct vcpu_vmx *vmx = to_vmx(vcpu);
3551
3552         spin_lock(&vmx_vpid_lock);
3553         if (vmx->vpid != 0)
3554                 __clear_bit(vmx->vpid, vmx_vpid_bitmap);
3555         spin_unlock(&vmx_vpid_lock);
3556         vmx_free_vmcs(vcpu);
3557         kfree(vmx->host_msrs);
3558         kfree(vmx->guest_msrs);
3559         kvm_vcpu_uninit(vcpu);
3560         kmem_cache_free(kvm_vcpu_cache, vmx);
3561 }
3562
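/*
 * Allocate and initialize a new VMX vcpu: a vcpu_vmx structure, a VPID,
 * guest/host MSR save areas, and a VMCS that is set up on whatever CPU we
 * are running on.  The APIC-access page and the EPT identity page table are
 * also allocated here when they are needed.
 */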
3563 static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
3564 {
3565         int err;
3566         struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
3567         int cpu;
3568
3569         if (!vmx)
3570                 return ERR_PTR(-ENOMEM);
3571
3572         allocate_vpid(vmx);
3573
3574         err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
3575         if (err)
3576                 goto free_vcpu;
3577
3578         vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
3579         if (!vmx->guest_msrs) {
3580                 err = -ENOMEM;
3581                 goto uninit_vcpu;
3582         }
3583
3584         vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
3585         if (!vmx->host_msrs)
3586                 goto free_guest_msrs;
3587
3588         vmx->vmcs = alloc_vmcs();
3589         if (!vmx->vmcs)
3590                 goto free_msrs;
3591
3592         vmcs_clear(vmx->vmcs);
3593
3594         cpu = get_cpu();
3595         vmx_vcpu_load(&vmx->vcpu, cpu);
3596         err = vmx_vcpu_setup(vmx);
3597         vmx_vcpu_put(&vmx->vcpu);
3598         put_cpu();
3599         if (err)
3600                 goto free_vmcs;
3601         if (vm_need_virtualize_apic_accesses(kvm))
3602                 if (alloc_apic_access_page(kvm) != 0)
3603                         goto free_vmcs;
3604
3605         if (vm_need_ept())
3606                 if (alloc_identity_pagetable(kvm) != 0)
3607                         goto free_vmcs;
3608
3609         return &vmx->vcpu;
3610
3611 free_vmcs:
3612         free_vmcs(vmx->vmcs);
3613 free_msrs:
3614         kfree(vmx->host_msrs);
3615 free_guest_msrs:
3616         kfree(vmx->guest_msrs);
3617 uninit_vcpu:
3618         kvm_vcpu_uninit(&vmx->vcpu);
3619 free_vcpu:
3620         kmem_cache_free(kvm_vcpu_cache, vmx);
3621         return ERR_PTR(err);
3622 }
3623
3624 static void __init vmx_check_processor_compat(void *rtn)
3625 {
3626         struct vmcs_config vmcs_conf;
3627
3628         *(int *)rtn = 0;
3629         if (setup_vmcs_config(&vmcs_conf) < 0)
3630                 *(int *)rtn = -EIO;
3631         if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
3632                 printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
3633                                 smp_processor_id());
3634                 *(int *)rtn = -EIO;
3635         }
3636 }
3637
3638 static int get_ept_level(void)
3639 {
3640         return VMX_EPT_DEFAULT_GAW + 1;
3641 }
3642
3643 static int vmx_get_mt_mask_shift(void)
3644 {
3645         return VMX_EPT_MT_EPTE_SHIFT;
3646 }
3647
3648 static struct kvm_x86_ops vmx_x86_ops = {
3649         .cpu_has_kvm_support = cpu_has_kvm_support,
3650         .disabled_by_bios = vmx_disabled_by_bios,
3651         .hardware_setup = hardware_setup,
3652         .hardware_unsetup = hardware_unsetup,
3653         .check_processor_compatibility = vmx_check_processor_compat,
3654         .hardware_enable = hardware_enable,
3655         .hardware_disable = hardware_disable,
3656         .cpu_has_accelerated_tpr = cpu_has_vmx_virtualize_apic_accesses,
3657
3658         .vcpu_create = vmx_create_vcpu,
3659         .vcpu_free = vmx_free_vcpu,
3660         .vcpu_reset = vmx_vcpu_reset,
3661
3662         .prepare_guest_switch = vmx_save_host_state,
3663         .vcpu_load = vmx_vcpu_load,
3664         .vcpu_put = vmx_vcpu_put,
3665
3666         .set_guest_debug = set_guest_debug,
3667         .get_msr = vmx_get_msr,
3668         .set_msr = vmx_set_msr,
3669         .get_segment_base = vmx_get_segment_base,
3670         .get_segment = vmx_get_segment,
3671         .set_segment = vmx_set_segment,
3672         .get_cpl = vmx_get_cpl,
3673         .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
3674         .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
3675         .set_cr0 = vmx_set_cr0,
3676         .set_cr3 = vmx_set_cr3,
3677         .set_cr4 = vmx_set_cr4,
3678         .set_efer = vmx_set_efer,
3679         .get_idt = vmx_get_idt,
3680         .set_idt = vmx_set_idt,
3681         .get_gdt = vmx_get_gdt,
3682         .set_gdt = vmx_set_gdt,
3683         .cache_reg = vmx_cache_reg,
3684         .get_rflags = vmx_get_rflags,
3685         .set_rflags = vmx_set_rflags,
3686
3687         .tlb_flush = vmx_flush_tlb,
3688
3689         .run = vmx_vcpu_run,
3690         .handle_exit = vmx_handle_exit,
3691         .skip_emulated_instruction = skip_emulated_instruction,
3692         .patch_hypercall = vmx_patch_hypercall,
3693         .get_irq = vmx_get_irq,
3694         .set_irq = vmx_inject_irq,
3695         .queue_exception = vmx_queue_exception,
3696         .exception_injected = vmx_exception_injected,
3697         .inject_pending_irq = vmx_intr_assist,
3698         .inject_pending_vectors = do_interrupt_requests,
3699
3700         .set_tss_addr = vmx_set_tss_addr,
3701         .get_tdp_level = get_ept_level,
3702         .get_mt_mask_shift = vmx_get_mt_mask_shift,
3703 };
3704
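/*
 * Module init: allocate the I/O and MSR permission bitmaps, register with
 * the KVM core, allow the guest direct access to a handful of frequently
 * used MSRs, and configure the MMU for EPT (two-dimensional paging) or
 * shadow paging depending on hardware support.
 */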
3705 static int __init vmx_init(void)
3706 {
3707         int r;
3708
3709         vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
3710         if (!vmx_io_bitmap_a)
3711                 return -ENOMEM;
3712
3713         vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
3714         if (!vmx_io_bitmap_b) {
3715                 r = -ENOMEM;
3716                 goto out;
3717         }
3718
3719         vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
3720         if (!vmx_msr_bitmap_legacy) {
3721                 r = -ENOMEM;
3722                 goto out1;
3723         }
3724
3725         vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
3726         if (!vmx_msr_bitmap_longmode) {
3727                 r = -ENOMEM;
3728                 goto out2;
3729         }
3730
3731         /*
3732          * Allow direct access to the PC debug port (it is often used for I/O
3733          * delays, but the vmexits simply slow things down).
3734          */
3735         memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
3736         clear_bit(0x80, vmx_io_bitmap_a);
3737
3738         memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
3739
3740         memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
3741         memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
3742
3743         set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
3744
3745         r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
3746         if (r)
3747                 goto out3;
3748
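        /*
         * These MSRs are saved and restored across entry/exit (via the VMCS
         * guest-state area or KVM's MSR switching), so the guest may access
         * them directly without causing vmexits.
         */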
3749         vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
3750         vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
3751         vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
3752         vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
3753         vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
3754         vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
3755
3756         if (vm_need_ept()) {
3757                 bypass_guest_pf = 0;
3758                 kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
3759                         VMX_EPT_WRITABLE_MASK);
3760                 kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
3761                                 VMX_EPT_EXECUTABLE_MASK,
3762                                 VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
3763                 kvm_enable_tdp();
3764         } else
3765                 kvm_disable_tdp();
3766
3767         if (bypass_guest_pf)
3768                 kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
3769
3770         ept_sync_global();
3771
3772         return 0;
3773
3774 out3:
3775         free_page((unsigned long)vmx_msr_bitmap_longmode);
3776 out2:
3777         free_page((unsigned long)vmx_msr_bitmap_legacy);
3778 out1:
3779         free_page((unsigned long)vmx_io_bitmap_b);
3780 out:
3781         free_page((unsigned long)vmx_io_bitmap_a);
3782         return r;
3783 }
3784
3785 static void __exit vmx_exit(void)
3786 {
3787         free_page((unsigned long)vmx_msr_bitmap_legacy);
3788         free_page((unsigned long)vmx_msr_bitmap_longmode);
3789         free_page((unsigned long)vmx_io_bitmap_b);
3790         free_page((unsigned long)vmx_io_bitmap_a);
3791
3792         kvm_exit();
3793 }
3794
3795 module_init(vmx_init)
3796 module_exit(vmx_exit)