KVM: convert custom marker based tracing to event traces
[safe/jmp/linux-2.6] / arch / x86 / kvm / vmx.c
1 /*
2  * Kernel-based Virtual Machine driver for Linux
3  *
4  * This module enables machines with Intel VT-x extensions to run virtual
5  * machines without emulation or binary translation.
6  *
7  * Copyright (C) 2006 Qumranet, Inc.
8  *
9  * Authors:
10  *   Avi Kivity   <avi@qumranet.com>
11  *   Yaniv Kamay  <yaniv@qumranet.com>
12  *
13  * This work is licensed under the terms of the GNU GPL, version 2.  See
14  * the COPYING file in the top-level directory.
15  *
16  */
17
18 #include "irq.h"
19 #include "mmu.h"
20
21 #include <linux/kvm_host.h>
22 #include <linux/module.h>
23 #include <linux/kernel.h>
24 #include <linux/mm.h>
25 #include <linux/highmem.h>
26 #include <linux/sched.h>
27 #include <linux/moduleparam.h>
28 #include <linux/ftrace_event.h>
29 #include "kvm_cache_regs.h"
30 #include "x86.h"
31
32 #include <asm/io.h>
33 #include <asm/desc.h>
34 #include <asm/vmx.h>
35 #include <asm/virtext.h>
36 #include <asm/mce.h>
37
38 #include "trace.h"
39
40 #define __ex(x) __kvm_handle_fault_on_reboot(x)
41
42 MODULE_AUTHOR("Qumranet");
43 MODULE_LICENSE("GPL");
44
45 static int __read_mostly bypass_guest_pf = 1;
46 module_param(bypass_guest_pf, bool, S_IRUGO);
47
48 static int __read_mostly enable_vpid = 1;
49 module_param_named(vpid, enable_vpid, bool, 0444);
50
51 static int __read_mostly flexpriority_enabled = 1;
52 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
53
54 static int __read_mostly enable_ept = 1;
55 module_param_named(ept, enable_ept, bool, S_IRUGO);
56
57 static int __read_mostly enable_unrestricted_guest = 1;
58 module_param_named(unrestricted_guest,
59                         enable_unrestricted_guest, bool, S_IRUGO);
60
61 static int __read_mostly emulate_invalid_guest_state = 0;
62 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
63
64 struct vmcs {
65         u32 revision_id;
66         u32 abort;
67         char data[0];
68 };
69
70 struct vcpu_vmx {
71         struct kvm_vcpu       vcpu;
72         struct list_head      local_vcpus_link;
73         unsigned long         host_rsp;
74         int                   launched;
75         u8                    fail;
76         u32                   idt_vectoring_info;
77         struct kvm_msr_entry *guest_msrs;
78         struct kvm_msr_entry *host_msrs;
79         int                   nmsrs;
80         int                   save_nmsrs;
81         int                   msr_offset_efer;
82 #ifdef CONFIG_X86_64
83         int                   msr_offset_kernel_gs_base;
84 #endif
85         struct vmcs          *vmcs;
86         struct {
87                 int           loaded;
88                 u16           fs_sel, gs_sel, ldt_sel;
89                 int           gs_ldt_reload_needed;
90                 int           fs_reload_needed;
91                 int           guest_efer_loaded;
92         } host_state;
93         struct {
94                 int vm86_active;
95                 u8 save_iopl;
96                 struct kvm_save_segment {
97                         u16 selector;
98                         unsigned long base;
99                         u32 limit;
100                         u32 ar;
101                 } tr, es, ds, fs, gs;
102                 struct {
103                         bool pending;
104                         u8 vector;
105                         unsigned rip;
106                 } irq;
107         } rmode;
108         int vpid;
109         bool emulation_required;
110         enum emulation_result invalid_state_emulation_result;
111
112         /* Support for vnmi-less CPUs */
113         int soft_vnmi_blocked;
114         ktime_t entry_time;
115         s64 vnmi_blocked_time;
116         u32 exit_reason;
117 };
118
119 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
120 {
121         return container_of(vcpu, struct vcpu_vmx, vcpu);
122 }
123
124 static int init_rmode(struct kvm *kvm);
125 static u64 construct_eptp(unsigned long root_hpa);
126
127 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
128 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
129 static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu);
130
131 static unsigned long *vmx_io_bitmap_a;
132 static unsigned long *vmx_io_bitmap_b;
133 static unsigned long *vmx_msr_bitmap_legacy;
134 static unsigned long *vmx_msr_bitmap_longmode;
135
136 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
137 static DEFINE_SPINLOCK(vmx_vpid_lock);
138
139 static struct vmcs_config {
140         int size;
141         int order;
142         u32 revision_id;
143         u32 pin_based_exec_ctrl;
144         u32 cpu_based_exec_ctrl;
145         u32 cpu_based_2nd_exec_ctrl;
146         u32 vmexit_ctrl;
147         u32 vmentry_ctrl;
148 } vmcs_config;
149
150 static struct vmx_capability {
151         u32 ept;
152         u32 vpid;
153 } vmx_capability;
154
155 #define VMX_SEGMENT_FIELD(seg)                                  \
156         [VCPU_SREG_##seg] = {                                   \
157                 .selector = GUEST_##seg##_SELECTOR,             \
158                 .base = GUEST_##seg##_BASE,                     \
159                 .limit = GUEST_##seg##_LIMIT,                   \
160                 .ar_bytes = GUEST_##seg##_AR_BYTES,             \
161         }
162
163 static struct kvm_vmx_segment_field {
164         unsigned selector;
165         unsigned base;
166         unsigned limit;
167         unsigned ar_bytes;
168 } kvm_vmx_segment_fields[] = {
169         VMX_SEGMENT_FIELD(CS),
170         VMX_SEGMENT_FIELD(DS),
171         VMX_SEGMENT_FIELD(ES),
172         VMX_SEGMENT_FIELD(FS),
173         VMX_SEGMENT_FIELD(GS),
174         VMX_SEGMENT_FIELD(SS),
175         VMX_SEGMENT_FIELD(TR),
176         VMX_SEGMENT_FIELD(LDTR),
177 };
178
179 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
180
181 /*
182  * Keep MSR_K6_STAR at the end, as setup_msrs() will try to optimize it
183  * away by decrementing the array size.
184  */
185 static const u32 vmx_msr_index[] = {
186 #ifdef CONFIG_X86_64
187         MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
188 #endif
189         MSR_EFER, MSR_K6_STAR,
190 };
191 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
192
193 static void load_msrs(struct kvm_msr_entry *e, int n)
194 {
195         int i;
196
197         for (i = 0; i < n; ++i)
198                 wrmsrl(e[i].index, e[i].data);
199 }
200
201 static void save_msrs(struct kvm_msr_entry *e, int n)
202 {
203         int i;
204
205         for (i = 0; i < n; ++i)
206                 rdmsrl(e[i].index, e[i].data);
207 }
208
209 static inline int is_page_fault(u32 intr_info)
210 {
211         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
212                              INTR_INFO_VALID_MASK)) ==
213                 (INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
214 }
215
216 static inline int is_no_device(u32 intr_info)
217 {
218         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
219                              INTR_INFO_VALID_MASK)) ==
220                 (INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
221 }
222
223 static inline int is_invalid_opcode(u32 intr_info)
224 {
225         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
226                              INTR_INFO_VALID_MASK)) ==
227                 (INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
228 }
229
230 static inline int is_external_interrupt(u32 intr_info)
231 {
232         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
233                 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
234 }
235
236 static inline int is_machine_check(u32 intr_info)
237 {
238         return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
239                              INTR_INFO_VALID_MASK)) ==
240                 (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
241 }
242
243 static inline int cpu_has_vmx_msr_bitmap(void)
244 {
245         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
246 }
247
248 static inline int cpu_has_vmx_tpr_shadow(void)
249 {
250         return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
251 }
252
253 static inline int vm_need_tpr_shadow(struct kvm *kvm)
254 {
255         return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
256 }
257
258 static inline int cpu_has_secondary_exec_ctrls(void)
259 {
260         return vmcs_config.cpu_based_exec_ctrl &
261                 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
262 }
263
264 static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
265 {
266         return vmcs_config.cpu_based_2nd_exec_ctrl &
267                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
268 }
269
270 static inline bool cpu_has_vmx_flexpriority(void)
271 {
272         return cpu_has_vmx_tpr_shadow() &&
273                 cpu_has_vmx_virtualize_apic_accesses();
274 }
275
276 static inline bool cpu_has_vmx_ept_execute_only(void)
277 {
278         return !!(vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT);
279 }
280
281 static inline bool cpu_has_vmx_eptp_uncacheable(void)
282 {
283         return !!(vmx_capability.ept & VMX_EPTP_UC_BIT);
284 }
285
286 static inline bool cpu_has_vmx_eptp_writeback(void)
287 {
288         return !!(vmx_capability.ept & VMX_EPTP_WB_BIT);
289 }
290
291 static inline bool cpu_has_vmx_ept_2m_page(void)
292 {
293         return !!(vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT);
294 }
295
296 static inline int cpu_has_vmx_invept_individual_addr(void)
297 {
298         return !!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT);
299 }
300
301 static inline int cpu_has_vmx_invept_context(void)
302 {
303         return !!(vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT);
304 }
305
306 static inline int cpu_has_vmx_invept_global(void)
307 {
308         return !!(vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT);
309 }
310
311 static inline int cpu_has_vmx_ept(void)
312 {
313         return vmcs_config.cpu_based_2nd_exec_ctrl &
314                 SECONDARY_EXEC_ENABLE_EPT;
315 }
316
317 static inline int cpu_has_vmx_unrestricted_guest(void)
318 {
319         return vmcs_config.cpu_based_2nd_exec_ctrl &
320                 SECONDARY_EXEC_UNRESTRICTED_GUEST;
321 }
322
323 static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
324 {
325         return flexpriority_enabled &&
326                 (cpu_has_vmx_virtualize_apic_accesses()) &&
327                 (irqchip_in_kernel(kvm));
328 }
329
330 static inline int cpu_has_vmx_vpid(void)
331 {
332         return vmcs_config.cpu_based_2nd_exec_ctrl &
333                 SECONDARY_EXEC_ENABLE_VPID;
334 }
335
336 static inline int cpu_has_virtual_nmis(void)
337 {
338         return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
339 }
340
341 static inline bool report_flexpriority(void)
342 {
343         return flexpriority_enabled;
344 }
345
346 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
347 {
348         int i;
349
350         for (i = 0; i < vmx->nmsrs; ++i)
351                 if (vmx->guest_msrs[i].index == msr)
352                         return i;
353         return -1;
354 }
355
356 static inline void __invvpid(int ext, u16 vpid, gva_t gva)
357 {
358     struct {
359         u64 vpid : 16;
360         u64 rsvd : 48;
361         u64 gva;
362     } operand = { vpid, 0, gva };
363
364     asm volatile (__ex(ASM_VMX_INVVPID)
365                   /* CF==1 or ZF==1 --> rc = -1 */
366                   "; ja 1f ; ud2 ; 1:"
367                   : : "a"(&operand), "c"(ext) : "cc", "memory");
368 }
369
370 static inline void __invept(int ext, u64 eptp, gpa_t gpa)
371 {
372         struct {
373                 u64 eptp, gpa;
374         } operand = {eptp, gpa};
375
376         asm volatile (__ex(ASM_VMX_INVEPT)
377                         /* CF==1 or ZF==1 --> rc = -1 */
378                         "; ja 1f ; ud2 ; 1:\n"
379                         : : "a" (&operand), "c" (ext) : "cc", "memory");
380 }
381
382 static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
383 {
384         int i;
385
386         i = __find_msr_index(vmx, msr);
387         if (i >= 0)
388                 return &vmx->guest_msrs[i];
389         return NULL;
390 }
391
392 static void vmcs_clear(struct vmcs *vmcs)
393 {
394         u64 phys_addr = __pa(vmcs);
395         u8 error;
396
397         asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
398                       : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
399                       : "cc", "memory");
400         if (error)
401                 printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
402                        vmcs, phys_addr);
403 }
404
405 static void __vcpu_clear(void *arg)
406 {
407         struct vcpu_vmx *vmx = arg;
408         int cpu = raw_smp_processor_id();
409
410         if (vmx->vcpu.cpu == cpu)
411                 vmcs_clear(vmx->vmcs);
412         if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
413                 per_cpu(current_vmcs, cpu) = NULL;
414         rdtscll(vmx->vcpu.arch.host_tsc);
415         list_del(&vmx->local_vcpus_link);
416         vmx->vcpu.cpu = -1;
417         vmx->launched = 0;
418 }
419
420 static void vcpu_clear(struct vcpu_vmx *vmx)
421 {
422         if (vmx->vcpu.cpu == -1)
423                 return;
424         smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1);
425 }
426
427 static inline void vpid_sync_vcpu_all(struct vcpu_vmx *vmx)
428 {
429         if (vmx->vpid == 0)
430                 return;
431
432         __invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vmx->vpid, 0);
433 }
434
435 static inline void ept_sync_global(void)
436 {
437         if (cpu_has_vmx_invept_global())
438                 __invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
439 }
440
441 static inline void ept_sync_context(u64 eptp)
442 {
443         if (enable_ept) {
444                 if (cpu_has_vmx_invept_context())
445                         __invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
446                 else
447                         ept_sync_global();
448         }
449 }
450
451 static inline void ept_sync_individual_addr(u64 eptp, gpa_t gpa)
452 {
453         if (enable_ept) {
454                 if (cpu_has_vmx_invept_individual_addr())
455                         __invept(VMX_EPT_EXTENT_INDIVIDUAL_ADDR,
456                                         eptp, gpa);
457                 else
458                         ept_sync_context(eptp);
459         }
460 }
461
462 static unsigned long vmcs_readl(unsigned long field)
463 {
464         unsigned long value;
465
466         asm volatile (__ex(ASM_VMX_VMREAD_RDX_RAX)
467                       : "=a"(value) : "d"(field) : "cc");
468         return value;
469 }
470
471 static u16 vmcs_read16(unsigned long field)
472 {
473         return vmcs_readl(field);
474 }
475
476 static u32 vmcs_read32(unsigned long field)
477 {
478         return vmcs_readl(field);
479 }
480
481 static u64 vmcs_read64(unsigned long field)
482 {
483 #ifdef CONFIG_X86_64
484         return vmcs_readl(field);
485 #else
486         return vmcs_readl(field) | ((u64)vmcs_readl(field+1) << 32);
487 #endif
488 }
489
490 static noinline void vmwrite_error(unsigned long field, unsigned long value)
491 {
492         printk(KERN_ERR "vmwrite error: reg %lx value %lx (err %d)\n",
493                field, value, vmcs_read32(VM_INSTRUCTION_ERROR));
494         dump_stack();
495 }
496
497 static void vmcs_writel(unsigned long field, unsigned long value)
498 {
499         u8 error;
500
501         asm volatile (__ex(ASM_VMX_VMWRITE_RAX_RDX) "; setna %0"
502                        : "=q"(error) : "a"(value), "d"(field) : "cc");
503         if (unlikely(error))
504                 vmwrite_error(field, value);
505 }
506
507 static void vmcs_write16(unsigned long field, u16 value)
508 {
509         vmcs_writel(field, value);
510 }
511
512 static void vmcs_write32(unsigned long field, u32 value)
513 {
514         vmcs_writel(field, value);
515 }
516
517 static void vmcs_write64(unsigned long field, u64 value)
518 {
519         vmcs_writel(field, value);
520 #ifndef CONFIG_X86_64
521         asm volatile ("");
522         vmcs_writel(field+1, value >> 32);
523 #endif
524 }
525
526 static void vmcs_clear_bits(unsigned long field, u32 mask)
527 {
528         vmcs_writel(field, vmcs_readl(field) & ~mask);
529 }
530
531 static void vmcs_set_bits(unsigned long field, u32 mask)
532 {
533         vmcs_writel(field, vmcs_readl(field) | mask);
534 }
535
536 static void update_exception_bitmap(struct kvm_vcpu *vcpu)
537 {
538         u32 eb;
539
540         eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR);
541         if (!vcpu->fpu_active)
542                 eb |= 1u << NM_VECTOR;
543         if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
544                 if (vcpu->guest_debug &
545                     (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
546                         eb |= 1u << DB_VECTOR;
547                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
548                         eb |= 1u << BP_VECTOR;
549         }
550         if (to_vmx(vcpu)->rmode.vm86_active)
551                 eb = ~0;
552         if (enable_ept)
553                 eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
554         vmcs_write32(EXCEPTION_BITMAP, eb);
555 }
556
557 static void reload_tss(void)
558 {
559         /*
560          * VT restores TR but not its size.  Useless.
561          */
562         struct descriptor_table gdt;
563         struct desc_struct *descs;
564
565         kvm_get_gdt(&gdt);
566         descs = (void *)gdt.base;
567         descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
568         load_TR_desc();
569 }
570
571 static void load_transition_efer(struct vcpu_vmx *vmx)
572 {
573         int efer_offset = vmx->msr_offset_efer;
574         u64 host_efer = vmx->host_msrs[efer_offset].data;
575         u64 guest_efer = vmx->guest_msrs[efer_offset].data;
576         u64 ignore_bits;
577
578         if (efer_offset < 0)
579                 return;
580         /*
581          * NX is emulated; LMA and LME handled by hardware; SCE meaninless
582          * outside long mode
583          */
584         ignore_bits = EFER_NX | EFER_SCE;
585 #ifdef CONFIG_X86_64
586         ignore_bits |= EFER_LMA | EFER_LME;
587         /* SCE is meaningful only in long mode on Intel */
588         if (guest_efer & EFER_LMA)
589                 ignore_bits &= ~(u64)EFER_SCE;
590 #endif
591         if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits))
592                 return;
593
594         vmx->host_state.guest_efer_loaded = 1;
595         guest_efer &= ~ignore_bits;
596         guest_efer |= host_efer & ignore_bits;
597         wrmsrl(MSR_EFER, guest_efer);
598         vmx->vcpu.stat.efer_reload++;
599 }
600
601 static void reload_host_efer(struct vcpu_vmx *vmx)
602 {
603         if (vmx->host_state.guest_efer_loaded) {
604                 vmx->host_state.guest_efer_loaded = 0;
605                 load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
606         }
607 }
608
609 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
610 {
611         struct vcpu_vmx *vmx = to_vmx(vcpu);
612
613         if (vmx->host_state.loaded)
614                 return;
615
616         vmx->host_state.loaded = 1;
617         /*
618          * Set host fs and gs selectors.  Unfortunately, 22.2.3 does not
619          * allow segment selectors with cpl > 0 or ti == 1.
620          */
621         vmx->host_state.ldt_sel = kvm_read_ldt();
622         vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
623         vmx->host_state.fs_sel = kvm_read_fs();
624         if (!(vmx->host_state.fs_sel & 7)) {
625                 vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
626                 vmx->host_state.fs_reload_needed = 0;
627         } else {
628                 vmcs_write16(HOST_FS_SELECTOR, 0);
629                 vmx->host_state.fs_reload_needed = 1;
630         }
631         vmx->host_state.gs_sel = kvm_read_gs();
632         if (!(vmx->host_state.gs_sel & 7))
633                 vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
634         else {
635                 vmcs_write16(HOST_GS_SELECTOR, 0);
636                 vmx->host_state.gs_ldt_reload_needed = 1;
637         }
638
639 #ifdef CONFIG_X86_64
640         vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
641         vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
642 #else
643         vmcs_writel(HOST_FS_BASE, segment_base(vmx->host_state.fs_sel));
644         vmcs_writel(HOST_GS_BASE, segment_base(vmx->host_state.gs_sel));
645 #endif
646
647 #ifdef CONFIG_X86_64
648         if (is_long_mode(&vmx->vcpu))
649                 save_msrs(vmx->host_msrs +
650                           vmx->msr_offset_kernel_gs_base, 1);
651
652 #endif
653         load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
654         load_transition_efer(vmx);
655 }
656
657 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
658 {
659         unsigned long flags;
660
661         if (!vmx->host_state.loaded)
662                 return;
663
664         ++vmx->vcpu.stat.host_state_reload;
665         vmx->host_state.loaded = 0;
666         if (vmx->host_state.fs_reload_needed)
667                 kvm_load_fs(vmx->host_state.fs_sel);
668         if (vmx->host_state.gs_ldt_reload_needed) {
669                 kvm_load_ldt(vmx->host_state.ldt_sel);
670                 /*
671                  * If we have to reload gs, we must take care to
672                  * preserve our gs base.
673                  */
674                 local_irq_save(flags);
675                 kvm_load_gs(vmx->host_state.gs_sel);
676 #ifdef CONFIG_X86_64
677                 wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
678 #endif
679                 local_irq_restore(flags);
680         }
681         reload_tss();
682         save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
683         load_msrs(vmx->host_msrs, vmx->save_nmsrs);
684         reload_host_efer(vmx);
685 }
686
687 static void vmx_load_host_state(struct vcpu_vmx *vmx)
688 {
689         preempt_disable();
690         __vmx_load_host_state(vmx);
691         preempt_enable();
692 }
693
694 /*
695  * Switches to specified vcpu, until a matching vcpu_put(), but assumes
696  * vcpu mutex is already taken.
697  */
698 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
699 {
700         struct vcpu_vmx *vmx = to_vmx(vcpu);
701         u64 phys_addr = __pa(vmx->vmcs);
702         u64 tsc_this, delta, new_offset;
703
704         if (vcpu->cpu != cpu) {
705                 vcpu_clear(vmx);
706                 kvm_migrate_timers(vcpu);
707                 vpid_sync_vcpu_all(vmx);
708                 local_irq_disable();
709                 list_add(&vmx->local_vcpus_link,
710                          &per_cpu(vcpus_on_cpu, cpu));
711                 local_irq_enable();
712         }
713
714         if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
715                 u8 error;
716
717                 per_cpu(current_vmcs, cpu) = vmx->vmcs;
718                 asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
719                               : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
720                               : "cc");
721                 if (error)
722                         printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
723                                vmx->vmcs, phys_addr);
724         }
725
726         if (vcpu->cpu != cpu) {
727                 struct descriptor_table dt;
728                 unsigned long sysenter_esp;
729
730                 vcpu->cpu = cpu;
731                 /*
732                  * Linux uses per-cpu TSS and GDT, so set these when switching
733                  * processors.
734                  */
735                 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
736                 kvm_get_gdt(&dt);
737                 vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */
738
739                 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
740                 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
741
742                 /*
743                  * Make sure the time stamp counter is monotonous.
744                  */
745                 rdtscll(tsc_this);
746                 if (tsc_this < vcpu->arch.host_tsc) {
747                         delta = vcpu->arch.host_tsc - tsc_this;
748                         new_offset = vmcs_read64(TSC_OFFSET) + delta;
749                         vmcs_write64(TSC_OFFSET, new_offset);
750                 }
751         }
752 }
753
754 static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
755 {
756         __vmx_load_host_state(to_vmx(vcpu));
757 }
758
759 static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
760 {
761         if (vcpu->fpu_active)
762                 return;
763         vcpu->fpu_active = 1;
764         vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
765         if (vcpu->arch.cr0 & X86_CR0_TS)
766                 vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
767         update_exception_bitmap(vcpu);
768 }
769
770 static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
771 {
772         if (!vcpu->fpu_active)
773                 return;
774         vcpu->fpu_active = 0;
775         vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
776         update_exception_bitmap(vcpu);
777 }
778
779 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
780 {
781         return vmcs_readl(GUEST_RFLAGS);
782 }
783
784 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
785 {
786         if (to_vmx(vcpu)->rmode.vm86_active)
787                 rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
788         vmcs_writel(GUEST_RFLAGS, rflags);
789 }
790
791 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
792 {
793         u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
794         int ret = 0;
795
796         if (interruptibility & GUEST_INTR_STATE_STI)
797                 ret |= X86_SHADOW_INT_STI;
798         if (interruptibility & GUEST_INTR_STATE_MOV_SS)
799                 ret |= X86_SHADOW_INT_MOV_SS;
800
801         return ret & mask;
802 }
803
804 static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
805 {
806         u32 interruptibility_old = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
807         u32 interruptibility = interruptibility_old;
808
809         interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
810
811         if (mask & X86_SHADOW_INT_MOV_SS)
812                 interruptibility |= GUEST_INTR_STATE_MOV_SS;
813         if (mask & X86_SHADOW_INT_STI)
814                 interruptibility |= GUEST_INTR_STATE_STI;
815
816         if ((interruptibility != interruptibility_old))
817                 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, interruptibility);
818 }
819
820 static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
821 {
822         unsigned long rip;
823
824         rip = kvm_rip_read(vcpu);
825         rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
826         kvm_rip_write(vcpu, rip);
827
828         /* skipping an emulated instruction also counts */
829         vmx_set_interrupt_shadow(vcpu, 0);
830 }
831
832 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
833                                 bool has_error_code, u32 error_code)
834 {
835         struct vcpu_vmx *vmx = to_vmx(vcpu);
836         u32 intr_info = nr | INTR_INFO_VALID_MASK;
837
838         if (has_error_code) {
839                 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
840                 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
841         }
842
843         if (vmx->rmode.vm86_active) {
844                 vmx->rmode.irq.pending = true;
845                 vmx->rmode.irq.vector = nr;
846                 vmx->rmode.irq.rip = kvm_rip_read(vcpu);
847                 if (kvm_exception_is_soft(nr))
848                         vmx->rmode.irq.rip +=
849                                 vmx->vcpu.arch.event_exit_inst_len;
850                 intr_info |= INTR_TYPE_SOFT_INTR;
851                 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
852                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
853                 kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
854                 return;
855         }
856
857         if (kvm_exception_is_soft(nr)) {
858                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
859                              vmx->vcpu.arch.event_exit_inst_len);
860                 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
861         } else
862                 intr_info |= INTR_TYPE_HARD_EXCEPTION;
863
864         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
865 }
866
867 /*
868  * Swap MSR entry in host/guest MSR entry array.
869  */
870 #ifdef CONFIG_X86_64
871 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
872 {
873         struct kvm_msr_entry tmp;
874
875         tmp = vmx->guest_msrs[to];
876         vmx->guest_msrs[to] = vmx->guest_msrs[from];
877         vmx->guest_msrs[from] = tmp;
878         tmp = vmx->host_msrs[to];
879         vmx->host_msrs[to] = vmx->host_msrs[from];
880         vmx->host_msrs[from] = tmp;
881 }
882 #endif
883
884 /*
885  * Set up the vmcs to automatically save and restore system
886  * msrs.  Don't touch the 64-bit msrs if the guest is in legacy
887  * mode, as fiddling with msrs is very expensive.
888  */
889 static void setup_msrs(struct vcpu_vmx *vmx)
890 {
891         int save_nmsrs;
892         unsigned long *msr_bitmap;
893
894         vmx_load_host_state(vmx);
895         save_nmsrs = 0;
896 #ifdef CONFIG_X86_64
897         if (is_long_mode(&vmx->vcpu)) {
898                 int index;
899
900                 index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
901                 if (index >= 0)
902                         move_msr_up(vmx, index, save_nmsrs++);
903                 index = __find_msr_index(vmx, MSR_LSTAR);
904                 if (index >= 0)
905                         move_msr_up(vmx, index, save_nmsrs++);
906                 index = __find_msr_index(vmx, MSR_CSTAR);
907                 if (index >= 0)
908                         move_msr_up(vmx, index, save_nmsrs++);
909                 index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
910                 if (index >= 0)
911                         move_msr_up(vmx, index, save_nmsrs++);
912                 /*
913                  * MSR_K6_STAR is only needed on long mode guests, and only
914                  * if efer.sce is enabled.
915                  */
916                 index = __find_msr_index(vmx, MSR_K6_STAR);
917                 if ((index >= 0) && (vmx->vcpu.arch.shadow_efer & EFER_SCE))
918                         move_msr_up(vmx, index, save_nmsrs++);
919         }
920 #endif
921         vmx->save_nmsrs = save_nmsrs;
922
923 #ifdef CONFIG_X86_64
924         vmx->msr_offset_kernel_gs_base =
925                 __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
926 #endif
927         vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
928
929         if (cpu_has_vmx_msr_bitmap()) {
930                 if (is_long_mode(&vmx->vcpu))
931                         msr_bitmap = vmx_msr_bitmap_longmode;
932                 else
933                         msr_bitmap = vmx_msr_bitmap_legacy;
934
935                 vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
936         }
937 }
938
939 /*
940  * reads and returns guest's timestamp counter "register"
941  * guest_tsc = host_tsc + tsc_offset    -- 21.3
942  */
943 static u64 guest_read_tsc(void)
944 {
945         u64 host_tsc, tsc_offset;
946
947         rdtscll(host_tsc);
948         tsc_offset = vmcs_read64(TSC_OFFSET);
949         return host_tsc + tsc_offset;
950 }
951
952 /*
953  * writes 'guest_tsc' into guest's timestamp counter "register"
954  * guest_tsc = host_tsc + tsc_offset ==> tsc_offset = guest_tsc - host_tsc
955  */
956 static void guest_write_tsc(u64 guest_tsc, u64 host_tsc)
957 {
958         vmcs_write64(TSC_OFFSET, guest_tsc - host_tsc);
959 }
960
961 /*
962  * Reads an msr value (of 'msr_index') into 'pdata'.
963  * Returns 0 on success, non-0 otherwise.
964  * Assumes vcpu_load() was already called.
965  */
966 static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
967 {
968         u64 data;
969         struct kvm_msr_entry *msr;
970
971         if (!pdata) {
972                 printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
973                 return -EINVAL;
974         }
975
976         switch (msr_index) {
977 #ifdef CONFIG_X86_64
978         case MSR_FS_BASE:
979                 data = vmcs_readl(GUEST_FS_BASE);
980                 break;
981         case MSR_GS_BASE:
982                 data = vmcs_readl(GUEST_GS_BASE);
983                 break;
984         case MSR_EFER:
985                 return kvm_get_msr_common(vcpu, msr_index, pdata);
986 #endif
987         case MSR_IA32_TSC:
988                 data = guest_read_tsc();
989                 break;
990         case MSR_IA32_SYSENTER_CS:
991                 data = vmcs_read32(GUEST_SYSENTER_CS);
992                 break;
993         case MSR_IA32_SYSENTER_EIP:
994                 data = vmcs_readl(GUEST_SYSENTER_EIP);
995                 break;
996         case MSR_IA32_SYSENTER_ESP:
997                 data = vmcs_readl(GUEST_SYSENTER_ESP);
998                 break;
999         default:
1000                 vmx_load_host_state(to_vmx(vcpu));
1001                 msr = find_msr_entry(to_vmx(vcpu), msr_index);
1002                 if (msr) {
1003                         data = msr->data;
1004                         break;
1005                 }
1006                 return kvm_get_msr_common(vcpu, msr_index, pdata);
1007         }
1008
1009         *pdata = data;
1010         return 0;
1011 }
1012
1013 /*
1014  * Writes msr value into into the appropriate "register".
1015  * Returns 0 on success, non-0 otherwise.
1016  * Assumes vcpu_load() was already called.
1017  */
1018 static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
1019 {
1020         struct vcpu_vmx *vmx = to_vmx(vcpu);
1021         struct kvm_msr_entry *msr;
1022         u64 host_tsc;
1023         int ret = 0;
1024
1025         switch (msr_index) {
1026         case MSR_EFER:
1027                 vmx_load_host_state(vmx);
1028                 ret = kvm_set_msr_common(vcpu, msr_index, data);
1029                 break;
1030 #ifdef CONFIG_X86_64
1031         case MSR_FS_BASE:
1032                 vmcs_writel(GUEST_FS_BASE, data);
1033                 break;
1034         case MSR_GS_BASE:
1035                 vmcs_writel(GUEST_GS_BASE, data);
1036                 break;
1037 #endif
1038         case MSR_IA32_SYSENTER_CS:
1039                 vmcs_write32(GUEST_SYSENTER_CS, data);
1040                 break;
1041         case MSR_IA32_SYSENTER_EIP:
1042                 vmcs_writel(GUEST_SYSENTER_EIP, data);
1043                 break;
1044         case MSR_IA32_SYSENTER_ESP:
1045                 vmcs_writel(GUEST_SYSENTER_ESP, data);
1046                 break;
1047         case MSR_IA32_TSC:
1048                 rdtscll(host_tsc);
1049                 guest_write_tsc(data, host_tsc);
1050                 break;
1051         case MSR_IA32_CR_PAT:
1052                 if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
1053                         vmcs_write64(GUEST_IA32_PAT, data);
1054                         vcpu->arch.pat = data;
1055                         break;
1056                 }
1057                 /* Otherwise falls through to kvm_set_msr_common */
1058         default:
1059                 vmx_load_host_state(vmx);
1060                 msr = find_msr_entry(vmx, msr_index);
1061                 if (msr) {
1062                         msr->data = data;
1063                         break;
1064                 }
1065                 ret = kvm_set_msr_common(vcpu, msr_index, data);
1066         }
1067
1068         return ret;
1069 }
1070
1071 static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
1072 {
1073         __set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
1074         switch (reg) {
1075         case VCPU_REGS_RSP:
1076                 vcpu->arch.regs[VCPU_REGS_RSP] = vmcs_readl(GUEST_RSP);
1077                 break;
1078         case VCPU_REGS_RIP:
1079                 vcpu->arch.regs[VCPU_REGS_RIP] = vmcs_readl(GUEST_RIP);
1080                 break;
1081         case VCPU_EXREG_PDPTR:
1082                 if (enable_ept)
1083                         ept_save_pdptrs(vcpu);
1084                 break;
1085         default:
1086                 break;
1087         }
1088 }
1089
1090 static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
1091 {
1092         int old_debug = vcpu->guest_debug;
1093         unsigned long flags;
1094
1095         vcpu->guest_debug = dbg->control;
1096         if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
1097                 vcpu->guest_debug = 0;
1098
1099         if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
1100                 vmcs_writel(GUEST_DR7, dbg->arch.debugreg[7]);
1101         else
1102                 vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
1103
1104         flags = vmcs_readl(GUEST_RFLAGS);
1105         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
1106                 flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
1107         else if (old_debug & KVM_GUESTDBG_SINGLESTEP)
1108                 flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
1109         vmcs_writel(GUEST_RFLAGS, flags);
1110
1111         update_exception_bitmap(vcpu);
1112
1113         return 0;
1114 }
1115
1116 static __init int cpu_has_kvm_support(void)
1117 {
1118         return cpu_has_vmx();
1119 }
1120
1121 static __init int vmx_disabled_by_bios(void)
1122 {
1123         u64 msr;
1124
1125         rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
1126         return (msr & (FEATURE_CONTROL_LOCKED |
1127                        FEATURE_CONTROL_VMXON_ENABLED))
1128             == FEATURE_CONTROL_LOCKED;
1129         /* locked but not enabled */
1130 }
1131
1132 static void hardware_enable(void *garbage)
1133 {
1134         int cpu = raw_smp_processor_id();
1135         u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
1136         u64 old;
1137
1138         INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
1139         rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
1140         if ((old & (FEATURE_CONTROL_LOCKED |
1141                     FEATURE_CONTROL_VMXON_ENABLED))
1142             != (FEATURE_CONTROL_LOCKED |
1143                 FEATURE_CONTROL_VMXON_ENABLED))
1144                 /* enable and lock */
1145                 wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
1146                        FEATURE_CONTROL_LOCKED |
1147                        FEATURE_CONTROL_VMXON_ENABLED);
1148         write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
1149         asm volatile (ASM_VMX_VMXON_RAX
1150                       : : "a"(&phys_addr), "m"(phys_addr)
1151                       : "memory", "cc");
1152 }
1153
1154 static void vmclear_local_vcpus(void)
1155 {
1156         int cpu = raw_smp_processor_id();
1157         struct vcpu_vmx *vmx, *n;
1158
1159         list_for_each_entry_safe(vmx, n, &per_cpu(vcpus_on_cpu, cpu),
1160                                  local_vcpus_link)
1161                 __vcpu_clear(vmx);
1162 }
1163
1164
1165 /* Just like cpu_vmxoff(), but with the __kvm_handle_fault_on_reboot()
1166  * tricks.
1167  */
1168 static void kvm_cpu_vmxoff(void)
1169 {
1170         asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
1171         write_cr4(read_cr4() & ~X86_CR4_VMXE);
1172 }
1173
1174 static void hardware_disable(void *garbage)
1175 {
1176         vmclear_local_vcpus();
1177         kvm_cpu_vmxoff();
1178 }
1179
1180 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
1181                                       u32 msr, u32 *result)
1182 {
1183         u32 vmx_msr_low, vmx_msr_high;
1184         u32 ctl = ctl_min | ctl_opt;
1185
1186         rdmsr(msr, vmx_msr_low, vmx_msr_high);
1187
1188         ctl &= vmx_msr_high; /* bit == 0 in high word ==> must be zero */
1189         ctl |= vmx_msr_low;  /* bit == 1 in low word  ==> must be one  */
1190
1191         /* Ensure minimum (required) set of control bits are supported. */
1192         if (ctl_min & ~ctl)
1193                 return -EIO;
1194
1195         *result = ctl;
1196         return 0;
1197 }
1198
1199 static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
1200 {
1201         u32 vmx_msr_low, vmx_msr_high;
1202         u32 min, opt, min2, opt2;
1203         u32 _pin_based_exec_control = 0;
1204         u32 _cpu_based_exec_control = 0;
1205         u32 _cpu_based_2nd_exec_control = 0;
1206         u32 _vmexit_control = 0;
1207         u32 _vmentry_control = 0;
1208
1209         min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
1210         opt = PIN_BASED_VIRTUAL_NMIS;
1211         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
1212                                 &_pin_based_exec_control) < 0)
1213                 return -EIO;
1214
1215         min = CPU_BASED_HLT_EXITING |
1216 #ifdef CONFIG_X86_64
1217               CPU_BASED_CR8_LOAD_EXITING |
1218               CPU_BASED_CR8_STORE_EXITING |
1219 #endif
1220               CPU_BASED_CR3_LOAD_EXITING |
1221               CPU_BASED_CR3_STORE_EXITING |
1222               CPU_BASED_USE_IO_BITMAPS |
1223               CPU_BASED_MOV_DR_EXITING |
1224               CPU_BASED_USE_TSC_OFFSETING |
1225               CPU_BASED_INVLPG_EXITING;
1226         opt = CPU_BASED_TPR_SHADOW |
1227               CPU_BASED_USE_MSR_BITMAPS |
1228               CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
1229         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
1230                                 &_cpu_based_exec_control) < 0)
1231                 return -EIO;
1232 #ifdef CONFIG_X86_64
1233         if ((_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
1234                 _cpu_based_exec_control &= ~CPU_BASED_CR8_LOAD_EXITING &
1235                                            ~CPU_BASED_CR8_STORE_EXITING;
1236 #endif
1237         if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
1238                 min2 = 0;
1239                 opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
1240                         SECONDARY_EXEC_WBINVD_EXITING |
1241                         SECONDARY_EXEC_ENABLE_VPID |
1242                         SECONDARY_EXEC_ENABLE_EPT |
1243                         SECONDARY_EXEC_UNRESTRICTED_GUEST;
1244                 if (adjust_vmx_controls(min2, opt2,
1245                                         MSR_IA32_VMX_PROCBASED_CTLS2,
1246                                         &_cpu_based_2nd_exec_control) < 0)
1247                         return -EIO;
1248         }
1249 #ifndef CONFIG_X86_64
1250         if (!(_cpu_based_2nd_exec_control &
1251                                 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
1252                 _cpu_based_exec_control &= ~CPU_BASED_TPR_SHADOW;
1253 #endif
1254         if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
1255                 /* CR3 accesses and invlpg don't need to cause VM Exits when EPT
1256                    enabled */
1257                 min &= ~(CPU_BASED_CR3_LOAD_EXITING |
1258                          CPU_BASED_CR3_STORE_EXITING |
1259                          CPU_BASED_INVLPG_EXITING);
1260                 if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
1261                                         &_cpu_based_exec_control) < 0)
1262                         return -EIO;
1263                 rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
1264                       vmx_capability.ept, vmx_capability.vpid);
1265         }
1266
1267         min = 0;
1268 #ifdef CONFIG_X86_64
1269         min |= VM_EXIT_HOST_ADDR_SPACE_SIZE;
1270 #endif
1271         opt = VM_EXIT_SAVE_IA32_PAT | VM_EXIT_LOAD_IA32_PAT;
1272         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_EXIT_CTLS,
1273                                 &_vmexit_control) < 0)
1274                 return -EIO;
1275
1276         min = 0;
1277         opt = VM_ENTRY_LOAD_IA32_PAT;
1278         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
1279                                 &_vmentry_control) < 0)
1280                 return -EIO;
1281
1282         rdmsr(MSR_IA32_VMX_BASIC, vmx_msr_low, vmx_msr_high);
1283
1284         /* IA-32 SDM Vol 3B: VMCS size is never greater than 4kB. */
1285         if ((vmx_msr_high & 0x1fff) > PAGE_SIZE)
1286                 return -EIO;
1287
1288 #ifdef CONFIG_X86_64
1289         /* IA-32 SDM Vol 3B: 64-bit CPUs always have VMX_BASIC_MSR[48]==0. */
1290         if (vmx_msr_high & (1u<<16))
1291                 return -EIO;
1292 #endif
1293
1294         /* Require Write-Back (WB) memory type for VMCS accesses. */
1295         if (((vmx_msr_high >> 18) & 15) != 6)
1296                 return -EIO;
1297
1298         vmcs_conf->size = vmx_msr_high & 0x1fff;
1299         vmcs_conf->order = get_order(vmcs_config.size);
1300         vmcs_conf->revision_id = vmx_msr_low;
1301
1302         vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
1303         vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
1304         vmcs_conf->cpu_based_2nd_exec_ctrl = _cpu_based_2nd_exec_control;
1305         vmcs_conf->vmexit_ctrl         = _vmexit_control;
1306         vmcs_conf->vmentry_ctrl        = _vmentry_control;
1307
1308         return 0;
1309 }
1310
1311 static struct vmcs *alloc_vmcs_cpu(int cpu)
1312 {
1313         int node = cpu_to_node(cpu);
1314         struct page *pages;
1315         struct vmcs *vmcs;
1316
1317         pages = alloc_pages_exact_node(node, GFP_KERNEL, vmcs_config.order);
1318         if (!pages)
1319                 return NULL;
1320         vmcs = page_address(pages);
1321         memset(vmcs, 0, vmcs_config.size);
1322         vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
1323         return vmcs;
1324 }
1325
1326 static struct vmcs *alloc_vmcs(void)
1327 {
1328         return alloc_vmcs_cpu(raw_smp_processor_id());
1329 }
1330
1331 static void free_vmcs(struct vmcs *vmcs)
1332 {
1333         free_pages((unsigned long)vmcs, vmcs_config.order);
1334 }
1335
1336 static void free_kvm_area(void)
1337 {
1338         int cpu;
1339
1340         for_each_online_cpu(cpu)
1341                 free_vmcs(per_cpu(vmxarea, cpu));
1342 }
1343
1344 static __init int alloc_kvm_area(void)
1345 {
1346         int cpu;
1347
1348         for_each_online_cpu(cpu) {
1349                 struct vmcs *vmcs;
1350
1351                 vmcs = alloc_vmcs_cpu(cpu);
1352                 if (!vmcs) {
1353                         free_kvm_area();
1354                         return -ENOMEM;
1355                 }
1356
1357                 per_cpu(vmxarea, cpu) = vmcs;
1358         }
1359         return 0;
1360 }
1361
1362 static __init int hardware_setup(void)
1363 {
1364         if (setup_vmcs_config(&vmcs_config) < 0)
1365                 return -EIO;
1366
1367         if (boot_cpu_has(X86_FEATURE_NX))
1368                 kvm_enable_efer_bits(EFER_NX);
1369
1370         if (!cpu_has_vmx_vpid())
1371                 enable_vpid = 0;
1372
1373         if (!cpu_has_vmx_ept()) {
1374                 enable_ept = 0;
1375                 enable_unrestricted_guest = 0;
1376         }
1377
1378         if (!cpu_has_vmx_unrestricted_guest())
1379                 enable_unrestricted_guest = 0;
1380
1381         if (!cpu_has_vmx_flexpriority())
1382                 flexpriority_enabled = 0;
1383
1384         if (!cpu_has_vmx_tpr_shadow())
1385                 kvm_x86_ops->update_cr8_intercept = NULL;
1386
1387         if (enable_ept && !cpu_has_vmx_ept_2m_page())
1388                 kvm_disable_largepages();
1389
1390         return alloc_kvm_area();
1391 }
1392
1393 static __exit void hardware_unsetup(void)
1394 {
1395         free_kvm_area();
1396 }
1397
1398 static void fix_pmode_dataseg(int seg, struct kvm_save_segment *save)
1399 {
1400         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1401
1402         if (vmcs_readl(sf->base) == save->base && (save->base & AR_S_MASK)) {
1403                 vmcs_write16(sf->selector, save->selector);
1404                 vmcs_writel(sf->base, save->base);
1405                 vmcs_write32(sf->limit, save->limit);
1406                 vmcs_write32(sf->ar_bytes, save->ar);
1407         } else {
1408                 u32 dpl = (vmcs_read16(sf->selector) & SELECTOR_RPL_MASK)
1409                         << AR_DPL_SHIFT;
1410                 vmcs_write32(sf->ar_bytes, 0x93 | dpl);
1411         }
1412 }
1413
1414 static void enter_pmode(struct kvm_vcpu *vcpu)
1415 {
1416         unsigned long flags;
1417         struct vcpu_vmx *vmx = to_vmx(vcpu);
1418
1419         vmx->emulation_required = 1;
1420         vmx->rmode.vm86_active = 0;
1421
1422         vmcs_writel(GUEST_TR_BASE, vmx->rmode.tr.base);
1423         vmcs_write32(GUEST_TR_LIMIT, vmx->rmode.tr.limit);
1424         vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
1425
1426         flags = vmcs_readl(GUEST_RFLAGS);
1427         flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
1428         flags |= (vmx->rmode.save_iopl << IOPL_SHIFT);
1429         vmcs_writel(GUEST_RFLAGS, flags);
1430
1431         vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
1432                         (vmcs_readl(CR4_READ_SHADOW) & X86_CR4_VME));
1433
1434         update_exception_bitmap(vcpu);
1435
1436         if (emulate_invalid_guest_state)
1437                 return;
1438
1439         fix_pmode_dataseg(VCPU_SREG_ES, &vmx->rmode.es);
1440         fix_pmode_dataseg(VCPU_SREG_DS, &vmx->rmode.ds);
1441         fix_pmode_dataseg(VCPU_SREG_GS, &vmx->rmode.gs);
1442         fix_pmode_dataseg(VCPU_SREG_FS, &vmx->rmode.fs);
1443
1444         vmcs_write16(GUEST_SS_SELECTOR, 0);
1445         vmcs_write32(GUEST_SS_AR_BYTES, 0x93);
1446
1447         vmcs_write16(GUEST_CS_SELECTOR,
1448                      vmcs_read16(GUEST_CS_SELECTOR) & ~SELECTOR_RPL_MASK);
1449         vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
1450 }
1451
1452 static gva_t rmode_tss_base(struct kvm *kvm)
1453 {
1454         if (!kvm->arch.tss_addr) {
1455                 gfn_t base_gfn = kvm->memslots[0].base_gfn +
1456                                  kvm->memslots[0].npages - 3;
1457                 return base_gfn << PAGE_SHIFT;
1458         }
1459         return kvm->arch.tss_addr;
1460 }
1461
1462 static void fix_rmode_seg(int seg, struct kvm_save_segment *save)
1463 {
1464         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1465
1466         save->selector = vmcs_read16(sf->selector);
1467         save->base = vmcs_readl(sf->base);
1468         save->limit = vmcs_read32(sf->limit);
1469         save->ar = vmcs_read32(sf->ar_bytes);
1470         vmcs_write16(sf->selector, save->base >> 4);
1471         vmcs_write32(sf->base, save->base & 0xfffff);
1472         vmcs_write32(sf->limit, 0xffff);
1473         vmcs_write32(sf->ar_bytes, 0xf3);
1474 }
1475
1476 static void enter_rmode(struct kvm_vcpu *vcpu)
1477 {
1478         unsigned long flags;
1479         struct vcpu_vmx *vmx = to_vmx(vcpu);
1480
1481         if (enable_unrestricted_guest)
1482                 return;
1483
1484         vmx->emulation_required = 1;
1485         vmx->rmode.vm86_active = 1;
1486
1487         vmx->rmode.tr.base = vmcs_readl(GUEST_TR_BASE);
1488         vmcs_writel(GUEST_TR_BASE, rmode_tss_base(vcpu->kvm));
1489
1490         vmx->rmode.tr.limit = vmcs_read32(GUEST_TR_LIMIT);
1491         vmcs_write32(GUEST_TR_LIMIT, RMODE_TSS_SIZE - 1);
1492
1493         vmx->rmode.tr.ar = vmcs_read32(GUEST_TR_AR_BYTES);
1494         vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
1495
1496         flags = vmcs_readl(GUEST_RFLAGS);
1497         vmx->rmode.save_iopl
1498                 = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
1499
1500         flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
1501
1502         vmcs_writel(GUEST_RFLAGS, flags);
1503         vmcs_writel(GUEST_CR4, vmcs_readl(GUEST_CR4) | X86_CR4_VME);
1504         update_exception_bitmap(vcpu);
1505
1506         if (emulate_invalid_guest_state)
1507                 goto continue_rmode;
1508
1509         vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
1510         vmcs_write32(GUEST_SS_LIMIT, 0xffff);
1511         vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);
1512
1513         vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
1514         vmcs_write32(GUEST_CS_LIMIT, 0xffff);
1515         if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
1516                 vmcs_writel(GUEST_CS_BASE, 0xf0000);
1517         vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);
1518
1519         fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es);
1520         fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds);
1521         fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs);
1522         fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs);
1523
1524 continue_rmode:
1525         kvm_mmu_reset_context(vcpu);
1526         init_rmode(vcpu->kvm);
1527 }
1528
1529 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
1530 {
1531         struct vcpu_vmx *vmx = to_vmx(vcpu);
1532         struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
1533
1534         vcpu->arch.shadow_efer = efer;
1535         if (!msr)
1536                 return;
1537         if (efer & EFER_LMA) {
1538                 vmcs_write32(VM_ENTRY_CONTROLS,
1539                              vmcs_read32(VM_ENTRY_CONTROLS) |
1540                              VM_ENTRY_IA32E_MODE);
1541                 msr->data = efer;
1542         } else {
1543                 vmcs_write32(VM_ENTRY_CONTROLS,
1544                              vmcs_read32(VM_ENTRY_CONTROLS) &
1545                              ~VM_ENTRY_IA32E_MODE);
1546
1547                 msr->data = efer & ~EFER_LME;
1548         }
1549         setup_msrs(vmx);
1550 }
1551
1552 #ifdef CONFIG_X86_64
1553
1554 static void enter_lmode(struct kvm_vcpu *vcpu)
1555 {
1556         u32 guest_tr_ar;
1557
1558         guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
1559         if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
1560                 printk(KERN_DEBUG "%s: tss fixup for long mode. \n",
1561                        __func__);
1562                 vmcs_write32(GUEST_TR_AR_BYTES,
1563                              (guest_tr_ar & ~AR_TYPE_MASK)
1564                              | AR_TYPE_BUSY_64_TSS);
1565         }
1566         vcpu->arch.shadow_efer |= EFER_LMA;
1567         vmx_set_efer(vcpu, vcpu->arch.shadow_efer);
1568 }
1569
1570 static void exit_lmode(struct kvm_vcpu *vcpu)
1571 {
1572         vcpu->arch.shadow_efer &= ~EFER_LMA;
1573
1574         vmcs_write32(VM_ENTRY_CONTROLS,
1575                      vmcs_read32(VM_ENTRY_CONTROLS)
1576                      & ~VM_ENTRY_IA32E_MODE);
1577 }
1578
1579 #endif
1580
1581 static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
1582 {
1583         vpid_sync_vcpu_all(to_vmx(vcpu));
1584         if (enable_ept)
1585                 ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
1586 }
1587
1588 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
1589 {
1590         vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK;
1591         vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
1592 }
1593
1594 static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
1595 {
1596         if (!test_bit(VCPU_EXREG_PDPTR,
1597                       (unsigned long *)&vcpu->arch.regs_dirty))
1598                 return;
1599
1600         if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
1601                 vmcs_write64(GUEST_PDPTR0, vcpu->arch.pdptrs[0]);
1602                 vmcs_write64(GUEST_PDPTR1, vcpu->arch.pdptrs[1]);
1603                 vmcs_write64(GUEST_PDPTR2, vcpu->arch.pdptrs[2]);
1604                 vmcs_write64(GUEST_PDPTR3, vcpu->arch.pdptrs[3]);
1605         }
1606 }
1607
1608 static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
1609 {
1610         if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
1611                 vcpu->arch.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
1612                 vcpu->arch.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
1613                 vcpu->arch.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
1614                 vcpu->arch.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
1615         }
1616
1617         __set_bit(VCPU_EXREG_PDPTR,
1618                   (unsigned long *)&vcpu->arch.regs_avail);
1619         __set_bit(VCPU_EXREG_PDPTR,
1620                   (unsigned long *)&vcpu->arch.regs_dirty);
1621 }
1622
1623 static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
1624
1625 static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
1626                                         unsigned long cr0,
1627                                         struct kvm_vcpu *vcpu)
1628 {
1629         if (!(cr0 & X86_CR0_PG)) {
1630                 /* From paging/starting to nonpaging */
1631                 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
1632                              vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) |
1633                              (CPU_BASED_CR3_LOAD_EXITING |
1634                               CPU_BASED_CR3_STORE_EXITING));
1635                 vcpu->arch.cr0 = cr0;
1636                 vmx_set_cr4(vcpu, vcpu->arch.cr4);
1637                 *hw_cr0 &= ~X86_CR0_WP;
1638         } else if (!is_paging(vcpu)) {
1639                 /* From nonpaging to paging */
1640                 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
1641                              vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
1642                              ~(CPU_BASED_CR3_LOAD_EXITING |
1643                                CPU_BASED_CR3_STORE_EXITING));
1644                 vcpu->arch.cr0 = cr0;
1645                 vmx_set_cr4(vcpu, vcpu->arch.cr4);
1646                 if (!(vcpu->arch.cr0 & X86_CR0_WP))
1647                         *hw_cr0 &= ~X86_CR0_WP;
1648         }
1649 }
1650
1651 static void ept_update_paging_mode_cr4(unsigned long *hw_cr4,
1652                                         struct kvm_vcpu *vcpu)
1653 {
1654         if (!is_paging(vcpu)) {
1655                 *hw_cr4 &= ~X86_CR4_PAE;
1656                 *hw_cr4 |= X86_CR4_PSE;
1657         } else if (!(vcpu->arch.cr4 & X86_CR4_PAE))
1658                 *hw_cr4 &= ~X86_CR4_PAE;
1659 }
1660
1661 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1662 {
1663         struct vcpu_vmx *vmx = to_vmx(vcpu);
1664         unsigned long hw_cr0;
1665
1666         if (enable_unrestricted_guest)
1667                 hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST)
1668                         | KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST;
1669         else
1670                 hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON;
1671
1672         vmx_fpu_deactivate(vcpu);
1673
1674         if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
1675                 enter_pmode(vcpu);
1676
1677         if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE))
1678                 enter_rmode(vcpu);
1679
1680 #ifdef CONFIG_X86_64
1681         if (vcpu->arch.shadow_efer & EFER_LME) {
1682                 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
1683                         enter_lmode(vcpu);
1684                 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
1685                         exit_lmode(vcpu);
1686         }
1687 #endif
1688
1689         if (enable_ept)
1690                 ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
1691
1692         vmcs_writel(CR0_READ_SHADOW, cr0);
1693         vmcs_writel(GUEST_CR0, hw_cr0);
1694         vcpu->arch.cr0 = cr0;
1695
1696         if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
1697                 vmx_fpu_activate(vcpu);
1698 }
1699
1700 static u64 construct_eptp(unsigned long root_hpa)
1701 {
1702         u64 eptp;
1703
1704         /* TODO write the value reading from MSR */
1705         eptp = VMX_EPT_DEFAULT_MT |
1706                 VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
1707         eptp |= (root_hpa & PAGE_MASK);
1708
1709         return eptp;
1710 }
1711
1712 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
1713 {
1714         unsigned long guest_cr3;
1715         u64 eptp;
1716
1717         guest_cr3 = cr3;
1718         if (enable_ept) {
1719                 eptp = construct_eptp(cr3);
1720                 vmcs_write64(EPT_POINTER, eptp);
1721                 guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 :
1722                         VMX_EPT_IDENTITY_PAGETABLE_ADDR;
1723         }
1724
1725         vmx_flush_tlb(vcpu);
1726         vmcs_writel(GUEST_CR3, guest_cr3);
1727         if (vcpu->arch.cr0 & X86_CR0_PE)
1728                 vmx_fpu_deactivate(vcpu);
1729 }
1730
1731 static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
1732 {
1733         unsigned long hw_cr4 = cr4 | (to_vmx(vcpu)->rmode.vm86_active ?
1734                     KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
1735
1736         vcpu->arch.cr4 = cr4;
1737         if (enable_ept)
1738                 ept_update_paging_mode_cr4(&hw_cr4, vcpu);
1739
1740         vmcs_writel(CR4_READ_SHADOW, cr4);
1741         vmcs_writel(GUEST_CR4, hw_cr4);
1742 }
1743
1744 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
1745 {
1746         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1747
1748         return vmcs_readl(sf->base);
1749 }
1750
1751 static void vmx_get_segment(struct kvm_vcpu *vcpu,
1752                             struct kvm_segment *var, int seg)
1753 {
1754         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1755         u32 ar;
1756
1757         var->base = vmcs_readl(sf->base);
1758         var->limit = vmcs_read32(sf->limit);
1759         var->selector = vmcs_read16(sf->selector);
1760         ar = vmcs_read32(sf->ar_bytes);
1761         if ((ar & AR_UNUSABLE_MASK) && !emulate_invalid_guest_state)
1762                 ar = 0;
1763         var->type = ar & 15;
1764         var->s = (ar >> 4) & 1;
1765         var->dpl = (ar >> 5) & 3;
1766         var->present = (ar >> 7) & 1;
1767         var->avl = (ar >> 12) & 1;
1768         var->l = (ar >> 13) & 1;
1769         var->db = (ar >> 14) & 1;
1770         var->g = (ar >> 15) & 1;
1771         var->unusable = (ar >> 16) & 1;
1772 }
1773
1774 static int vmx_get_cpl(struct kvm_vcpu *vcpu)
1775 {
1776         struct kvm_segment kvm_seg;
1777
1778         if (!(vcpu->arch.cr0 & X86_CR0_PE)) /* if real mode */
1779                 return 0;
1780
1781         if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */
1782                 return 3;
1783
1784         vmx_get_segment(vcpu, &kvm_seg, VCPU_SREG_CS);
1785         return kvm_seg.selector & 3;
1786 }
1787
1788 static u32 vmx_segment_access_rights(struct kvm_segment *var)
1789 {
1790         u32 ar;
1791
1792         if (var->unusable)
1793                 ar = 1 << 16;
1794         else {
1795                 ar = var->type & 15;
1796                 ar |= (var->s & 1) << 4;
1797                 ar |= (var->dpl & 3) << 5;
1798                 ar |= (var->present & 1) << 7;
1799                 ar |= (var->avl & 1) << 12;
1800                 ar |= (var->l & 1) << 13;
1801                 ar |= (var->db & 1) << 14;
1802                 ar |= (var->g & 1) << 15;
1803         }
1804         if (ar == 0) /* a 0 value means unusable */
1805                 ar = AR_UNUSABLE_MASK;
1806
1807         return ar;
1808 }
1809
1810 static void vmx_set_segment(struct kvm_vcpu *vcpu,
1811                             struct kvm_segment *var, int seg)
1812 {
1813         struct vcpu_vmx *vmx = to_vmx(vcpu);
1814         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
1815         u32 ar;
1816
1817         if (vmx->rmode.vm86_active && seg == VCPU_SREG_TR) {
1818                 vmx->rmode.tr.selector = var->selector;
1819                 vmx->rmode.tr.base = var->base;
1820                 vmx->rmode.tr.limit = var->limit;
1821                 vmx->rmode.tr.ar = vmx_segment_access_rights(var);
1822                 return;
1823         }
1824         vmcs_writel(sf->base, var->base);
1825         vmcs_write32(sf->limit, var->limit);
1826         vmcs_write16(sf->selector, var->selector);
1827         if (vmx->rmode.vm86_active && var->s) {
1828                 /*
1829                  * Hack real-mode segments into vm86 compatibility.
1830                  */
1831                 if (var->base == 0xffff0000 && var->selector == 0xf000)
1832                         vmcs_writel(sf->base, 0xf0000);
1833                 ar = 0xf3;
1834         } else
1835                 ar = vmx_segment_access_rights(var);
1836
1837         /*
1838          *   Fix the "Accessed" bit in AR field of segment registers for older
1839          * qemu binaries.
1840          *   IA32 arch specifies that at the time of processor reset the
1841          * "Accessed" bit in the AR field of segment registers is 1. And qemu
1842          * is setting it to 0 in the usedland code. This causes invalid guest
1843          * state vmexit when "unrestricted guest" mode is turned on.
1844          *    Fix for this setup issue in cpu_reset is being pushed in the qemu
1845          * tree. Newer qemu binaries with that qemu fix would not need this
1846          * kvm hack.
1847          */
1848         if (enable_unrestricted_guest && (seg != VCPU_SREG_LDTR))
1849                 ar |= 0x1; /* Accessed */
1850
1851         vmcs_write32(sf->ar_bytes, ar);
1852 }
1853
1854 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
1855 {
1856         u32 ar = vmcs_read32(GUEST_CS_AR_BYTES);
1857
1858         *db = (ar >> 14) & 1;
1859         *l = (ar >> 13) & 1;
1860 }
1861
1862 static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1863 {
1864         dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
1865         dt->base = vmcs_readl(GUEST_IDTR_BASE);
1866 }
1867
1868 static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1869 {
1870         vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
1871         vmcs_writel(GUEST_IDTR_BASE, dt->base);
1872 }
1873
1874 static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1875 {
1876         dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
1877         dt->base = vmcs_readl(GUEST_GDTR_BASE);
1878 }
1879
1880 static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
1881 {
1882         vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
1883         vmcs_writel(GUEST_GDTR_BASE, dt->base);
1884 }
1885
1886 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
1887 {
1888         struct kvm_segment var;
1889         u32 ar;
1890
1891         vmx_get_segment(vcpu, &var, seg);
1892         ar = vmx_segment_access_rights(&var);
1893
1894         if (var.base != (var.selector << 4))
1895                 return false;
1896         if (var.limit != 0xffff)
1897                 return false;
1898         if (ar != 0xf3)
1899                 return false;
1900
1901         return true;
1902 }
1903
1904 static bool code_segment_valid(struct kvm_vcpu *vcpu)
1905 {
1906         struct kvm_segment cs;
1907         unsigned int cs_rpl;
1908
1909         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
1910         cs_rpl = cs.selector & SELECTOR_RPL_MASK;
1911
1912         if (cs.unusable)
1913                 return false;
1914         if (~cs.type & (AR_TYPE_CODE_MASK|AR_TYPE_ACCESSES_MASK))
1915                 return false;
1916         if (!cs.s)
1917                 return false;
1918         if (cs.type & AR_TYPE_WRITEABLE_MASK) {
1919                 if (cs.dpl > cs_rpl)
1920                         return false;
1921         } else {
1922                 if (cs.dpl != cs_rpl)
1923                         return false;
1924         }
1925         if (!cs.present)
1926                 return false;
1927
1928         /* TODO: Add Reserved field check, this'll require a new member in the kvm_segment_field structure */
1929         return true;
1930 }
1931
1932 static bool stack_segment_valid(struct kvm_vcpu *vcpu)
1933 {
1934         struct kvm_segment ss;
1935         unsigned int ss_rpl;
1936
1937         vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
1938         ss_rpl = ss.selector & SELECTOR_RPL_MASK;
1939
1940         if (ss.unusable)
1941                 return true;
1942         if (ss.type != 3 && ss.type != 7)
1943                 return false;
1944         if (!ss.s)
1945                 return false;
1946         if (ss.dpl != ss_rpl) /* DPL != RPL */
1947                 return false;
1948         if (!ss.present)
1949                 return false;
1950
1951         return true;
1952 }
1953
1954 static bool data_segment_valid(struct kvm_vcpu *vcpu, int seg)
1955 {
1956         struct kvm_segment var;
1957         unsigned int rpl;
1958
1959         vmx_get_segment(vcpu, &var, seg);
1960         rpl = var.selector & SELECTOR_RPL_MASK;
1961
1962         if (var.unusable)
1963                 return true;
1964         if (!var.s)
1965                 return false;
1966         if (!var.present)
1967                 return false;
1968         if (~var.type & (AR_TYPE_CODE_MASK|AR_TYPE_WRITEABLE_MASK)) {
1969                 if (var.dpl < rpl) /* DPL < RPL */
1970                         return false;
1971         }
1972
1973         /* TODO: Add other members to kvm_segment_field to allow checking for other access
1974          * rights flags
1975          */
1976         return true;
1977 }
1978
1979 static bool tr_valid(struct kvm_vcpu *vcpu)
1980 {
1981         struct kvm_segment tr;
1982
1983         vmx_get_segment(vcpu, &tr, VCPU_SREG_TR);
1984
1985         if (tr.unusable)
1986                 return false;
1987         if (tr.selector & SELECTOR_TI_MASK)     /* TI = 1 */
1988                 return false;
1989         if (tr.type != 3 && tr.type != 11) /* TODO: Check if guest is in IA32e mode */
1990                 return false;
1991         if (!tr.present)
1992                 return false;
1993
1994         return true;
1995 }
1996
1997 static bool ldtr_valid(struct kvm_vcpu *vcpu)
1998 {
1999         struct kvm_segment ldtr;
2000
2001         vmx_get_segment(vcpu, &ldtr, VCPU_SREG_LDTR);
2002
2003         if (ldtr.unusable)
2004                 return true;
2005         if (ldtr.selector & SELECTOR_TI_MASK)   /* TI = 1 */
2006                 return false;
2007         if (ldtr.type != 2)
2008                 return false;
2009         if (!ldtr.present)
2010                 return false;
2011
2012         return true;
2013 }
2014
2015 static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
2016 {
2017         struct kvm_segment cs, ss;
2018
2019         vmx_get_segment(vcpu, &cs, VCPU_SREG_CS);
2020         vmx_get_segment(vcpu, &ss, VCPU_SREG_SS);
2021
2022         return ((cs.selector & SELECTOR_RPL_MASK) ==
2023                  (ss.selector & SELECTOR_RPL_MASK));
2024 }
2025
2026 /*
2027  * Check if guest state is valid. Returns true if valid, false if
2028  * not.
2029  * We assume that registers are always usable
2030  */
2031 static bool guest_state_valid(struct kvm_vcpu *vcpu)
2032 {
2033         /* real mode guest state checks */
2034         if (!(vcpu->arch.cr0 & X86_CR0_PE)) {
2035                 if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
2036                         return false;
2037                 if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
2038                         return false;
2039                 if (!rmode_segment_valid(vcpu, VCPU_SREG_DS))
2040                         return false;
2041                 if (!rmode_segment_valid(vcpu, VCPU_SREG_ES))
2042                         return false;
2043                 if (!rmode_segment_valid(vcpu, VCPU_SREG_FS))
2044                         return false;
2045                 if (!rmode_segment_valid(vcpu, VCPU_SREG_GS))
2046                         return false;
2047         } else {
2048         /* protected mode guest state checks */
2049                 if (!cs_ss_rpl_check(vcpu))
2050                         return false;
2051                 if (!code_segment_valid(vcpu))
2052                         return false;
2053                 if (!stack_segment_valid(vcpu))
2054                         return false;
2055                 if (!data_segment_valid(vcpu, VCPU_SREG_DS))
2056                         return false;
2057                 if (!data_segment_valid(vcpu, VCPU_SREG_ES))
2058                         return false;
2059                 if (!data_segment_valid(vcpu, VCPU_SREG_FS))
2060                         return false;
2061                 if (!data_segment_valid(vcpu, VCPU_SREG_GS))
2062                         return false;
2063                 if (!tr_valid(vcpu))
2064                         return false;
2065                 if (!ldtr_valid(vcpu))
2066                         return false;
2067         }
2068         /* TODO:
2069          * - Add checks on RIP
2070          * - Add checks on RFLAGS
2071          */
2072
2073         return true;
2074 }
2075
2076 static int init_rmode_tss(struct kvm *kvm)
2077 {
2078         gfn_t fn = rmode_tss_base(kvm) >> PAGE_SHIFT;
2079         u16 data = 0;
2080         int ret = 0;
2081         int r;
2082
2083         r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
2084         if (r < 0)
2085                 goto out;
2086         data = TSS_BASE_SIZE + TSS_REDIRECTION_SIZE;
2087         r = kvm_write_guest_page(kvm, fn++, &data,
2088                         TSS_IOPB_BASE_OFFSET, sizeof(u16));
2089         if (r < 0)
2090                 goto out;
2091         r = kvm_clear_guest_page(kvm, fn++, 0, PAGE_SIZE);
2092         if (r < 0)
2093                 goto out;
2094         r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
2095         if (r < 0)
2096                 goto out;
2097         data = ~0;
2098         r = kvm_write_guest_page(kvm, fn, &data,
2099                                  RMODE_TSS_SIZE - 2 * PAGE_SIZE - 1,
2100                                  sizeof(u8));
2101         if (r < 0)
2102                 goto out;
2103
2104         ret = 1;
2105 out:
2106         return ret;
2107 }
2108
2109 static int init_rmode_identity_map(struct kvm *kvm)
2110 {
2111         int i, r, ret;
2112         pfn_t identity_map_pfn;
2113         u32 tmp;
2114
2115         if (!enable_ept)
2116                 return 1;
2117         if (unlikely(!kvm->arch.ept_identity_pagetable)) {
2118                 printk(KERN_ERR "EPT: identity-mapping pagetable "
2119                         "haven't been allocated!\n");
2120                 return 0;
2121         }
2122         if (likely(kvm->arch.ept_identity_pagetable_done))
2123                 return 1;
2124         ret = 0;
2125         identity_map_pfn = VMX_EPT_IDENTITY_PAGETABLE_ADDR >> PAGE_SHIFT;
2126         r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
2127         if (r < 0)
2128                 goto out;
2129         /* Set up identity-mapping pagetable for EPT in real mode */
2130         for (i = 0; i < PT32_ENT_PER_PAGE; i++) {
2131                 tmp = (i << 22) + (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |
2132                         _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
2133                 r = kvm_write_guest_page(kvm, identity_map_pfn,
2134                                 &tmp, i * sizeof(tmp), sizeof(tmp));
2135                 if (r < 0)
2136                         goto out;
2137         }
2138         kvm->arch.ept_identity_pagetable_done = true;
2139         ret = 1;
2140 out:
2141         return ret;
2142 }
2143
2144 static void seg_setup(int seg)
2145 {
2146         struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];
2147         unsigned int ar;
2148
2149         vmcs_write16(sf->selector, 0);
2150         vmcs_writel(sf->base, 0);
2151         vmcs_write32(sf->limit, 0xffff);
2152         if (enable_unrestricted_guest) {
2153                 ar = 0x93;
2154                 if (seg == VCPU_SREG_CS)
2155                         ar |= 0x08; /* code segment */
2156         } else
2157                 ar = 0xf3;
2158
2159         vmcs_write32(sf->ar_bytes, ar);
2160 }
2161
2162 static int alloc_apic_access_page(struct kvm *kvm)
2163 {
2164         struct kvm_userspace_memory_region kvm_userspace_mem;
2165         int r = 0;
2166
2167         down_write(&kvm->slots_lock);
2168         if (kvm->arch.apic_access_page)
2169                 goto out;
2170         kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
2171         kvm_userspace_mem.flags = 0;
2172         kvm_userspace_mem.guest_phys_addr = 0xfee00000ULL;
2173         kvm_userspace_mem.memory_size = PAGE_SIZE;
2174         r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
2175         if (r)
2176                 goto out;
2177
2178         kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
2179 out:
2180         up_write(&kvm->slots_lock);
2181         return r;
2182 }
2183
2184 static int alloc_identity_pagetable(struct kvm *kvm)
2185 {
2186         struct kvm_userspace_memory_region kvm_userspace_mem;
2187         int r = 0;
2188
2189         down_write(&kvm->slots_lock);
2190         if (kvm->arch.ept_identity_pagetable)
2191                 goto out;
2192         kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
2193         kvm_userspace_mem.flags = 0;
2194         kvm_userspace_mem.guest_phys_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR;
2195         kvm_userspace_mem.memory_size = PAGE_SIZE;
2196         r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
2197         if (r)
2198                 goto out;
2199
2200         kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
2201                         VMX_EPT_IDENTITY_PAGETABLE_ADDR >> PAGE_SHIFT);
2202 out:
2203         up_write(&kvm->slots_lock);
2204         return r;
2205 }
2206
2207 static void allocate_vpid(struct vcpu_vmx *vmx)
2208 {
2209         int vpid;
2210
2211         vmx->vpid = 0;
2212         if (!enable_vpid)
2213                 return;
2214         spin_lock(&vmx_vpid_lock);
2215         vpid = find_first_zero_bit(vmx_vpid_bitmap, VMX_NR_VPIDS);
2216         if (vpid < VMX_NR_VPIDS) {
2217                 vmx->vpid = vpid;
2218                 __set_bit(vpid, vmx_vpid_bitmap);
2219         }
2220         spin_unlock(&vmx_vpid_lock);
2221 }
2222
2223 static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
2224 {
2225         int f = sizeof(unsigned long);
2226
2227         if (!cpu_has_vmx_msr_bitmap())
2228                 return;
2229
2230         /*
2231          * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
2232          * have the write-low and read-high bitmap offsets the wrong way round.
2233          * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
2234          */
2235         if (msr <= 0x1fff) {
2236                 __clear_bit(msr, msr_bitmap + 0x000 / f); /* read-low */
2237                 __clear_bit(msr, msr_bitmap + 0x800 / f); /* write-low */
2238         } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
2239                 msr &= 0x1fff;
2240                 __clear_bit(msr, msr_bitmap + 0x400 / f); /* read-high */
2241                 __clear_bit(msr, msr_bitmap + 0xc00 / f); /* write-high */
2242         }
2243 }
2244
2245 static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
2246 {
2247         if (!longmode_only)
2248                 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, msr);
2249         __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, msr);
2250 }
2251
2252 /*
2253  * Sets up the vmcs for emulated real mode.
2254  */
2255 static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
2256 {
2257         u32 host_sysenter_cs, msr_low, msr_high;
2258         u32 junk;
2259         u64 host_pat, tsc_this, tsc_base;
2260         unsigned long a;
2261         struct descriptor_table dt;
2262         int i;
2263         unsigned long kvm_vmx_return;
2264         u32 exec_control;
2265
2266         /* I/O */
2267         vmcs_write64(IO_BITMAP_A, __pa(vmx_io_bitmap_a));
2268         vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));
2269
2270         if (cpu_has_vmx_msr_bitmap())
2271                 vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));
2272
2273         vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
2274
2275         /* Control */
2276         vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
2277                 vmcs_config.pin_based_exec_ctrl);
2278
2279         exec_control = vmcs_config.cpu_based_exec_ctrl;
2280         if (!vm_need_tpr_shadow(vmx->vcpu.kvm)) {
2281                 exec_control &= ~CPU_BASED_TPR_SHADOW;
2282 #ifdef CONFIG_X86_64
2283                 exec_control |= CPU_BASED_CR8_STORE_EXITING |
2284                                 CPU_BASED_CR8_LOAD_EXITING;
2285 #endif
2286         }
2287         if (!enable_ept)
2288                 exec_control |= CPU_BASED_CR3_STORE_EXITING |
2289                                 CPU_BASED_CR3_LOAD_EXITING  |
2290                                 CPU_BASED_INVLPG_EXITING;
2291         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, exec_control);
2292
2293         if (cpu_has_secondary_exec_ctrls()) {
2294                 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
2295                 if (!vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
2296                         exec_control &=
2297                                 ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
2298                 if (vmx->vpid == 0)
2299                         exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
2300                 if (!enable_ept)
2301                         exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
2302                 if (!enable_unrestricted_guest)
2303                         exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
2304                 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
2305         }
2306
2307         vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf);
2308         vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
2309         vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */
2310
2311         vmcs_writel(HOST_CR0, read_cr0());  /* 22.2.3 */
2312         vmcs_writel(HOST_CR4, read_cr4());  /* 22.2.3, 22.2.5 */
2313         vmcs_writel(HOST_CR3, read_cr3());  /* 22.2.3  FIXME: shadow tables */
2314
2315         vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
2316         vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
2317         vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
2318         vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs());    /* 22.2.4 */
2319         vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs());    /* 22.2.4 */
2320         vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS);  /* 22.2.4 */
2321 #ifdef CONFIG_X86_64
2322         rdmsrl(MSR_FS_BASE, a);
2323         vmcs_writel(HOST_FS_BASE, a); /* 22.2.4 */
2324         rdmsrl(MSR_GS_BASE, a);
2325         vmcs_writel(HOST_GS_BASE, a); /* 22.2.4 */
2326 #else
2327         vmcs_writel(HOST_FS_BASE, 0); /* 22.2.4 */
2328         vmcs_writel(HOST_GS_BASE, 0); /* 22.2.4 */
2329 #endif
2330
2331         vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
2332
2333         kvm_get_idt(&dt);
2334         vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */
2335
2336         asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
2337         vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
2338         vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
2339         vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
2340         vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
2341
2342         rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
2343         vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
2344         rdmsrl(MSR_IA32_SYSENTER_ESP, a);
2345         vmcs_writel(HOST_IA32_SYSENTER_ESP, a);   /* 22.2.3 */
2346         rdmsrl(MSR_IA32_SYSENTER_EIP, a);
2347         vmcs_writel(HOST_IA32_SYSENTER_EIP, a);   /* 22.2.3 */
2348
2349         if (vmcs_config.vmexit_ctrl & VM_EXIT_LOAD_IA32_PAT) {
2350                 rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
2351                 host_pat = msr_low | ((u64) msr_high << 32);
2352                 vmcs_write64(HOST_IA32_PAT, host_pat);
2353         }
2354         if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2355                 rdmsr(MSR_IA32_CR_PAT, msr_low, msr_high);
2356                 host_pat = msr_low | ((u64) msr_high << 32);
2357                 /* Write the default value follow host pat */
2358                 vmcs_write64(GUEST_IA32_PAT, host_pat);
2359                 /* Keep arch.pat sync with GUEST_IA32_PAT */
2360                 vmx->vcpu.arch.pat = host_pat;
2361         }
2362
2363         for (i = 0; i < NR_VMX_MSR; ++i) {
2364                 u32 index = vmx_msr_index[i];
2365                 u32 data_low, data_high;
2366                 u64 data;
2367                 int j = vmx->nmsrs;
2368
2369                 if (rdmsr_safe(index, &data_low, &data_high) < 0)
2370                         continue;
2371                 if (wrmsr_safe(index, data_low, data_high) < 0)
2372                         continue;
2373                 data = data_low | ((u64)data_high << 32);
2374                 vmx->host_msrs[j].index = index;
2375                 vmx->host_msrs[j].reserved = 0;
2376                 vmx->host_msrs[j].data = data;
2377                 vmx->guest_msrs[j] = vmx->host_msrs[j];
2378                 ++vmx->nmsrs;
2379         }
2380
2381         vmcs_write32(VM_EXIT_CONTROLS, vmcs_config.vmexit_ctrl);
2382
2383         /* 22.2.1, 20.8.1 */
2384         vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
2385
2386         vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
2387         vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
2388
2389         tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc;
2390         rdtscll(tsc_this);
2391         if (tsc_this < vmx->vcpu.kvm->arch.vm_init_tsc)
2392                 tsc_base = tsc_this;
2393
2394         guest_write_tsc(0, tsc_base);
2395
2396         return 0;
2397 }
2398
2399 static int init_rmode(struct kvm *kvm)
2400 {
2401         if (!init_rmode_tss(kvm))
2402                 return 0;
2403         if (!init_rmode_identity_map(kvm))
2404                 return 0;
2405         return 1;
2406 }
2407
2408 static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
2409 {
2410         struct vcpu_vmx *vmx = to_vmx(vcpu);
2411         u64 msr;
2412         int ret;
2413
2414         vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
2415         down_read(&vcpu->kvm->slots_lock);
2416         if (!init_rmode(vmx->vcpu.kvm)) {
2417                 ret = -ENOMEM;
2418                 goto out;
2419         }
2420
2421         vmx->rmode.vm86_active = 0;
2422
2423         vmx->soft_vnmi_blocked = 0;
2424
2425         vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
2426         kvm_set_cr8(&vmx->vcpu, 0);
2427         msr = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
2428         if (kvm_vcpu_is_bsp(&vmx->vcpu))
2429                 msr |= MSR_IA32_APICBASE_BSP;
2430         kvm_set_apic_base(&vmx->vcpu, msr);
2431
2432         fx_init(&vmx->vcpu);
2433
2434         seg_setup(VCPU_SREG_CS);
2435         /*
2436          * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
2437          * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4.  Sigh.
2438          */
2439         if (kvm_vcpu_is_bsp(&vmx->vcpu)) {
2440                 vmcs_write16(GUEST_CS_SELECTOR, 0xf000);
2441                 vmcs_writel(GUEST_CS_BASE, 0x000f0000);
2442         } else {
2443                 vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8);
2444                 vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12);
2445         }
2446
2447         seg_setup(VCPU_SREG_DS);
2448         seg_setup(VCPU_SREG_ES);
2449         seg_setup(VCPU_SREG_FS);
2450         seg_setup(VCPU_SREG_GS);
2451         seg_setup(VCPU_SREG_SS);
2452
2453         vmcs_write16(GUEST_TR_SELECTOR, 0);
2454         vmcs_writel(GUEST_TR_BASE, 0);
2455         vmcs_write32(GUEST_TR_LIMIT, 0xffff);
2456         vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
2457
2458         vmcs_write16(GUEST_LDTR_SELECTOR, 0);
2459         vmcs_writel(GUEST_LDTR_BASE, 0);
2460         vmcs_write32(GUEST_LDTR_LIMIT, 0xffff);
2461         vmcs_write32(GUEST_LDTR_AR_BYTES, 0x00082);
2462
2463         vmcs_write32(GUEST_SYSENTER_CS, 0);
2464         vmcs_writel(GUEST_SYSENTER_ESP, 0);
2465         vmcs_writel(GUEST_SYSENTER_EIP, 0);
2466
2467         vmcs_writel(GUEST_RFLAGS, 0x02);
2468         if (kvm_vcpu_is_bsp(&vmx->vcpu))
2469                 kvm_rip_write(vcpu, 0xfff0);
2470         else
2471                 kvm_rip_write(vcpu, 0);
2472         kvm_register_write(vcpu, VCPU_REGS_RSP, 0);
2473
2474         vmcs_writel(GUEST_DR7, 0x400);
2475
2476         vmcs_writel(GUEST_GDTR_BASE, 0);
2477         vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
2478
2479         vmcs_writel(GUEST_IDTR_BASE, 0);
2480         vmcs_write32(GUEST_IDTR_LIMIT, 0xffff);
2481
2482         vmcs_write32(GUEST_ACTIVITY_STATE, 0);
2483         vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
2484         vmcs_write32(GUEST_PENDING_DBG_EXCEPTIONS, 0);
2485
2486         /* Special registers */
2487         vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
2488
2489         setup_msrs(vmx);
2490
2491         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);  /* 22.2.1 */
2492
2493         if (cpu_has_vmx_tpr_shadow()) {
2494                 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, 0);
2495                 if (vm_need_tpr_shadow(vmx->vcpu.kvm))
2496                         vmcs_write64(VIRTUAL_APIC_PAGE_ADDR,
2497                                 page_to_phys(vmx->vcpu.arch.apic->regs_page));
2498                 vmcs_write32(TPR_THRESHOLD, 0);
2499         }
2500
2501         if (vm_need_virtualize_apic_accesses(vmx->vcpu.kvm))
2502                 vmcs_write64(APIC_ACCESS_ADDR,
2503                              page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
2504
2505         if (vmx->vpid != 0)
2506                 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
2507
2508         vmx->vcpu.arch.cr0 = 0x60000010;
2509         vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
2510         vmx_set_cr4(&vmx->vcpu, 0);
2511         vmx_set_efer(&vmx->vcpu, 0);
2512         vmx_fpu_activate(&vmx->vcpu);
2513         update_exception_bitmap(&vmx->vcpu);
2514
2515         vpid_sync_vcpu_all(vmx);
2516
2517         ret = 0;
2518
2519         /* HACK: Don't enable emulation on guest boot/reset */
2520         vmx->emulation_required = 0;
2521
2522 out:
2523         up_read(&vcpu->kvm->slots_lock);
2524         return ret;
2525 }
2526
2527 static void enable_irq_window(struct kvm_vcpu *vcpu)
2528 {
2529         u32 cpu_based_vm_exec_control;
2530
2531         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
2532         cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
2533         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
2534 }
2535
2536 static void enable_nmi_window(struct kvm_vcpu *vcpu)
2537 {
2538         u32 cpu_based_vm_exec_control;
2539
2540         if (!cpu_has_virtual_nmis()) {
2541                 enable_irq_window(vcpu);
2542                 return;
2543         }
2544
2545         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
2546         cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_NMI_PENDING;
2547         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
2548 }
2549
2550 static void vmx_inject_irq(struct kvm_vcpu *vcpu)
2551 {
2552         struct vcpu_vmx *vmx = to_vmx(vcpu);
2553         uint32_t intr;
2554         int irq = vcpu->arch.interrupt.nr;
2555
2556         trace_kvm_inj_virq(irq);
2557
2558         ++vcpu->stat.irq_injections;
2559         if (vmx->rmode.vm86_active) {
2560                 vmx->rmode.irq.pending = true;
2561                 vmx->rmode.irq.vector = irq;
2562                 vmx->rmode.irq.rip = kvm_rip_read(vcpu);
2563                 if (vcpu->arch.interrupt.soft)
2564                         vmx->rmode.irq.rip +=
2565                                 vmx->vcpu.arch.event_exit_inst_len;
2566                 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2567                              irq | INTR_TYPE_SOFT_INTR | INTR_INFO_VALID_MASK);
2568                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
2569                 kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
2570                 return;
2571         }
2572         intr = irq | INTR_INFO_VALID_MASK;
2573         if (vcpu->arch.interrupt.soft) {
2574                 intr |= INTR_TYPE_SOFT_INTR;
2575                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2576                              vmx->vcpu.arch.event_exit_inst_len);
2577         } else
2578                 intr |= INTR_TYPE_EXT_INTR;
2579         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
2580 }
2581
2582 static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
2583 {
2584         struct vcpu_vmx *vmx = to_vmx(vcpu);
2585
2586         if (!cpu_has_virtual_nmis()) {
2587                 /*
2588                  * Tracking the NMI-blocked state in software is built upon
2589                  * finding the next open IRQ window. This, in turn, depends on
2590                  * well-behaving guests: They have to keep IRQs disabled at
2591                  * least as long as the NMI handler runs. Otherwise we may
2592                  * cause NMI nesting, maybe breaking the guest. But as this is
2593                  * highly unlikely, we can live with the residual risk.
2594                  */
2595                 vmx->soft_vnmi_blocked = 1;
2596                 vmx->vnmi_blocked_time = 0;
2597         }
2598
2599         ++vcpu->stat.nmi_injections;
2600         if (vmx->rmode.vm86_active) {
2601                 vmx->rmode.irq.pending = true;
2602                 vmx->rmode.irq.vector = NMI_VECTOR;
2603                 vmx->rmode.irq.rip = kvm_rip_read(vcpu);
2604                 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2605                              NMI_VECTOR | INTR_TYPE_SOFT_INTR |
2606                              INTR_INFO_VALID_MASK);
2607                 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, 1);
2608                 kvm_rip_write(vcpu, vmx->rmode.irq.rip - 1);
2609                 return;
2610         }
2611         vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2612                         INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR);
2613 }
2614
2615 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
2616 {
2617         if (!cpu_has_virtual_nmis() && to_vmx(vcpu)->soft_vnmi_blocked)
2618                 return 0;
2619
2620         return  !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
2621                         (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS |
2622                                 GUEST_INTR_STATE_NMI));
2623 }
2624
2625 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
2626 {
2627         return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
2628                 !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
2629                         (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
2630 }
2631
2632 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
2633 {
2634         int ret;
2635         struct kvm_userspace_memory_region tss_mem = {
2636                 .slot = TSS_PRIVATE_MEMSLOT,
2637                 .guest_phys_addr = addr,
2638                 .memory_size = PAGE_SIZE * 3,
2639                 .flags = 0,
2640         };
2641
2642         ret = kvm_set_memory_region(kvm, &tss_mem, 0);
2643         if (ret)
2644                 return ret;
2645         kvm->arch.tss_addr = addr;
2646         return 0;
2647 }
2648
2649 static int handle_rmode_exception(struct kvm_vcpu *vcpu,
2650                                   int vec, u32 err_code)
2651 {
2652         /*
2653          * Instruction with address size override prefix opcode 0x67
2654          * Cause the #SS fault with 0 error code in VM86 mode.
2655          */
2656         if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
2657                 if (emulate_instruction(vcpu, NULL, 0, 0, 0) == EMULATE_DONE)
2658                         return 1;
2659         /*
2660          * Forward all other exceptions that are valid in real mode.
2661          * FIXME: Breaks guest debugging in real mode, needs to be fixed with
2662          *        the required debugging infrastructure rework.
2663          */
2664         switch (vec) {
2665         case DB_VECTOR:
2666                 if (vcpu->guest_debug &
2667                     (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
2668                         return 0;
2669                 kvm_queue_exception(vcpu, vec);
2670                 return 1;
2671         case BP_VECTOR:
2672                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
2673                         return 0;
2674                 /* fall through */
2675         case DE_VECTOR:
2676         case OF_VECTOR:
2677         case BR_VECTOR:
2678         case UD_VECTOR:
2679         case DF_VECTOR:
2680         case SS_VECTOR:
2681         case GP_VECTOR:
2682         case MF_VECTOR:
2683                 kvm_queue_exception(vcpu, vec);
2684                 return 1;
2685         }
2686         return 0;
2687 }
2688
2689 /*
2690  * Trigger machine check on the host. We assume all the MSRs are already set up
2691  * by the CPU and that we still run on the same CPU as the MCE occurred on.
2692  * We pass a fake environment to the machine check handler because we want
2693  * the guest to be always treated like user space, no matter what context
2694  * it used internally.
2695  */
2696 static void kvm_machine_check(void)
2697 {
2698 #if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_64)
2699         struct pt_regs regs = {
2700                 .cs = 3, /* Fake ring 3 no matter what the guest ran on */
2701                 .flags = X86_EFLAGS_IF,
2702         };
2703
2704         do_machine_check(&regs, 0);
2705 #endif
2706 }
2707
2708 static int handle_machine_check(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2709 {
2710         /* already handled by vcpu_run */
2711         return 1;
2712 }
2713
2714 static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2715 {
2716         struct vcpu_vmx *vmx = to_vmx(vcpu);
2717         u32 intr_info, ex_no, error_code;
2718         unsigned long cr2, rip, dr6;
2719         u32 vect_info;
2720         enum emulation_result er;
2721
2722         vect_info = vmx->idt_vectoring_info;
2723         intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
2724
2725         if (is_machine_check(intr_info))
2726                 return handle_machine_check(vcpu, kvm_run);
2727
2728         if ((vect_info & VECTORING_INFO_VALID_MASK) &&
2729                                                 !is_page_fault(intr_info))
2730                 printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
2731                        "intr info 0x%x\n", __func__, vect_info, intr_info);
2732
2733         if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
2734                 return 1;  /* already handled by vmx_vcpu_run() */
2735
2736         if (is_no_device(intr_info)) {
2737                 vmx_fpu_activate(vcpu);
2738                 return 1;
2739         }
2740
2741         if (is_invalid_opcode(intr_info)) {
2742                 er = emulate_instruction(vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD);
2743                 if (er != EMULATE_DONE)
2744                         kvm_queue_exception(vcpu, UD_VECTOR);
2745                 return 1;
2746         }
2747
2748         error_code = 0;
2749         rip = kvm_rip_read(vcpu);
2750         if (intr_info & INTR_INFO_DELIVER_CODE_MASK)
2751                 error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
2752         if (is_page_fault(intr_info)) {
2753                 /* EPT won't cause page fault directly */
2754                 if (enable_ept)
2755                         BUG();
2756                 cr2 = vmcs_readl(EXIT_QUALIFICATION);
2757                 trace_kvm_page_fault(cr2, error_code);
2758
2759                 if (kvm_event_needs_reinjection(vcpu))
2760                         kvm_mmu_unprotect_page_virt(vcpu, cr2);
2761                 return kvm_mmu_page_fault(vcpu, cr2, error_code);
2762         }
2763
2764         if (vmx->rmode.vm86_active &&
2765             handle_rmode_exception(vcpu, intr_info & INTR_INFO_VECTOR_MASK,
2766                                                                 error_code)) {
2767                 if (vcpu->arch.halt_request) {
2768                         vcpu->arch.halt_request = 0;
2769                         return kvm_emulate_halt(vcpu);
2770                 }
2771                 return 1;
2772         }
2773
2774         ex_no = intr_info & INTR_INFO_VECTOR_MASK;
2775         switch (ex_no) {
2776         case DB_VECTOR:
2777                 dr6 = vmcs_readl(EXIT_QUALIFICATION);
2778                 if (!(vcpu->guest_debug &
2779                       (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
2780                         vcpu->arch.dr6 = dr6 | DR6_FIXED_1;
2781                         kvm_queue_exception(vcpu, DB_VECTOR);
2782                         return 1;
2783                 }
2784                 kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
2785                 kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
2786                 /* fall through */
2787         case BP_VECTOR:
2788                 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2789                 kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
2790                 kvm_run->debug.arch.exception = ex_no;
2791                 break;
2792         default:
2793                 kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
2794                 kvm_run->ex.exception = ex_no;
2795                 kvm_run->ex.error_code = error_code;
2796                 break;
2797         }
2798         return 0;
2799 }
2800
2801 static int handle_external_interrupt(struct kvm_vcpu *vcpu,
2802                                      struct kvm_run *kvm_run)
2803 {
2804         ++vcpu->stat.irq_exits;
2805         return 1;
2806 }
2807
2808 static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2809 {
2810         kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2811         return 0;
2812 }
2813
2814 static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2815 {
2816         unsigned long exit_qualification;
2817         int size, in, string;
2818         unsigned port;
2819
2820         ++vcpu->stat.io_exits;
2821         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
2822         string = (exit_qualification & 16) != 0;
2823
2824         if (string) {
2825                 if (emulate_instruction(vcpu,
2826                                         kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
2827                         return 0;
2828                 return 1;
2829         }
2830
2831         size = (exit_qualification & 7) + 1;
2832         in = (exit_qualification & 8) != 0;
2833         port = exit_qualification >> 16;
2834
2835         skip_emulated_instruction(vcpu);
2836         return kvm_emulate_pio(vcpu, kvm_run, in, size, port);
2837 }
2838
2839 static void
2840 vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
2841 {
2842         /*
2843          * Patch in the VMCALL instruction:
2844          */
2845         hypercall[0] = 0x0f;
2846         hypercall[1] = 0x01;
2847         hypercall[2] = 0xc1;
2848 }
2849
2850 static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2851 {
2852         unsigned long exit_qualification, val;
2853         int cr;
2854         int reg;
2855
2856         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
2857         cr = exit_qualification & 15;
2858         reg = (exit_qualification >> 8) & 15;
2859         switch ((exit_qualification >> 4) & 3) {
2860         case 0: /* mov to cr */
2861                 val = kvm_register_read(vcpu, reg);
2862                 trace_kvm_cr_write(cr, val);
2863                 switch (cr) {
2864                 case 0:
2865                         kvm_set_cr0(vcpu, val);
2866                         skip_emulated_instruction(vcpu);
2867                         return 1;
2868                 case 3:
2869                         kvm_set_cr3(vcpu, val);
2870                         skip_emulated_instruction(vcpu);
2871                         return 1;
2872                 case 4:
2873                         kvm_set_cr4(vcpu, val);
2874                         skip_emulated_instruction(vcpu);
2875                         return 1;
2876                 case 8: {
2877                                 u8 cr8_prev = kvm_get_cr8(vcpu);
2878                                 u8 cr8 = kvm_register_read(vcpu, reg);
2879                                 kvm_set_cr8(vcpu, cr8);
2880                                 skip_emulated_instruction(vcpu);
2881                                 if (irqchip_in_kernel(vcpu->kvm))
2882                                         return 1;
2883                                 if (cr8_prev <= cr8)
2884                                         return 1;
2885                                 kvm_run->exit_reason = KVM_EXIT_SET_TPR;
2886                                 return 0;
2887                         }
2888                 };
2889                 break;
2890         case 2: /* clts */
2891                 vmx_fpu_deactivate(vcpu);
2892                 vcpu->arch.cr0 &= ~X86_CR0_TS;
2893                 vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
2894                 vmx_fpu_activate(vcpu);
2895                 skip_emulated_instruction(vcpu);
2896                 return 1;
2897         case 1: /*mov from cr*/
2898                 switch (cr) {
2899                 case 3:
2900                         kvm_register_write(vcpu, reg, vcpu->arch.cr3);
2901                         trace_kvm_cr_read(cr, vcpu->arch.cr3);
2902                         skip_emulated_instruction(vcpu);
2903                         return 1;
2904                 case 8:
2905                         val = kvm_get_cr8(vcpu);
2906                         kvm_register_write(vcpu, reg, val);
2907                         trace_kvm_cr_read(cr, val);
2908                         skip_emulated_instruction(vcpu);
2909                         return 1;
2910                 }
2911                 break;
2912         case 3: /* lmsw */
2913                 kvm_lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
2914
2915                 skip_emulated_instruction(vcpu);
2916                 return 1;
2917         default:
2918                 break;
2919         }
2920         kvm_run->exit_reason = 0;
2921         pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
2922                (int)(exit_qualification >> 4) & 3, cr);
2923         return 0;
2924 }
2925
2926 static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2927 {
2928         unsigned long exit_qualification;
2929         unsigned long val;
2930         int dr, reg;
2931
2932         dr = vmcs_readl(GUEST_DR7);
2933         if (dr & DR7_GD) {
2934                 /*
2935                  * As the vm-exit takes precedence over the debug trap, we
2936                  * need to emulate the latter, either for the host or the
2937                  * guest debugging itself.
2938                  */
2939                 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
2940                         kvm_run->debug.arch.dr6 = vcpu->arch.dr6;
2941                         kvm_run->debug.arch.dr7 = dr;
2942                         kvm_run->debug.arch.pc =
2943                                 vmcs_readl(GUEST_CS_BASE) +
2944                                 vmcs_readl(GUEST_RIP);
2945                         kvm_run->debug.arch.exception = DB_VECTOR;
2946                         kvm_run->exit_reason = KVM_EXIT_DEBUG;
2947                         return 0;
2948                 } else {
2949                         vcpu->arch.dr7 &= ~DR7_GD;
2950                         vcpu->arch.dr6 |= DR6_BD;
2951                         vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
2952                         kvm_queue_exception(vcpu, DB_VECTOR);
2953                         return 1;
2954                 }
2955         }
2956
2957         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
2958         dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
2959         reg = DEBUG_REG_ACCESS_REG(exit_qualification);
2960         if (exit_qualification & TYPE_MOV_FROM_DR) {
2961                 switch (dr) {
2962                 case 0 ... 3:
2963                         val = vcpu->arch.db[dr];
2964                         break;
2965                 case 6:
2966                         val = vcpu->arch.dr6;
2967                         break;
2968                 case 7:
2969                         val = vcpu->arch.dr7;
2970                         break;
2971                 default:
2972                         val = 0;
2973                 }
2974                 kvm_register_write(vcpu, reg, val);
2975         } else {
2976                 val = vcpu->arch.regs[reg];
2977                 switch (dr) {
2978                 case 0 ... 3:
2979                         vcpu->arch.db[dr] = val;
2980                         if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
2981                                 vcpu->arch.eff_db[dr] = val;
2982                         break;
2983                 case 4 ... 5:
2984                         if (vcpu->arch.cr4 & X86_CR4_DE)
2985                                 kvm_queue_exception(vcpu, UD_VECTOR);
2986                         break;
2987                 case 6:
2988                         if (val & 0xffffffff00000000ULL) {
2989                                 kvm_queue_exception(vcpu, GP_VECTOR);
2990                                 break;
2991                         }
2992                         vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
2993                         break;
2994                 case 7:
2995                         if (val & 0xffffffff00000000ULL) {
2996                                 kvm_queue_exception(vcpu, GP_VECTOR);
2997                                 break;
2998                         }
2999                         vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
3000                         if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
3001                                 vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
3002                                 vcpu->arch.switch_db_regs =
3003                                         (val & DR7_BP_EN_MASK);
3004                         }
3005                         break;
3006                 }
3007         }
3008         skip_emulated_instruction(vcpu);
3009         return 1;
3010 }
3011
3012 static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3013 {
3014         kvm_emulate_cpuid(vcpu);
3015         return 1;
3016 }
3017
3018 static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3019 {
3020         u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
3021         u64 data;
3022
3023         if (vmx_get_msr(vcpu, ecx, &data)) {
3024                 kvm_inject_gp(vcpu, 0);
3025                 return 1;
3026         }
3027
3028         trace_kvm_msr_read(ecx, data);
3029
3030         /* FIXME: handling of bits 32:63 of rax, rdx */
3031         vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
3032         vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u;
3033         skip_emulated_instruction(vcpu);
3034         return 1;
3035 }
3036
3037 static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3038 {
3039         u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
3040         u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
3041                 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
3042
3043         trace_kvm_msr_write(ecx, data);
3044
3045         if (vmx_set_msr(vcpu, ecx, data) != 0) {
3046                 kvm_inject_gp(vcpu, 0);
3047                 return 1;
3048         }
3049
3050         skip_emulated_instruction(vcpu);
3051         return 1;
3052 }
3053
3054 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu,
3055                                       struct kvm_run *kvm_run)
3056 {
3057         return 1;
3058 }
3059
3060 static int handle_interrupt_window(struct kvm_vcpu *vcpu,
3061                                    struct kvm_run *kvm_run)
3062 {
3063         u32 cpu_based_vm_exec_control;
3064
3065         /* clear pending irq */
3066         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
3067         cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
3068         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
3069
3070         ++vcpu->stat.irq_window_exits;
3071
3072         /*
3073          * If the user space waits to inject interrupts, exit as soon as
3074          * possible
3075          */
3076         if (!irqchip_in_kernel(vcpu->kvm) &&
3077             kvm_run->request_interrupt_window &&
3078             !kvm_cpu_has_interrupt(vcpu)) {
3079                 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
3080                 return 0;
3081         }
3082         return 1;
3083 }
3084
3085 static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3086 {
3087         skip_emulated_instruction(vcpu);
3088         return kvm_emulate_halt(vcpu);
3089 }
3090
3091 static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3092 {
3093         skip_emulated_instruction(vcpu);
3094         kvm_emulate_hypercall(vcpu);
3095         return 1;
3096 }
3097
3098 static int handle_vmx_insn(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3099 {
3100         kvm_queue_exception(vcpu, UD_VECTOR);
3101         return 1;
3102 }
3103
3104 static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3105 {
3106         unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
3107
3108         kvm_mmu_invlpg(vcpu, exit_qualification);
3109         skip_emulated_instruction(vcpu);
3110         return 1;
3111 }
3112
3113 static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3114 {
3115         skip_emulated_instruction(vcpu);
3116         /* TODO: Add support for VT-d/pass-through device */
3117         return 1;
3118 }
3119
3120 static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3121 {
3122         unsigned long exit_qualification;
3123         enum emulation_result er;
3124         unsigned long offset;
3125
3126         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
3127         offset = exit_qualification & 0xffful;
3128
3129         er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
3130
3131         if (er !=  EMULATE_DONE) {
3132                 printk(KERN_ERR
3133                        "Fail to handle apic access vmexit! Offset is 0x%lx\n",
3134                        offset);
3135                 return -ENOTSUPP;
3136         }
3137         return 1;
3138 }
3139
3140 static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3141 {
3142         struct vcpu_vmx *vmx = to_vmx(vcpu);
3143         unsigned long exit_qualification;
3144         u16 tss_selector;
3145         int reason, type, idt_v;
3146
3147         idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
3148         type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
3149
3150         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
3151
3152         reason = (u32)exit_qualification >> 30;
3153         if (reason == TASK_SWITCH_GATE && idt_v) {
3154                 switch (type) {
3155                 case INTR_TYPE_NMI_INTR:
3156                         vcpu->arch.nmi_injected = false;
3157                         if (cpu_has_virtual_nmis())
3158                                 vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
3159                                               GUEST_INTR_STATE_NMI);
3160                         break;
3161                 case INTR_TYPE_EXT_INTR:
3162                 case INTR_TYPE_SOFT_INTR:
3163                         kvm_clear_interrupt_queue(vcpu);
3164                         break;
3165                 case INTR_TYPE_HARD_EXCEPTION:
3166                 case INTR_TYPE_SOFT_EXCEPTION:
3167                         kvm_clear_exception_queue(vcpu);
3168                         break;
3169                 default:
3170                         break;
3171                 }
3172         }
3173         tss_selector = exit_qualification;
3174
3175         if (!idt_v || (type != INTR_TYPE_HARD_EXCEPTION &&
3176                        type != INTR_TYPE_EXT_INTR &&
3177                        type != INTR_TYPE_NMI_INTR))
3178                 skip_emulated_instruction(vcpu);
3179
3180         if (!kvm_task_switch(vcpu, tss_selector, reason))
3181                 return 0;
3182
3183         /* clear all local breakpoint enable flags */
3184         vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~55);
3185
3186         /*
3187          * TODO: What about debug traps on tss switch?
3188          *       Are we supposed to inject them and update dr6?
3189          */
3190
3191         return 1;
3192 }
3193
3194 static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3195 {
3196         unsigned long exit_qualification;
3197         gpa_t gpa;
3198         int gla_validity;
3199
3200         exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
3201
3202         if (exit_qualification & (1 << 6)) {
3203                 printk(KERN_ERR "EPT: GPA exceeds GAW!\n");
3204                 return -ENOTSUPP;
3205         }
3206
3207         gla_validity = (exit_qualification >> 7) & 0x3;
3208         if (gla_validity != 0x3 && gla_validity != 0x1 && gla_validity != 0) {
3209                 printk(KERN_ERR "EPT: Handling EPT violation failed!\n");
3210                 printk(KERN_ERR "EPT: GPA: 0x%lx, GVA: 0x%lx\n",
3211                         (long unsigned int)vmcs_read64(GUEST_PHYSICAL_ADDRESS),
3212                         vmcs_readl(GUEST_LINEAR_ADDRESS));
3213                 printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
3214                         (long unsigned int)exit_qualification);
3215                 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
3216                 kvm_run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION;
3217                 return 0;
3218         }
3219
3220         gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
3221         trace_kvm_page_fault(gpa, exit_qualification);
3222         return kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0);
3223 }
3224
3225 static u64 ept_rsvd_mask(u64 spte, int level)
3226 {
3227         int i;
3228         u64 mask = 0;
3229
3230         for (i = 51; i > boot_cpu_data.x86_phys_bits; i--)
3231                 mask |= (1ULL << i);
3232
3233         if (level > 2)
3234                 /* bits 7:3 reserved */
3235                 mask |= 0xf8;
3236         else if (level == 2) {
3237                 if (spte & (1ULL << 7))
3238                         /* 2MB ref, bits 20:12 reserved */
3239                         mask |= 0x1ff000;
3240                 else
3241                         /* bits 6:3 reserved */
3242                         mask |= 0x78;
3243         }
3244
3245         return mask;
3246 }
3247
3248 static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte,
3249                                        int level)
3250 {
3251         printk(KERN_ERR "%s: spte 0x%llx level %d\n", __func__, spte, level);
3252
3253         /* 010b (write-only) */
3254         WARN_ON((spte & 0x7) == 0x2);
3255
3256         /* 110b (write/execute) */
3257         WARN_ON((spte & 0x7) == 0x6);
3258
3259         /* 100b (execute-only) and value not supported by logical processor */
3260         if (!cpu_has_vmx_ept_execute_only())
3261                 WARN_ON((spte & 0x7) == 0x4);
3262
3263         /* not 000b */
3264         if ((spte & 0x7)) {
3265                 u64 rsvd_bits = spte & ept_rsvd_mask(spte, level);
3266
3267                 if (rsvd_bits != 0) {
3268                         printk(KERN_ERR "%s: rsvd_bits = 0x%llx\n",
3269                                          __func__, rsvd_bits);
3270                         WARN_ON(1);
3271                 }
3272
3273                 if (level == 1 || (level == 2 && (spte & (1ULL << 7)))) {
3274                         u64 ept_mem_type = (spte & 0x38) >> 3;
3275
3276                         if (ept_mem_type == 2 || ept_mem_type == 3 ||
3277                             ept_mem_type == 7) {
3278                                 printk(KERN_ERR "%s: ept_mem_type=0x%llx\n",
3279                                                 __func__, ept_mem_type);
3280                                 WARN_ON(1);
3281                         }
3282                 }
3283         }
3284 }
3285
3286 static int handle_ept_misconfig(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3287 {
3288         u64 sptes[4];
3289         int nr_sptes, i;
3290         gpa_t gpa;
3291
3292         gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
3293
3294         printk(KERN_ERR "EPT: Misconfiguration.\n");
3295         printk(KERN_ERR "EPT: GPA: 0x%llx\n", gpa);
3296
3297         nr_sptes = kvm_mmu_get_spte_hierarchy(vcpu, gpa, sptes);
3298
3299         for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i)
3300                 ept_misconfig_inspect_spte(vcpu, sptes[i-1], i);
3301
3302         kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
3303         kvm_run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG;
3304
3305         return 0;
3306 }
3307
3308 static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3309 {
3310         u32 cpu_based_vm_exec_control;
3311
3312         /* clear pending NMI */
3313         cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
3314         cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
3315         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
3316         ++vcpu->stat.nmi_window_exits;
3317
3318         return 1;
3319 }
3320
3321 static void handle_invalid_guest_state(struct kvm_vcpu *vcpu,
3322                                 struct kvm_run *kvm_run)
3323 {
3324         struct vcpu_vmx *vmx = to_vmx(vcpu);
3325         enum emulation_result err = EMULATE_DONE;
3326
3327         local_irq_enable();
3328         preempt_enable();
3329
3330         while (!guest_state_valid(vcpu)) {
3331                 err = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
3332
3333                 if (err == EMULATE_DO_MMIO)
3334                         break;
3335
3336                 if (err != EMULATE_DONE) {
3337                         kvm_report_emulation_failure(vcpu, "emulation failure");
3338                         break;
3339                 }
3340
3341                 if (signal_pending(current))
3342                         break;
3343                 if (need_resched())
3344                         schedule();
3345         }
3346
3347         preempt_disable();
3348         local_irq_disable();
3349
3350         vmx->invalid_state_emulation_result = err;
3351 }
3352
3353 /*
3354  * The exit handlers return 1 if the exit was handled fully and guest execution
3355  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
3356  * to be done to userspace and return 0.
3357  */
3358 static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
3359                                       struct kvm_run *kvm_run) = {
3360         [EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
3361         [EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
3362         [EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
3363         [EXIT_REASON_NMI_WINDOW]              = handle_nmi_window,
3364         [EXIT_REASON_IO_INSTRUCTION]          = handle_io,
3365         [EXIT_REASON_CR_ACCESS]               = handle_cr,
3366         [EXIT_REASON_DR_ACCESS]               = handle_dr,
3367         [EXIT_REASON_CPUID]                   = handle_cpuid,
3368         [EXIT_REASON_MSR_READ]                = handle_rdmsr,
3369         [EXIT_REASON_MSR_WRITE]               = handle_wrmsr,
3370         [EXIT_REASON_PENDING_INTERRUPT]       = handle_interrupt_window,
3371         [EXIT_REASON_HLT]                     = handle_halt,
3372         [EXIT_REASON_INVLPG]                  = handle_invlpg,
3373         [EXIT_REASON_VMCALL]                  = handle_vmcall,
3374         [EXIT_REASON_VMCLEAR]                 = handle_vmx_insn,
3375         [EXIT_REASON_VMLAUNCH]                = handle_vmx_insn,
3376         [EXIT_REASON_VMPTRLD]                 = handle_vmx_insn,
3377         [EXIT_REASON_VMPTRST]                 = handle_vmx_insn,
3378         [EXIT_REASON_VMREAD]                  = handle_vmx_insn,
3379         [EXIT_REASON_VMRESUME]                = handle_vmx_insn,
3380         [EXIT_REASON_VMWRITE]                 = handle_vmx_insn,
3381         [EXIT_REASON_VMOFF]                   = handle_vmx_insn,
3382         [EXIT_REASON_VMON]                    = handle_vmx_insn,
3383         [EXIT_REASON_TPR_BELOW_THRESHOLD]     = handle_tpr_below_threshold,
3384         [EXIT_REASON_APIC_ACCESS]             = handle_apic_access,
3385         [EXIT_REASON_WBINVD]                  = handle_wbinvd,
3386         [EXIT_REASON_TASK_SWITCH]             = handle_task_switch,
3387         [EXIT_REASON_MCE_DURING_VMENTRY]      = handle_machine_check,
3388         [EXIT_REASON_EPT_VIOLATION]           = handle_ept_violation,
3389         [EXIT_REASON_EPT_MISCONFIG]           = handle_ept_misconfig,
3390 };
3391
3392 static const int kvm_vmx_max_exit_handlers =
3393         ARRAY_SIZE(kvm_vmx_exit_handlers);
3394
3395 /*
3396  * The guest has exited.  See if we can fix it or if we need userspace
3397  * assistance.
3398  */
3399 static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
3400 {
3401         struct vcpu_vmx *vmx = to_vmx(vcpu);
3402         u32 exit_reason = vmx->exit_reason;
3403         u32 vectoring_info = vmx->idt_vectoring_info;
3404
3405         trace_kvm_exit(exit_reason, kvm_rip_read(vcpu));
3406
3407         /* If we need to emulate an MMIO from handle_invalid_guest_state
3408          * we just return 0 */
3409         if (vmx->emulation_required && emulate_invalid_guest_state) {
3410                 if (guest_state_valid(vcpu))
3411                         vmx->emulation_required = 0;
3412                 return vmx->invalid_state_emulation_result != EMULATE_DO_MMIO;
3413         }
3414
3415         /* Access CR3 don't cause VMExit in paging mode, so we need
3416          * to sync with guest real CR3. */
3417         if (enable_ept && is_paging(vcpu))
3418                 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
3419
3420         if (unlikely(vmx->fail)) {
3421                 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
3422                 kvm_run->fail_entry.hardware_entry_failure_reason
3423                         = vmcs_read32(VM_INSTRUCTION_ERROR);
3424                 return 0;
3425         }
3426
3427         if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
3428                         (exit_reason != EXIT_REASON_EXCEPTION_NMI &&
3429                         exit_reason != EXIT_REASON_EPT_VIOLATION &&
3430                         exit_reason != EXIT_REASON_TASK_SWITCH))
3431                 printk(KERN_WARNING "%s: unexpected, valid vectoring info "
3432                        "(0x%x) and exit reason is 0x%x\n",
3433                        __func__, vectoring_info, exit_reason);
3434
3435         if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) {
3436                 if (vmx_interrupt_allowed(vcpu)) {
3437                         vmx->soft_vnmi_blocked = 0;
3438                 } else if (vmx->vnmi_blocked_time > 1000000000LL &&
3439                            vcpu->arch.nmi_pending) {
3440                         /*
3441                          * This CPU don't support us in finding the end of an
3442                          * NMI-blocked window if the guest runs with IRQs
3443                          * disabled. So we pull the trigger after 1 s of
3444                          * futile waiting, but inform the user about this.
3445                          */
3446                         printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
3447                                "state on VCPU %d after 1 s timeout\n",
3448                                __func__, vcpu->vcpu_id);
3449                         vmx->soft_vnmi_blocked = 0;
3450                 }
3451         }
3452
3453         if (exit_reason < kvm_vmx_max_exit_handlers
3454             && kvm_vmx_exit_handlers[exit_reason])
3455                 return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
3456         else {
3457                 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
3458                 kvm_run->hw.hardware_exit_reason = exit_reason;
3459         }
3460         return 0;
3461 }
3462
3463 static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
3464 {
3465         if (irr == -1 || tpr < irr) {
3466                 vmcs_write32(TPR_THRESHOLD, 0);
3467                 return;
3468         }
3469
3470         vmcs_write32(TPR_THRESHOLD, irr);
3471 }
3472
3473 static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
3474 {
3475         u32 exit_intr_info;
3476         u32 idt_vectoring_info = vmx->idt_vectoring_info;
3477         bool unblock_nmi;
3478         u8 vector;
3479         int type;
3480         bool idtv_info_valid;
3481
3482         exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
3483
3484         vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
3485
3486         /* Handle machine checks before interrupts are enabled */
3487         if ((vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY)
3488             || (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI
3489                 && is_machine_check(exit_intr_info)))
3490                 kvm_machine_check();
3491
3492         /* We need to handle NMIs before interrupts are enabled */
3493         if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
3494             (exit_intr_info & INTR_INFO_VALID_MASK))
3495                 asm("int $2");
3496
3497         idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
3498
3499         if (cpu_has_virtual_nmis()) {
3500                 unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
3501                 vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
3502                 /*
3503                  * SDM 3: 27.7.1.2 (September 2008)
3504                  * Re-set bit "block by NMI" before VM entry if vmexit caused by
3505                  * a guest IRET fault.
3506                  * SDM 3: 23.2.2 (September 2008)
3507                  * Bit 12 is undefined in any of the following cases:
3508                  *  If the VM exit sets the valid bit in the IDT-vectoring
3509                  *   information field.
3510                  *  If the VM exit is due to a double fault.
3511                  */
3512                 if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
3513                     vector != DF_VECTOR && !idtv_info_valid)
3514                         vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
3515                                       GUEST_INTR_STATE_NMI);
3516         } else if (unlikely(vmx->soft_vnmi_blocked))
3517                 vmx->vnmi_blocked_time +=
3518                         ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time));
3519
3520         vmx->vcpu.arch.nmi_injected = false;
3521         kvm_clear_exception_queue(&vmx->vcpu);
3522         kvm_clear_interrupt_queue(&vmx->vcpu);
3523
3524         if (!idtv_info_valid)
3525                 return;
3526
3527         vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
3528         type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
3529
3530         switch (type) {
3531         case INTR_TYPE_NMI_INTR:
3532                 vmx->vcpu.arch.nmi_injected = true;
3533                 /*
3534                  * SDM 3: 27.7.1.2 (September 2008)
3535                  * Clear bit "block by NMI" before VM entry if a NMI
3536                  * delivery faulted.
3537                  */
3538                 vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
3539                                 GUEST_INTR_STATE_NMI);
3540                 break;
3541         case INTR_TYPE_SOFT_EXCEPTION:
3542                 vmx->vcpu.arch.event_exit_inst_len =
3543                         vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
3544                 /* fall through */
3545         case INTR_TYPE_HARD_EXCEPTION:
3546                 if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) {
3547                         u32 err = vmcs_read32(IDT_VECTORING_ERROR_CODE);
3548                         kvm_queue_exception_e(&vmx->vcpu, vector, err);
3549                 } else
3550                         kvm_queue_exception(&vmx->vcpu, vector);
3551                 break;
3552         case INTR_TYPE_SOFT_INTR:
3553                 vmx->vcpu.arch.event_exit_inst_len =
3554                         vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
3555                 /* fall through */
3556         case INTR_TYPE_EXT_INTR:
3557                 kvm_queue_interrupt(&vmx->vcpu, vector,
3558                         type == INTR_TYPE_SOFT_INTR);
3559                 break;
3560         default:
3561                 break;
3562         }
3563 }
3564
3565 /*
3566  * Failure to inject an interrupt should give us the information
3567  * in IDT_VECTORING_INFO_FIELD.  However, if the failure occurs
3568  * when fetching the interrupt redirection bitmap in the real-mode
3569  * tss, this doesn't happen.  So we do it ourselves.
3570  */
3571 static void fixup_rmode_irq(struct vcpu_vmx *vmx)
3572 {
3573         vmx->rmode.irq.pending = 0;
3574         if (kvm_rip_read(&vmx->vcpu) + 1 != vmx->rmode.irq.rip)
3575                 return;
3576         kvm_rip_write(&vmx->vcpu, vmx->rmode.irq.rip);
3577         if (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK) {
3578                 vmx->idt_vectoring_info &= ~VECTORING_INFO_TYPE_MASK;
3579                 vmx->idt_vectoring_info |= INTR_TYPE_EXT_INTR;
3580                 return;
3581         }
3582         vmx->idt_vectoring_info =
3583                 VECTORING_INFO_VALID_MASK
3584                 | INTR_TYPE_EXT_INTR
3585                 | vmx->rmode.irq.vector;
3586 }
3587
3588 #ifdef CONFIG_X86_64
3589 #define R "r"
3590 #define Q "q"
3591 #else
3592 #define R "e"
3593 #define Q "l"
3594 #endif
3595
3596 static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3597 {
3598         struct vcpu_vmx *vmx = to_vmx(vcpu);
3599
3600         if (enable_ept && is_paging(vcpu)) {
3601                 vmcs_writel(GUEST_CR3, vcpu->arch.cr3);
3602                 ept_load_pdptrs(vcpu);
3603         }
3604         /* Record the guest's net vcpu time for enforced NMI injections. */
3605         if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
3606                 vmx->entry_time = ktime_get();
3607
3608         /* Handle invalid guest state instead of entering VMX */
3609         if (vmx->emulation_required && emulate_invalid_guest_state) {
3610                 handle_invalid_guest_state(vcpu, kvm_run);
3611                 return;
3612         }
3613
3614         if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
3615                 vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
3616         if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
3617                 vmcs_writel(GUEST_RIP, vcpu->arch.regs[VCPU_REGS_RIP]);
3618
3619         /* When single-stepping over STI and MOV SS, we must clear the
3620          * corresponding interruptibility bits in the guest state. Otherwise
3621          * vmentry fails as it then expects bit 14 (BS) in pending debug
3622          * exceptions being set, but that's not correct for the guest debugging
3623          * case. */
3624         if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
3625                 vmx_set_interrupt_shadow(vcpu, 0);
3626
3627         /*
3628          * Loading guest fpu may have cleared host cr0.ts
3629          */
3630         vmcs_writel(HOST_CR0, read_cr0());
3631
3632         set_debugreg(vcpu->arch.dr6, 6);
3633
3634         asm(
3635                 /* Store host registers */
3636                 "push %%"R"dx; push %%"R"bp;"
3637                 "push %%"R"cx \n\t"
3638                 "cmp %%"R"sp, %c[host_rsp](%0) \n\t"
3639                 "je 1f \n\t"
3640                 "mov %%"R"sp, %c[host_rsp](%0) \n\t"
3641                 __ex(ASM_VMX_VMWRITE_RSP_RDX) "\n\t"
3642                 "1: \n\t"
3643                 /* Reload cr2 if changed */
3644                 "mov %c[cr2](%0), %%"R"ax \n\t"
3645                 "mov %%cr2, %%"R"dx \n\t"
3646                 "cmp %%"R"ax, %%"R"dx \n\t"
3647                 "je 2f \n\t"
3648                 "mov %%"R"ax, %%cr2 \n\t"
3649                 "2: \n\t"
3650                 /* Check if vmlaunch of vmresume is needed */
3651                 "cmpl $0, %c[launched](%0) \n\t"
3652                 /* Load guest registers.  Don't clobber flags. */
3653                 "mov %c[rax](%0), %%"R"ax \n\t"
3654                 "mov %c[rbx](%0), %%"R"bx \n\t"
3655                 "mov %c[rdx](%0), %%"R"dx \n\t"
3656                 "mov %c[rsi](%0), %%"R"si \n\t"
3657                 "mov %c[rdi](%0), %%"R"di \n\t"
3658                 "mov %c[rbp](%0), %%"R"bp \n\t"
3659 #ifdef CONFIG_X86_64
3660                 "mov %c[r8](%0),  %%r8  \n\t"
3661                 "mov %c[r9](%0),  %%r9  \n\t"
3662                 "mov %c[r10](%0), %%r10 \n\t"
3663                 "mov %c[r11](%0), %%r11 \n\t"
3664                 "mov %c[r12](%0), %%r12 \n\t"
3665                 "mov %c[r13](%0), %%r13 \n\t"
3666                 "mov %c[r14](%0), %%r14 \n\t"
3667                 "mov %c[r15](%0), %%r15 \n\t"
3668 #endif
3669                 "mov %c[rcx](%0), %%"R"cx \n\t" /* kills %0 (ecx) */
3670
3671                 /* Enter guest mode */
3672                 "jne .Llaunched \n\t"
3673                 __ex(ASM_VMX_VMLAUNCH) "\n\t"
3674                 "jmp .Lkvm_vmx_return \n\t"
3675                 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
3676                 ".Lkvm_vmx_return: "
3677                 /* Save guest registers, load host registers, keep flags */
3678                 "xchg %0,     (%%"R"sp) \n\t"
3679                 "mov %%"R"ax, %c[rax](%0) \n\t"
3680                 "mov %%"R"bx, %c[rbx](%0) \n\t"
3681                 "push"Q" (%%"R"sp); pop"Q" %c[rcx](%0) \n\t"
3682                 "mov %%"R"dx, %c[rdx](%0) \n\t"
3683                 "mov %%"R"si, %c[rsi](%0) \n\t"
3684                 "mov %%"R"di, %c[rdi](%0) \n\t"
3685                 "mov %%"R"bp, %c[rbp](%0) \n\t"
3686 #ifdef CONFIG_X86_64
3687                 "mov %%r8,  %c[r8](%0) \n\t"
3688                 "mov %%r9,  %c[r9](%0) \n\t"
3689                 "mov %%r10, %c[r10](%0) \n\t"
3690                 "mov %%r11, %c[r11](%0) \n\t"
3691                 "mov %%r12, %c[r12](%0) \n\t"
3692                 "mov %%r13, %c[r13](%0) \n\t"
3693                 "mov %%r14, %c[r14](%0) \n\t"
3694                 "mov %%r15, %c[r15](%0) \n\t"
3695 #endif
3696                 "mov %%cr2, %%"R"ax   \n\t"
3697                 "mov %%"R"ax, %c[cr2](%0) \n\t"
3698
3699                 "pop  %%"R"bp; pop  %%"R"bp; pop  %%"R"dx \n\t"
3700                 "setbe %c[fail](%0) \n\t"
3701               : : "c"(vmx), "d"((unsigned long)HOST_RSP),
3702                 [launched]"i"(offsetof(struct vcpu_vmx, launched)),
3703                 [fail]"i"(offsetof(struct vcpu_vmx, fail)),
3704                 [host_rsp]"i"(offsetof(struct vcpu_vmx, host_rsp)),
3705                 [rax]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RAX])),
3706                 [rbx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBX])),
3707                 [rcx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RCX])),
3708                 [rdx]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDX])),
3709                 [rsi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RSI])),
3710                 [rdi]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RDI])),
3711                 [rbp]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_RBP])),
3712 #ifdef CONFIG_X86_64
3713                 [r8]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R8])),
3714                 [r9]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R9])),
3715                 [r10]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R10])),
3716                 [r11]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R11])),
3717                 [r12]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R12])),
3718                 [r13]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R13])),
3719                 [r14]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R14])),
3720                 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
3721 #endif
3722                 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
3723               : "cc", "memory"
3724                 , R"bx", R"di", R"si"
3725 #ifdef CONFIG_X86_64
3726                 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
3727 #endif
3728               );
3729
3730         vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
3731                                   | (1 << VCPU_EXREG_PDPTR));
3732         vcpu->arch.regs_dirty = 0;
3733
3734         get_debugreg(vcpu->arch.dr6, 6);
3735
3736         vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
3737         if (vmx->rmode.irq.pending)
3738                 fixup_rmode_irq(vmx);
3739
3740         asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
3741         vmx->launched = 1;
3742
3743         vmx_complete_interrupts(vmx);
3744 }
3745
3746 #undef R
3747 #undef Q
3748
3749 static void vmx_free_vmcs(struct kvm_vcpu *vcpu)
3750 {
3751         struct vcpu_vmx *vmx = to_vmx(vcpu);
3752
3753         if (vmx->vmcs) {
3754                 vcpu_clear(vmx);
3755                 free_vmcs(vmx->vmcs);
3756                 vmx->vmcs = NULL;
3757         }
3758 }
3759
3760 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
3761 {
3762         struct vcpu_vmx *vmx = to_vmx(vcpu);
3763
3764         spin_lock(&vmx_vpid_lock);
3765         if (vmx->vpid != 0)
3766                 __clear_bit(vmx->vpid, vmx_vpid_bitmap);
3767         spin_unlock(&vmx_vpid_lock);
3768         vmx_free_vmcs(vcpu);
3769         kfree(vmx->host_msrs);
3770         kfree(vmx->guest_msrs);
3771         kvm_vcpu_uninit(vcpu);
3772         kmem_cache_free(kvm_vcpu_cache, vmx);
3773 }
3774
3775 static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
3776 {
3777         int err;
3778         struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
3779         int cpu;
3780
3781         if (!vmx)
3782                 return ERR_PTR(-ENOMEM);
3783
3784         allocate_vpid(vmx);
3785
3786         err = kvm_vcpu_init(&vmx->vcpu, kvm, id);
3787         if (err)
3788                 goto free_vcpu;
3789
3790         vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
3791         if (!vmx->guest_msrs) {
3792                 err = -ENOMEM;
3793                 goto uninit_vcpu;
3794         }
3795
3796         vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
3797         if (!vmx->host_msrs)
3798                 goto free_guest_msrs;
3799
3800         vmx->vmcs = alloc_vmcs();
3801         if (!vmx->vmcs)
3802                 goto free_msrs;
3803
3804         vmcs_clear(vmx->vmcs);
3805
3806         cpu = get_cpu();
3807         vmx_vcpu_load(&vmx->vcpu, cpu);
3808         err = vmx_vcpu_setup(vmx);
3809         vmx_vcpu_put(&vmx->vcpu);
3810         put_cpu();
3811         if (err)
3812                 goto free_vmcs;
3813         if (vm_need_virtualize_apic_accesses(kvm))
3814                 if (alloc_apic_access_page(kvm) != 0)
3815                         goto free_vmcs;
3816
3817         if (enable_ept)
3818                 if (alloc_identity_pagetable(kvm) != 0)
3819                         goto free_vmcs;
3820
3821         return &vmx->vcpu;
3822
3823 free_vmcs:
3824         free_vmcs(vmx->vmcs);
3825 free_msrs:
3826         kfree(vmx->host_msrs);
3827 free_guest_msrs:
3828         kfree(vmx->guest_msrs);
3829 uninit_vcpu:
3830         kvm_vcpu_uninit(&vmx->vcpu);
3831 free_vcpu:
3832         kmem_cache_free(kvm_vcpu_cache, vmx);
3833         return ERR_PTR(err);
3834 }
3835
3836 static void __init vmx_check_processor_compat(void *rtn)
3837 {
3838         struct vmcs_config vmcs_conf;
3839
3840         *(int *)rtn = 0;
3841         if (setup_vmcs_config(&vmcs_conf) < 0)
3842                 *(int *)rtn = -EIO;
3843         if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
3844                 printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
3845                                 smp_processor_id());
3846                 *(int *)rtn = -EIO;
3847         }
3848 }
3849
3850 static int get_ept_level(void)
3851 {
3852         return VMX_EPT_DEFAULT_GAW + 1;
3853 }
3854
3855 static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
3856 {
3857         u64 ret;
3858
3859         /* For VT-d and EPT combination
3860          * 1. MMIO: always map as UC
3861          * 2. EPT with VT-d:
3862          *   a. VT-d without snooping control feature: can't guarantee the
3863          *      result, try to trust guest.
3864          *   b. VT-d with snooping control feature: snooping control feature of
3865          *      VT-d engine can guarantee the cache correctness. Just set it
3866          *      to WB to keep consistent with host. So the same as item 3.
3867          * 3. EPT without VT-d: always map as WB and set IGMT=1 to keep
3868          *    consistent with host MTRR
3869          */
3870         if (is_mmio)
3871                 ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
3872         else if (vcpu->kvm->arch.iommu_domain &&
3873                 !(vcpu->kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY))
3874                 ret = kvm_get_guest_memory_type(vcpu, gfn) <<
3875                       VMX_EPT_MT_EPTE_SHIFT;
3876         else
3877                 ret = (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT)
3878                         | VMX_EPT_IGMT_BIT;
3879
3880         return ret;
3881 }
3882
3883 static const struct trace_print_flags vmx_exit_reasons_str[] = {
3884         { EXIT_REASON_EXCEPTION_NMI,           "exception" },
3885         { EXIT_REASON_EXTERNAL_INTERRUPT,      "ext_irq" },
3886         { EXIT_REASON_TRIPLE_FAULT,            "triple_fault" },
3887         { EXIT_REASON_NMI_WINDOW,              "nmi_window" },
3888         { EXIT_REASON_IO_INSTRUCTION,          "io_instruction" },
3889         { EXIT_REASON_CR_ACCESS,               "cr_access" },
3890         { EXIT_REASON_DR_ACCESS,               "dr_access" },
3891         { EXIT_REASON_CPUID,                   "cpuid" },
3892         { EXIT_REASON_MSR_READ,                "rdmsr" },
3893         { EXIT_REASON_MSR_WRITE,               "wrmsr" },
3894         { EXIT_REASON_PENDING_INTERRUPT,       "interrupt_window" },
3895         { EXIT_REASON_HLT,                     "halt" },
3896         { EXIT_REASON_INVLPG,                  "invlpg" },
3897         { EXIT_REASON_VMCALL,                  "hypercall" },
3898         { EXIT_REASON_TPR_BELOW_THRESHOLD,     "tpr_below_thres" },
3899         { EXIT_REASON_APIC_ACCESS,             "apic_access" },
3900         { EXIT_REASON_WBINVD,                  "wbinvd" },
3901         { EXIT_REASON_TASK_SWITCH,             "task_switch" },
3902         { EXIT_REASON_EPT_VIOLATION,           "ept_violation" },
3903         { -1, NULL }
3904 };
3905
3906 static struct kvm_x86_ops vmx_x86_ops = {
3907         .cpu_has_kvm_support = cpu_has_kvm_support,
3908         .disabled_by_bios = vmx_disabled_by_bios,
3909         .hardware_setup = hardware_setup,
3910         .hardware_unsetup = hardware_unsetup,
3911         .check_processor_compatibility = vmx_check_processor_compat,
3912         .hardware_enable = hardware_enable,
3913         .hardware_disable = hardware_disable,
3914         .cpu_has_accelerated_tpr = report_flexpriority,
3915
3916         .vcpu_create = vmx_create_vcpu,
3917         .vcpu_free = vmx_free_vcpu,
3918         .vcpu_reset = vmx_vcpu_reset,
3919
3920         .prepare_guest_switch = vmx_save_host_state,
3921         .vcpu_load = vmx_vcpu_load,
3922         .vcpu_put = vmx_vcpu_put,
3923
3924         .set_guest_debug = set_guest_debug,
3925         .get_msr = vmx_get_msr,
3926         .set_msr = vmx_set_msr,
3927         .get_segment_base = vmx_get_segment_base,
3928         .get_segment = vmx_get_segment,
3929         .set_segment = vmx_set_segment,
3930         .get_cpl = vmx_get_cpl,
3931         .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
3932         .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
3933         .set_cr0 = vmx_set_cr0,
3934         .set_cr3 = vmx_set_cr3,
3935         .set_cr4 = vmx_set_cr4,
3936         .set_efer = vmx_set_efer,
3937         .get_idt = vmx_get_idt,
3938         .set_idt = vmx_set_idt,
3939         .get_gdt = vmx_get_gdt,
3940         .set_gdt = vmx_set_gdt,
3941         .cache_reg = vmx_cache_reg,
3942         .get_rflags = vmx_get_rflags,
3943         .set_rflags = vmx_set_rflags,
3944
3945         .tlb_flush = vmx_flush_tlb,
3946
3947         .run = vmx_vcpu_run,
3948         .handle_exit = vmx_handle_exit,
3949         .skip_emulated_instruction = skip_emulated_instruction,
3950         .set_interrupt_shadow = vmx_set_interrupt_shadow,
3951         .get_interrupt_shadow = vmx_get_interrupt_shadow,
3952         .patch_hypercall = vmx_patch_hypercall,
3953         .set_irq = vmx_inject_irq,
3954         .set_nmi = vmx_inject_nmi,
3955         .queue_exception = vmx_queue_exception,
3956         .interrupt_allowed = vmx_interrupt_allowed,
3957         .nmi_allowed = vmx_nmi_allowed,
3958         .enable_nmi_window = enable_nmi_window,
3959         .enable_irq_window = enable_irq_window,
3960         .update_cr8_intercept = update_cr8_intercept,
3961
3962         .set_tss_addr = vmx_set_tss_addr,
3963         .get_tdp_level = get_ept_level,
3964         .get_mt_mask = vmx_get_mt_mask,
3965
3966         .exit_reasons_str = vmx_exit_reasons_str,
3967 };
3968
3969 static int __init vmx_init(void)
3970 {
3971         int r;
3972
3973         vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
3974         if (!vmx_io_bitmap_a)
3975                 return -ENOMEM;
3976
3977         vmx_io_bitmap_b = (unsigned long *)__get_free_page(GFP_KERNEL);
3978         if (!vmx_io_bitmap_b) {
3979                 r = -ENOMEM;
3980                 goto out;
3981         }
3982
3983         vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
3984         if (!vmx_msr_bitmap_legacy) {
3985                 r = -ENOMEM;
3986                 goto out1;
3987         }
3988
3989         vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
3990         if (!vmx_msr_bitmap_longmode) {
3991                 r = -ENOMEM;
3992                 goto out2;
3993         }
3994
3995         /*
3996          * Allow direct access to the PC debug port (it is often used for I/O
3997          * delays, but the vmexits simply slow things down).
3998          */
3999         memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
4000         clear_bit(0x80, vmx_io_bitmap_a);
4001
4002         memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
4003
4004         memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
4005         memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
4006
4007         set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
4008
4009         r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
4010         if (r)
4011                 goto out3;
4012
4013         vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
4014         vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
4015         vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
4016         vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
4017         vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
4018         vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
4019
4020         if (enable_ept) {
4021                 bypass_guest_pf = 0;
4022                 kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
4023                         VMX_EPT_WRITABLE_MASK);
4024                 kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
4025                                 VMX_EPT_EXECUTABLE_MASK);
4026                 kvm_enable_tdp();
4027         } else
4028                 kvm_disable_tdp();
4029
4030         if (bypass_guest_pf)
4031                 kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
4032
4033         ept_sync_global();
4034
4035         return 0;
4036
4037 out3:
4038         free_page((unsigned long)vmx_msr_bitmap_longmode);
4039 out2:
4040         free_page((unsigned long)vmx_msr_bitmap_legacy);
4041 out1:
4042         free_page((unsigned long)vmx_io_bitmap_b);
4043 out:
4044         free_page((unsigned long)vmx_io_bitmap_a);
4045         return r;
4046 }
4047
4048 static void __exit vmx_exit(void)
4049 {
4050         free_page((unsigned long)vmx_msr_bitmap_legacy);
4051         free_page((unsigned long)vmx_msr_bitmap_longmode);
4052         free_page((unsigned long)vmx_io_bitmap_b);
4053         free_page((unsigned long)vmx_io_bitmap_a);
4054
4055         kvm_exit();
4056 }
4057
4058 module_init(vmx_init)
4059 module_exit(vmx_exit)