KVM: VMX: trace clts and lmsw instructions as cr accesses
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 686e1ab..7c7b2ee 100644
@@ -61,12 +61,52 @@ module_param_named(unrestricted_guest,
 static int __read_mostly emulate_invalid_guest_state = 0;
 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
 
+#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST                          \
+       (X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
+#define KVM_GUEST_CR0_MASK                                             \
+       (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
+#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST                                \
+       (X86_CR0_WP | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP)
+#define KVM_VM_CR0_ALWAYS_ON                                           \
+       (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
+#define KVM_CR4_GUEST_OWNED_BITS                                     \
+       (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
+        | X86_CR4_OSXMMEXCPT)
+
+#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
+#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
+
+/*
+ * These 2 parameters are used to configure the controls for Pause-Loop
+ * Exiting:
+ * ple_gap:    upper bound on the amount of time between two successive
+ *             executions of PAUSE in a loop. Also indicates whether PLE
+ *             is enabled. According to tests, this time is usually
+ *             smaller than 41 cycles.
+ * ple_window: upper bound on the amount of time a guest is allowed to
+ *             execute in a PAUSE loop. Tests indicate that most spinlocks
+ *             are held for less than 2^12 cycles.
+ * Time is measured on a counter that runs at the same rate as the TSC;
+ * refer to SDM volume 3b, sections 21.6.13 & 22.1.3.
+ */
+#define KVM_VMX_DEFAULT_PLE_GAP    41
+#define KVM_VMX_DEFAULT_PLE_WINDOW 4096
+static int ple_gap = KVM_VMX_DEFAULT_PLE_GAP;
+module_param(ple_gap, int, S_IRUGO);
+
+static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
+module_param(ple_window, int, S_IRUGO);
+
 struct vmcs {
        u32 revision_id;
        u32 abort;
        char data[0];
 };
 
+struct shared_msr_entry {
+       unsigned index;
+       u64 data;
+       u64 mask;
+};
+
 struct vcpu_vmx {
        struct kvm_vcpu       vcpu;
        struct list_head      local_vcpus_link;
@@ -74,13 +114,12 @@ struct vcpu_vmx {
        int                   launched;
        u8                    fail;
        u32                   idt_vectoring_info;
-       struct kvm_msr_entry *guest_msrs;
-       struct kvm_msr_entry *host_msrs;
+       struct shared_msr_entry *guest_msrs;
        int                   nmsrs;
        int                   save_nmsrs;
-       int                   msr_offset_efer;
 #ifdef CONFIG_X86_64
-       int                   msr_offset_kernel_gs_base;
+       u64                   msr_host_kernel_gs_base;
+       u64                   msr_guest_kernel_gs_base;
 #endif
        struct vmcs          *vmcs;
        struct {
@@ -88,7 +127,6 @@ struct vcpu_vmx {
                u16           fs_sel, gs_sel, ldt_sel;
                int           gs_ldt_reload_needed;
                int           fs_reload_needed;
-               int           guest_efer_loaded;
        } host_state;
        struct {
                int vm86_active;
@@ -107,13 +145,14 @@ struct vcpu_vmx {
        } rmode;
        int vpid;
        bool emulation_required;
-       enum emulation_result invalid_state_emulation_result;
 
        /* Support for vnmi-less CPUs */
        int soft_vnmi_blocked;
        ktime_t entry_time;
        s64 vnmi_blocked_time;
        u32 exit_reason;
+
+       bool rdtscp_enabled;
 };
 
 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
@@ -176,6 +215,8 @@ static struct kvm_vmx_segment_field {
        VMX_SEGMENT_FIELD(LDTR),
 };
 
+static u64 host_efer;
+
 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
 
 /*
@@ -184,28 +225,12 @@ static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
  */
 static const u32 vmx_msr_index[] = {
 #ifdef CONFIG_X86_64
-       MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
+       MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
 #endif
-       MSR_EFER, MSR_K6_STAR,
+       MSR_EFER, MSR_TSC_AUX, MSR_K6_STAR,
 };
 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
 
-static void load_msrs(struct kvm_msr_entry *e, int n)
-{
-       int i;
-
-       for (i = 0; i < n; ++i)
-               wrmsrl(e[i].index, e[i].data);
-}
-
-static void save_msrs(struct kvm_msr_entry *e, int n)
-{
-       int i;
-
-       for (i = 0; i < n; ++i)
-               rdmsrl(e[i].index, e[i].data);
-}
-
 static inline int is_page_fault(u32 intr_info)
 {
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
@@ -293,6 +318,11 @@ static inline bool cpu_has_vmx_ept_2m_page(void)
        return !!(vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT);
 }
 
+static inline bool cpu_has_vmx_ept_1g_page(void)
+{
+       return !!(vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT);
+}
+
 static inline int cpu_has_vmx_invept_individual_addr(void)
 {
        return !!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT);
@@ -320,6 +350,12 @@ static inline int cpu_has_vmx_unrestricted_guest(void)
                SECONDARY_EXEC_UNRESTRICTED_GUEST;
 }
 
+static inline int cpu_has_vmx_ple(void)
+{
+       return vmcs_config.cpu_based_2nd_exec_ctrl &
+               SECONDARY_EXEC_PAUSE_LOOP_EXITING;
+}
+
 static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
 {
        return flexpriority_enabled &&
@@ -333,6 +369,12 @@ static inline int cpu_has_vmx_vpid(void)
                SECONDARY_EXEC_ENABLE_VPID;
 }
 
+static inline int cpu_has_vmx_rdtscp(void)
+{
+       return vmcs_config.cpu_based_2nd_exec_ctrl &
+               SECONDARY_EXEC_RDTSCP;
+}
+
 static inline int cpu_has_virtual_nmis(void)
 {
        return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
@@ -348,7 +390,7 @@ static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
        int i;
 
        for (i = 0; i < vmx->nmsrs; ++i)
-               if (vmx->guest_msrs[i].index == msr)
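+               /* guest_msrs[i].index is a slot in vmx_msr_index[], not the MSR number */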
+               if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
                        return i;
        return -1;
 }
@@ -379,7 +421,7 @@ static inline void __invept(int ext, u64 eptp, gpa_t gpa)
                        : : "a" (&operand), "c" (ext) : "cc", "memory");
 }
 
-static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
+static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
 {
        int i;
 
@@ -540,10 +582,12 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
        eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR);
        if (!vcpu->fpu_active)
                eb |= 1u << NM_VECTOR;
+       /*
+        * Unconditionally intercept #DB so we can maintain dr6 without
+        * reading it every exit.
+        */
+       eb |= 1u << DB_VECTOR;
        if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
-               if (vcpu->guest_debug &
-                   (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
-                       eb |= 1u << DB_VECTOR;
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
                        eb |= 1u << BP_VECTOR;
        }
@@ -568,15 +612,13 @@ static void reload_tss(void)
        load_TR_desc();
 }
 
-static void load_transition_efer(struct vcpu_vmx *vmx)
+static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
 {
-       int efer_offset = vmx->msr_offset_efer;
-       u64 host_efer = vmx->host_msrs[efer_offset].data;
-       u64 guest_efer = vmx->guest_msrs[efer_offset].data;
+       u64 guest_efer;
        u64 ignore_bits;
 
-       if (efer_offset < 0)
-               return;
+       guest_efer = vmx->vcpu.arch.shadow_efer;
+
        /*
        * NX is emulated; LMA and LME are handled by hardware; SCE is
        * meaningless outside long mode
@@ -588,27 +630,17 @@ static void load_transition_efer(struct vcpu_vmx *vmx)
        if (guest_efer & EFER_LMA)
                ignore_bits &= ~(u64)EFER_SCE;
 #endif
-       if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits))
-               return;
-
-       vmx->host_state.guest_efer_loaded = 1;
        guest_efer &= ~ignore_bits;
        guest_efer |= host_efer & ignore_bits;
-       wrmsrl(MSR_EFER, guest_efer);
-       vmx->vcpu.stat.efer_reload++;
-}
-
-static void reload_host_efer(struct vcpu_vmx *vmx)
-{
-       if (vmx->host_state.guest_efer_loaded) {
-               vmx->host_state.guest_efer_loaded = 0;
-               load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
-       }
+       vmx->guest_msrs[efer_offset].data = guest_efer;
+       vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
+       return true;
 }
 
 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
+       int i;
 
        if (vmx->host_state.loaded)
                return;
@@ -645,13 +677,15 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 #endif
 
 #ifdef CONFIG_X86_64
-       if (is_long_mode(&vmx->vcpu))
-               save_msrs(vmx->host_msrs +
-                         vmx->msr_offset_kernel_gs_base, 1);
-
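+       /* MSR_KERNEL_GS_BASE is not switched by VMX transitions; swap it by hand */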
+       if (is_long_mode(&vmx->vcpu)) {
+               rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+               wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+       }
 #endif
-       load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
-       load_transition_efer(vmx);
+       for (i = 0; i < vmx->save_nmsrs; ++i)
+               kvm_set_shared_msr(vmx->guest_msrs[i].index,
+                                  vmx->guest_msrs[i].data,
+                                  vmx->guest_msrs[i].mask);
 }
 
 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
@@ -679,9 +713,12 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
                local_irq_restore(flags);
        }
        reload_tss();
-       save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
-       load_msrs(vmx->host_msrs, vmx->save_nmsrs);
-       reload_host_efer(vmx);
+#ifdef CONFIG_X86_64
+       if (is_long_mode(&vmx->vcpu)) {
+               rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+               wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+       }
+#endif
 }
 
 static void vmx_load_host_state(struct vcpu_vmx *vmx)
@@ -704,7 +741,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        if (vcpu->cpu != cpu) {
                vcpu_clear(vmx);
                kvm_migrate_timers(vcpu);
-               vpid_sync_vcpu_all(vmx);
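+               /* defer to a vcpu request; the flush covers both VPID and EPT */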
+               set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
                local_irq_disable();
                list_add(&vmx->local_vcpus_link,
                         &per_cpu(vcpus_on_cpu, cpu));
@@ -778,7 +815,12 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
 
 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
 {
-       return vmcs_readl(GUEST_RFLAGS);
+       unsigned long rflags;
+
+       rflags = vmcs_readl(GUEST_RFLAGS);
+       if (to_vmx(vcpu)->rmode.vm86_active)
+               rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
+       return rflags;
 }
 
 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
@@ -864,22 +906,22 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
 }
 
+static bool vmx_rdtscp_supported(void)
+{
+       return cpu_has_vmx_rdtscp();
+}
+
 /*
  * Swap MSR entry in host/guest MSR entry array.
  */
-#ifdef CONFIG_X86_64
 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
 {
-       struct kvm_msr_entry tmp;
+       struct shared_msr_entry tmp;
 
        tmp = vmx->guest_msrs[to];
        vmx->guest_msrs[to] = vmx->guest_msrs[from];
        vmx->guest_msrs[from] = tmp;
-       tmp = vmx->host_msrs[to];
-       vmx->host_msrs[to] = vmx->host_msrs[from];
-       vmx->host_msrs[from] = tmp;
 }
-#endif
 
 /*
  * Set up the vmcs to automatically save and restore system
@@ -888,15 +930,13 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
  */
 static void setup_msrs(struct vcpu_vmx *vmx)
 {
-       int save_nmsrs;
+       int save_nmsrs, index;
        unsigned long *msr_bitmap;
 
        vmx_load_host_state(vmx);
        save_nmsrs = 0;
 #ifdef CONFIG_X86_64
        if (is_long_mode(&vmx->vcpu)) {
-               int index;
-
                index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
                if (index >= 0)
                        move_msr_up(vmx, index, save_nmsrs++);
@@ -906,8 +946,8 @@ static void setup_msrs(struct vcpu_vmx *vmx)
                index = __find_msr_index(vmx, MSR_CSTAR);
                if (index >= 0)
                        move_msr_up(vmx, index, save_nmsrs++);
-               index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
-               if (index >= 0)
+               index = __find_msr_index(vmx, MSR_TSC_AUX);
+               if (index >= 0 && vmx->rdtscp_enabled)
                        move_msr_up(vmx, index, save_nmsrs++);
                /*
                 * MSR_K6_STAR is only needed on long mode guests, and only
@@ -918,13 +958,11 @@ static void setup_msrs(struct vcpu_vmx *vmx)
                        move_msr_up(vmx, index, save_nmsrs++);
        }
 #endif
-       vmx->save_nmsrs = save_nmsrs;
+       index = __find_msr_index(vmx, MSR_EFER);
+       if (index >= 0 && update_transition_efer(vmx, index))
+               move_msr_up(vmx, index, save_nmsrs++);
 
-#ifdef CONFIG_X86_64
-       vmx->msr_offset_kernel_gs_base =
-               __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
-#endif
-       vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
+       vmx->save_nmsrs = save_nmsrs;
 
        if (cpu_has_vmx_msr_bitmap()) {
                if (is_long_mode(&vmx->vcpu))
@@ -966,7 +1004,7 @@ static void guest_write_tsc(u64 guest_tsc, u64 host_tsc)
 static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 {
        u64 data;
-       struct kvm_msr_entry *msr;
+       struct shared_msr_entry *msr;
 
        if (!pdata) {
                printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
@@ -981,9 +1019,13 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
        case MSR_GS_BASE:
                data = vmcs_readl(GUEST_GS_BASE);
                break;
+       case MSR_KERNEL_GS_BASE:
+               vmx_load_host_state(to_vmx(vcpu));
+               data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
+               break;
+#endif
        case MSR_EFER:
                return kvm_get_msr_common(vcpu, msr_index, pdata);
-#endif
        case MSR_IA32_TSC:
                data = guest_read_tsc();
                break;
@@ -996,10 +1038,15 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
        case MSR_IA32_SYSENTER_ESP:
                data = vmcs_readl(GUEST_SYSENTER_ESP);
                break;
+       case MSR_TSC_AUX:
+               if (!to_vmx(vcpu)->rdtscp_enabled)
+                       return 1;
+               /* Otherwise falls through */
        default:
                vmx_load_host_state(to_vmx(vcpu));
                msr = find_msr_entry(to_vmx(vcpu), msr_index);
                if (msr) {
+                       vmx_load_host_state(to_vmx(vcpu));
                        data = msr->data;
                        break;
                }
@@ -1018,7 +1065,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       struct kvm_msr_entry *msr;
+       struct shared_msr_entry *msr;
        u64 host_tsc;
        int ret = 0;
 
@@ -1034,6 +1081,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
        case MSR_GS_BASE:
                vmcs_writel(GUEST_GS_BASE, data);
                break;
+       case MSR_KERNEL_GS_BASE:
+               vmx_load_host_state(vmx);
+               vmx->msr_guest_kernel_gs_base = data;
+               break;
 #endif
        case MSR_IA32_SYSENTER_CS:
                vmcs_write32(GUEST_SYSENTER_CS, data);
@@ -1054,11 +1105,19 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
                        vcpu->arch.pat = data;
                        break;
                }
-               /* Otherwise falls through to kvm_set_msr_common */
+               ret = kvm_set_msr_common(vcpu, msr_index, data);
+               break;
+       case MSR_TSC_AUX:
+               if (!vmx->rdtscp_enabled)
+                       return 1;
+               /* Check reserved bits: bits 63:32 must be zero */
+               if ((data >> 32) != 0)
+                       return 1;
+               /* Otherwise falls through */
        default:
-               vmx_load_host_state(vmx);
                msr = find_msr_entry(vmx, msr_index);
                if (msr) {
+                       vmx_load_host_state(vmx);
                        msr->data = data;
                        break;
                }
@@ -1087,30 +1146,14 @@ static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
        }
 }
 
-static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
+static void set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
 {
-       int old_debug = vcpu->guest_debug;
-       unsigned long flags;
-
-       vcpu->guest_debug = dbg->control;
-       if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
-               vcpu->guest_debug = 0;
-
        if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
                vmcs_writel(GUEST_DR7, dbg->arch.debugreg[7]);
        else
                vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
 
-       flags = vmcs_readl(GUEST_RFLAGS);
-       if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
-               flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
-       else if (old_debug & KVM_GUESTDBG_SINGLESTEP)
-               flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
-       vmcs_writel(GUEST_RFLAGS, flags);
-
        update_exception_bitmap(vcpu);
-
-       return 0;
 }
 
 static __init int cpu_has_kvm_support(void)
@@ -1129,12 +1172,15 @@ static __init int vmx_disabled_by_bios(void)
        /* locked but not enabled */
 }
 
-static void hardware_enable(void *garbage)
+static int hardware_enable(void *garbage)
 {
        int cpu = raw_smp_processor_id();
        u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
        u64 old;
 
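+       /* CR4.VMXE already set means VMX is in use, e.g. by another VMM */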
+       if (read_cr4() & X86_CR4_VMXE)
+               return -EBUSY;
+
        INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
        rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
        if ((old & (FEATURE_CONTROL_LOCKED |
@@ -1149,6 +1195,10 @@ static void hardware_enable(void *garbage)
        asm volatile (ASM_VMX_VMXON_RAX
                      : : "a"(&phys_addr), "m"(phys_addr)
                      : "memory", "cc");
+
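+       /* invalidate stale EPT mappings; must run after VMXON (was in vmx_init) */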
+       ept_sync_global();
+
+       return 0;
 }
 
 static void vmclear_local_vcpus(void)
@@ -1222,6 +1272,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
              CPU_BASED_USE_IO_BITMAPS |
              CPU_BASED_MOV_DR_EXITING |
              CPU_BASED_USE_TSC_OFFSETING |
+             CPU_BASED_MWAIT_EXITING |
+             CPU_BASED_MONITOR_EXITING |
              CPU_BASED_INVLPG_EXITING;
        opt = CPU_BASED_TPR_SHADOW |
              CPU_BASED_USE_MSR_BITMAPS |
@@ -1240,7 +1292,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
                        SECONDARY_EXEC_WBINVD_EXITING |
                        SECONDARY_EXEC_ENABLE_VPID |
                        SECONDARY_EXEC_ENABLE_EPT |
-                       SECONDARY_EXEC_UNRESTRICTED_GUEST;
+                       SECONDARY_EXEC_UNRESTRICTED_GUEST |
+                       SECONDARY_EXEC_PAUSE_LOOP_EXITING |
+                       SECONDARY_EXEC_RDTSCP;
                if (adjust_vmx_controls(min2, opt2,
                                        MSR_IA32_VMX_PROCBASED_CTLS2,
                                        &_cpu_based_2nd_exec_control) < 0)
@@ -1254,12 +1308,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
        if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
        /* CR3 accesses and invlpg don't need to cause VM exits when EPT
           is enabled */
-               min &= ~(CPU_BASED_CR3_LOAD_EXITING |
-                        CPU_BASED_CR3_STORE_EXITING |
-                        CPU_BASED_INVLPG_EXITING);
-               if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
-                                       &_cpu_based_exec_control) < 0)
-                       return -EIO;
+               _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
+                                            CPU_BASED_CR3_STORE_EXITING |
+                                            CPU_BASED_INVLPG_EXITING);
                rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
                      vmx_capability.ept, vmx_capability.vpid);
        }
@@ -1337,15 +1388,17 @@ static void free_kvm_area(void)
 {
        int cpu;
 
-       for_each_online_cpu(cpu)
+       for_each_possible_cpu(cpu) {
                free_vmcs(per_cpu(vmxarea, cpu));
+               per_cpu(vmxarea, cpu) = NULL;
+       }
 }
 
 static __init int alloc_kvm_area(void)
 {
        int cpu;
 
-       for_each_online_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                struct vmcs *vmcs;
 
                vmcs = alloc_vmcs_cpu(cpu);
@@ -1387,6 +1440,9 @@ static __init int hardware_setup(void)
        if (enable_ept && !cpu_has_vmx_ept_2m_page())
                kvm_disable_largepages();
 
+       if (!cpu_has_vmx_ple())
+               ple_gap = 0;
+
        return alloc_kvm_area();
 }
 
@@ -1452,8 +1508,12 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
 static gva_t rmode_tss_base(struct kvm *kvm)
 {
        if (!kvm->arch.tss_addr) {
-               gfn_t base_gfn = kvm->memslots[0].base_gfn +
-                                kvm->memslots[0].npages - 3;
+               struct kvm_memslots *slots;
+               gfn_t base_gfn;
+
+               slots = rcu_dereference(kvm->memslots);
+               base_gfn = slots->memslots[0].base_gfn +
+                          slots->memslots[0].npages - 3;
                return base_gfn << PAGE_SHIFT;
        }
        return kvm->arch.tss_addr;
@@ -1529,8 +1589,16 @@ continue_rmode:
 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
+       struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
+
+       if (!msr)
+               return;
 
+       /*
+        * Force kernel_gs_base reloading before EFER changes, as control
+        * of this msr depends on is_long_mode().
+        */
+       vmx_load_host_state(to_vmx(vcpu));
        vcpu->arch.shadow_efer = efer;
        if (!msr)
                return;
@@ -1587,8 +1655,10 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
 
 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK;
-       vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
+       ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
+
+       vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
+       vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
 }
 
 static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
@@ -1633,8 +1703,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
                             (CPU_BASED_CR3_LOAD_EXITING |
                              CPU_BASED_CR3_STORE_EXITING));
                vcpu->arch.cr0 = cr0;
-               vmx_set_cr4(vcpu, vcpu->arch.cr4);
-               *hw_cr0 &= ~X86_CR0_WP;
+               vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
        } else if (!is_paging(vcpu)) {
                /* From nonpaging to paging */
                vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
@@ -1642,20 +1711,11 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
                             ~(CPU_BASED_CR3_LOAD_EXITING |
                               CPU_BASED_CR3_STORE_EXITING));
                vcpu->arch.cr0 = cr0;
-               vmx_set_cr4(vcpu, vcpu->arch.cr4);
-               if (!(vcpu->arch.cr0 & X86_CR0_WP))
-                       *hw_cr0 &= ~X86_CR0_WP;
+               vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
        }
-}
 
-static void ept_update_paging_mode_cr4(unsigned long *hw_cr4,
-                                       struct kvm_vcpu *vcpu)
-{
-       if (!is_paging(vcpu)) {
-               *hw_cr4 &= ~X86_CR4_PAE;
-               *hw_cr4 |= X86_CR4_PSE;
-       } else if (!(vcpu->arch.cr4 & X86_CR4_PAE))
-               *hw_cr4 &= ~X86_CR4_PAE;
+       if (!(cr0 & X86_CR0_WP))
+               *hw_cr0 &= ~X86_CR0_WP;
 }
 
 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
@@ -1720,6 +1780,7 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
                vmcs_write64(EPT_POINTER, eptp);
                guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 :
                        vcpu->kvm->arch.ept_identity_map_addr;
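+               /* load the guest's PDPTRs into the VMCS now, not on every vmentry */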
+               ept_load_pdptrs(vcpu);
        }
 
        vmx_flush_tlb(vcpu);
@@ -1734,8 +1795,14 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
 
        vcpu->arch.cr4 = cr4;
-       if (enable_ept)
-               ept_update_paging_mode_cr4(&hw_cr4, vcpu);
+       if (enable_ept) {
+               if (!is_paging(vcpu)) {
+                       hw_cr4 &= ~X86_CR4_PAE;
+                       hw_cr4 |= X86_CR4_PSE;
+               } else if (!(cr4 & X86_CR4_PAE)) {
+                       hw_cr4 &= ~X86_CR4_PAE;
+               }
+       }
 
        vmcs_writel(CR4_READ_SHADOW, cr4);
        vmcs_writel(GUEST_CR4, hw_cr4);
@@ -1773,16 +1840,13 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
 
 static int vmx_get_cpl(struct kvm_vcpu *vcpu)
 {
-       struct kvm_segment kvm_seg;
-
        if (!(vcpu->arch.cr0 & X86_CR0_PE)) /* if real mode */
                return 0;
 
        if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */
                return 3;
 
-       vmx_get_segment(vcpu, &kvm_seg, VCPU_SREG_CS);
-       return kvm_seg.selector & 3;
+       return vmcs_read16(GUEST_CS_SELECTOR) & 3;
 }
 
 static u32 vmx_segment_access_rights(struct kvm_segment *var)
@@ -2164,7 +2228,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
        struct kvm_userspace_memory_region kvm_userspace_mem;
        int r = 0;
 
-       down_write(&kvm->slots_lock);
+       mutex_lock(&kvm->slots_lock);
        if (kvm->arch.apic_access_page)
                goto out;
        kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
@@ -2177,7 +2241,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
 
        kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
 out:
-       up_write(&kvm->slots_lock);
+       mutex_unlock(&kvm->slots_lock);
        return r;
 }
 
@@ -2186,7 +2250,7 @@ static int alloc_identity_pagetable(struct kvm *kvm)
        struct kvm_userspace_memory_region kvm_userspace_mem;
        int r = 0;
 
-       down_write(&kvm->slots_lock);
+       mutex_lock(&kvm->slots_lock);
        if (kvm->arch.ept_identity_pagetable)
                goto out;
        kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
@@ -2201,7 +2265,7 @@ static int alloc_identity_pagetable(struct kvm *kvm)
        kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
                        kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
 out:
-       up_write(&kvm->slots_lock);
+       mutex_unlock(&kvm->slots_lock);
        return r;
 }
 
@@ -2298,13 +2362,22 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
                                ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
                if (vmx->vpid == 0)
                        exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
-               if (!enable_ept)
+               if (!enable_ept) {
                        exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
+                       enable_unrestricted_guest = 0;
+               }
                if (!enable_unrestricted_guest)
                        exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
+               if (!ple_gap)
+                       exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
                vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
        }
 
+       if (ple_gap) {
+               vmcs_write32(PLE_GAP, ple_gap);
+               vmcs_write32(PLE_WINDOW, ple_window);
+       }
+
        vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, !!bypass_guest_pf);
        vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, !!bypass_guest_pf);
        vmcs_write32(CR3_TARGET_COUNT, 0);           /* 22.2.1 */
@@ -2364,18 +2437,15 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
        for (i = 0; i < NR_VMX_MSR; ++i) {
                u32 index = vmx_msr_index[i];
                u32 data_low, data_high;
-               u64 data;
                int j = vmx->nmsrs;
 
                if (rdmsr_safe(index, &data_low, &data_high) < 0)
                        continue;
                if (wrmsr_safe(index, data_low, data_high) < 0)
                        continue;
-               data = data_low | ((u64)data_high << 32);
-               vmx->host_msrs[j].index = index;
-               vmx->host_msrs[j].reserved = 0;
-               vmx->host_msrs[j].data = data;
-               vmx->guest_msrs[j] = vmx->host_msrs[j];
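+               /* index is the shared-msr slot, which doubles as an index into vmx_msr_index[] */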
+               vmx->guest_msrs[j].index = i;
+               vmx->guest_msrs[j].data = 0;
+               vmx->guest_msrs[j].mask = -1ull;
                ++vmx->nmsrs;
        }
 
@@ -2385,7 +2455,10 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
        vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
 
        vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
-       vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
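+       /* bits clear in the mask below are guest-owned; PGE is safe to own under EPT */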
+       vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
+       if (enable_ept)
+               vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
+       vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
 
        tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc;
        rdtscll(tsc_this);
@@ -2410,10 +2483,10 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u64 msr;
-       int ret;
+       int ret, idx;
 
        vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
-       down_read(&vcpu->kvm->slots_lock);
+       idx = srcu_read_lock(&vcpu->kvm->srcu);
        if (!init_rmode(vmx->vcpu.kvm)) {
                ret = -ENOMEM;
                goto out;
@@ -2506,7 +2579,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
        if (vmx->vpid != 0)
                vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
 
-       vmx->vcpu.arch.cr0 = 0x60000010;
+       vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
        vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
        vmx_set_cr4(&vmx->vcpu, 0);
        vmx_set_efer(&vmx->vcpu, 0);
@@ -2521,7 +2594,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
        vmx->emulation_required = 0;
 
 out:
-       up_read(&vcpu->kvm->slots_lock);
+       srcu_read_unlock(&vcpu->kvm->srcu, idx);
        return ret;
 }
 
@@ -2623,6 +2696,34 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
                                GUEST_INTR_STATE_NMI));
 }
 
+static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
+{
+       if (!cpu_has_virtual_nmis())
+               return to_vmx(vcpu)->soft_vnmi_blocked;
+       else
+               return !!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
+                         GUEST_INTR_STATE_NMI);
+}
+
+static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+       if (!cpu_has_virtual_nmis()) {
+               if (vmx->soft_vnmi_blocked != masked) {
+                       vmx->soft_vnmi_blocked = masked;
+                       vmx->vnmi_blocked_time = 0;
+               }
+       } else {
+               if (masked)
+                       vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+                                     GUEST_INTR_STATE_NMI);
+               else
+                       vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
+                                       GUEST_INTR_STATE_NMI);
+       }
+}
+
 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
        return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
@@ -2655,7 +2756,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
         * Cause the #SS fault with 0 error code in VM86 mode.
         */
        if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
-               if (emulate_instruction(vcpu, NULL, 0, 0, 0) == EMULATE_DONE)
+               if (emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DONE)
                        return 1;
        /*
         * Forward all other exceptions that are valid in real mode.
@@ -2706,15 +2807,16 @@ static void kvm_machine_check(void)
 #endif
 }
 
-static int handle_machine_check(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_machine_check(struct kvm_vcpu *vcpu)
 {
        /* already handled by vcpu_run */
        return 1;
 }
 
-static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_exception(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
+       struct kvm_run *kvm_run = vcpu->run;
        u32 intr_info, ex_no, error_code;
        unsigned long cr2, rip, dr6;
        u32 vect_info;
@@ -2724,12 +2826,17 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
 
        if (is_machine_check(intr_info))
-               return handle_machine_check(vcpu, kvm_run);
+               return handle_machine_check(vcpu);
 
        if ((vect_info & VECTORING_INFO_VALID_MASK) &&
-                                               !is_page_fault(intr_info))
-               printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
-                      "intr info 0x%x\n", __func__, vect_info, intr_info);
+           !is_page_fault(intr_info)) {
+               vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
+               vcpu->run->internal.ndata = 2;
+               vcpu->run->internal.data[0] = vect_info;
+               vcpu->run->internal.data[1] = intr_info;
+               return 0;
+       }
 
        if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
                return 1;  /* already handled by vmx_vcpu_run() */
@@ -2740,7 +2847,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        }
 
        if (is_invalid_opcode(intr_info)) {
-               er = emulate_instruction(vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD);
+               er = emulate_instruction(vcpu, 0, 0, EMULTYPE_TRAP_UD);
                if (er != EMULATE_DONE)
                        kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
@@ -2799,20 +2906,19 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        return 0;
 }
 
-static int handle_external_interrupt(struct kvm_vcpu *vcpu,
-                                    struct kvm_run *kvm_run)
+static int handle_external_interrupt(struct kvm_vcpu *vcpu)
 {
        ++vcpu->stat.irq_exits;
        return 1;
 }
 
-static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_triple_fault(struct kvm_vcpu *vcpu)
 {
-       kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
+       vcpu->run->exit_reason = KVM_EXIT_SHUTDOWN;
        return 0;
 }
 
-static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_io(struct kvm_vcpu *vcpu)
 {
        unsigned long exit_qualification;
        int size, in, string;
@@ -2823,8 +2929,7 @@ static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        string = (exit_qualification & 16) != 0;
 
        if (string) {
-               if (emulate_instruction(vcpu,
-                                       kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
+               if (emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DO_MMIO)
                        return 0;
                return 1;
        }
@@ -2834,7 +2939,7 @@ static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        port = exit_qualification >> 16;
 
        skip_emulated_instruction(vcpu);
-       return kvm_emulate_pio(vcpu, kvm_run, in, size, port);
+       return kvm_emulate_pio(vcpu, in, size, port);
 }
 
 static void
@@ -2848,7 +2953,7 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
        hypercall[2] = 0xc1;
 }
 
-static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_cr(struct kvm_vcpu *vcpu)
 {
        unsigned long exit_qualification, val;
        int cr;
@@ -2883,7 +2988,7 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                                        return 1;
                                if (cr8_prev <= cr8)
                                        return 1;
-                               kvm_run->exit_reason = KVM_EXIT_SET_TPR;
+                               vcpu->run->exit_reason = KVM_EXIT_SET_TPR;
                                return 0;
                        }
                };
@@ -2892,6 +2997,7 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                vmx_fpu_deactivate(vcpu);
                vcpu->arch.cr0 &= ~X86_CR0_TS;
                vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
+               trace_kvm_cr_write(0, vcpu->arch.cr0);
                vmx_fpu_activate(vcpu);
                skip_emulated_instruction(vcpu);
                return 1;
@@ -2911,25 +3017,29 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                }
                break;
        case 3: /* lmsw */
-               kvm_lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
+               val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
+               trace_kvm_cr_write(0, (vcpu->arch.cr0 & ~0xful) | val);
+               kvm_lmsw(vcpu, val);
 
                skip_emulated_instruction(vcpu);
                return 1;
        default:
                break;
        }
-       kvm_run->exit_reason = 0;
+       vcpu->run->exit_reason = 0;
        pr_unimpl(vcpu, "unhandled control register: op %d cr %d\n",
               (int)(exit_qualification >> 4) & 3, cr);
        return 0;
 }
 
-static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_dr(struct kvm_vcpu *vcpu)
 {
        unsigned long exit_qualification;
        unsigned long val;
        int dr, reg;
 
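+       /* debug register access is privileged; inject #GP unless CPL == 0 */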
+       if (!kvm_require_cpl(vcpu, 0))
+               return 1;
        dr = vmcs_readl(GUEST_DR7);
        if (dr & DR7_GD) {
                /*
@@ -2938,13 +3048,13 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                 * guest debugging itself.
                 */
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
-                       kvm_run->debug.arch.dr6 = vcpu->arch.dr6;
-                       kvm_run->debug.arch.dr7 = dr;
-                       kvm_run->debug.arch.pc =
+                       vcpu->run->debug.arch.dr6 = vcpu->arch.dr6;
+                       vcpu->run->debug.arch.dr7 = dr;
+                       vcpu->run->debug.arch.pc =
                                vmcs_readl(GUEST_CS_BASE) +
                                vmcs_readl(GUEST_RIP);
-                       kvm_run->debug.arch.exception = DB_VECTOR;
-                       kvm_run->exit_reason = KVM_EXIT_DEBUG;
+                       vcpu->run->debug.arch.exception = DB_VECTOR;
+                       vcpu->run->exit_reason = KVM_EXIT_DEBUG;
                        return 0;
                } else {
                        vcpu->arch.dr7 &= ~DR7_GD;
@@ -2982,7 +3092,7 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                                vcpu->arch.eff_db[dr] = val;
                        break;
                case 4 ... 5:
-                       if (vcpu->arch.cr4 & X86_CR4_DE)
+                       if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
                                kvm_queue_exception(vcpu, UD_VECTOR);
                        break;
                case 6:
@@ -3010,13 +3120,13 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        return 1;
 }
 
-static int handle_cpuid(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_cpuid(struct kvm_vcpu *vcpu)
 {
        kvm_emulate_cpuid(vcpu);
        return 1;
 }
 
-static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_rdmsr(struct kvm_vcpu *vcpu)
 {
        u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
        u64 data;
@@ -3035,7 +3145,7 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        return 1;
 }
 
-static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_wrmsr(struct kvm_vcpu *vcpu)
 {
        u32 ecx = vcpu->arch.regs[VCPU_REGS_RCX];
        u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
@@ -3052,14 +3162,12 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        return 1;
 }
 
-static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu,
-                                     struct kvm_run *kvm_run)
+static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
 {
        return 1;
 }
 
-static int handle_interrupt_window(struct kvm_vcpu *vcpu,
-                                  struct kvm_run *kvm_run)
+static int handle_interrupt_window(struct kvm_vcpu *vcpu)
 {
        u32 cpu_based_vm_exec_control;
 
@@ -3075,34 +3183,34 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu,
         * possible
         */
        if (!irqchip_in_kernel(vcpu->kvm) &&
-           kvm_run->request_interrupt_window &&
+           vcpu->run->request_interrupt_window &&
            !kvm_cpu_has_interrupt(vcpu)) {
-               kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
+               vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
                return 0;
        }
        return 1;
 }
 
-static int handle_halt(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_halt(struct kvm_vcpu *vcpu)
 {
        skip_emulated_instruction(vcpu);
        return kvm_emulate_halt(vcpu);
 }
 
-static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_vmcall(struct kvm_vcpu *vcpu)
 {
        skip_emulated_instruction(vcpu);
        kvm_emulate_hypercall(vcpu);
        return 1;
 }
 
-static int handle_vmx_insn(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_vmx_insn(struct kvm_vcpu *vcpu)
 {
        kvm_queue_exception(vcpu, UD_VECTOR);
        return 1;
 }
 
-static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_invlpg(struct kvm_vcpu *vcpu)
 {
        unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 
@@ -3111,14 +3219,14 @@ static int handle_invlpg(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        return 1;
 }
 
-static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_wbinvd(struct kvm_vcpu *vcpu)
 {
        skip_emulated_instruction(vcpu);
        /* TODO: Add support for VT-d/pass-through device */
        return 1;
 }
 
-static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_apic_access(struct kvm_vcpu *vcpu)
 {
        unsigned long exit_qualification;
        enum emulation_result er;
@@ -3127,18 +3235,18 @@ static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
        offset = exit_qualification & 0xffful;
 
-       er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
+       er = emulate_instruction(vcpu, 0, 0, 0);
 
        if (er !=  EMULATE_DONE) {
                printk(KERN_ERR
                       "Fail to handle apic access vmexit! Offset is 0x%lx\n",
                       offset);
-               return -ENOTSUPP;
+               return -ENOEXEC;
        }
        return 1;
 }
 
-static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_task_switch(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long exit_qualification;
@@ -3192,7 +3300,7 @@ static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        return 1;
 }
 
-static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_ept_violation(struct kvm_vcpu *vcpu)
 {
        unsigned long exit_qualification;
        gpa_t gpa;
@@ -3202,7 +3310,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
        if (exit_qualification & (1 << 6)) {
                printk(KERN_ERR "EPT: GPA exceeds GAW!\n");
-               return -ENOTSUPP;
+               return -EINVAL;
        }
 
        gla_validity = (exit_qualification >> 7) & 0x3;
@@ -3213,8 +3321,8 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                        vmcs_readl(GUEST_LINEAR_ADDRESS));
                printk(KERN_ERR "EPT: Exit qualification is 0x%lx\n",
                        (long unsigned int)exit_qualification);
-               kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
-               kvm_run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION;
+               vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
+               vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_VIOLATION;
                return 0;
        }
 
@@ -3284,7 +3392,7 @@ static void ept_misconfig_inspect_spte(struct kvm_vcpu *vcpu, u64 spte,
        }
 }
 
-static int handle_ept_misconfig(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 {
        u64 sptes[4];
        int nr_sptes, i;
@@ -3300,13 +3408,13 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        for (i = PT64_ROOT_LEVEL; i > PT64_ROOT_LEVEL - nr_sptes; --i)
                ept_misconfig_inspect_spte(vcpu, sptes[i-1], i);
 
-       kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
-       kvm_run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG;
+       vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
+       vcpu->run->hw.hardware_exit_reason = EXIT_REASON_EPT_MISCONFIG;
 
        return 0;
 }
 
-static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static int handle_nmi_window(struct kvm_vcpu *vcpu)
 {
        u32 cpu_based_vm_exec_control;
 
@@ -3319,36 +3427,56 @@ static int handle_nmi_window(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        return 1;
 }
 
-static void handle_invalid_guest_state(struct kvm_vcpu *vcpu,
-                               struct kvm_run *kvm_run)
+static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        enum emulation_result err = EMULATE_DONE;
-
-       local_irq_enable();
-       preempt_enable();
+       int ret = 1;
 
        while (!guest_state_valid(vcpu)) {
-               err = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
+               err = emulate_instruction(vcpu, 0, 0, 0);
 
-               if (err == EMULATE_DO_MMIO)
-                       break;
+               if (err == EMULATE_DO_MMIO) {
+                       ret = 0;
+                       goto out;
+               }
 
                if (err != EMULATE_DONE) {
                        kvm_report_emulation_failure(vcpu, "emulation failure");
-                       break;
+                       vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+                       vcpu->run->internal.ndata = 0;
+                       ret = 0;
+                       goto out;
                }
 
                if (signal_pending(current))
-                       break;
+                       goto out;
                if (need_resched())
                        schedule();
        }
 
-       preempt_disable();
-       local_irq_disable();
+       vmx->emulation_required = 0;
+out:
+       return ret;
+}
+
+/*
+ * Indicate that a vcpu is busy-waiting on a spinlock. Plain PAUSE exiting
+ * is not enabled, so we only get here on CPUs with Pause-Loop Exiting.
+ */
+static int handle_pause(struct kvm_vcpu *vcpu)
+{
+       skip_emulated_instruction(vcpu);
+       kvm_vcpu_on_spin(vcpu);
 
-       vmx->invalid_state_emulation_result = err;
+       return 1;
+}
+
+static int handle_invalid_op(struct kvm_vcpu *vcpu)
+{
+       kvm_queue_exception(vcpu, UD_VECTOR);
+       return 1;
 }
 
 /*
@@ -3356,8 +3484,7 @@ static void handle_invalid_guest_state(struct kvm_vcpu *vcpu,
  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
  * to be done to userspace and return 0.
  */
-static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
-                                     struct kvm_run *kvm_run) = {
+static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
        [EXIT_REASON_EXCEPTION_NMI]           = handle_exception,
        [EXIT_REASON_EXTERNAL_INTERRUPT]      = handle_external_interrupt,
        [EXIT_REASON_TRIPLE_FAULT]            = handle_triple_fault,
@@ -3388,6 +3515,9 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
        [EXIT_REASON_MCE_DURING_VMENTRY]      = handle_machine_check,
        [EXIT_REASON_EPT_VIOLATION]           = handle_ept_violation,
        [EXIT_REASON_EPT_MISCONFIG]           = handle_ept_misconfig,
+       [EXIT_REASON_PAUSE_INSTRUCTION]       = handle_pause,
+       [EXIT_REASON_MWAIT_INSTRUCTION]       = handle_invalid_op,
+       [EXIT_REASON_MONITOR_INSTRUCTION]     = handle_invalid_op,
 };
 
 static const int kvm_vmx_max_exit_handlers =
@@ -3397,7 +3527,7 @@ static const int kvm_vmx_max_exit_handlers =
  * The guest has exited.  See if we can fix it or if we need userspace
  * assistance.
  */
-static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
+static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 exit_reason = vmx->exit_reason;
@@ -3405,13 +3535,9 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
        trace_kvm_exit(exit_reason, kvm_rip_read(vcpu));
 
-       /* If we need to emulate an MMIO from handle_invalid_guest_state
-        * we just return 0 */
-       if (vmx->emulation_required && emulate_invalid_guest_state) {
-               if (guest_state_valid(vcpu))
-                       vmx->emulation_required = 0;
-               return vmx->invalid_state_emulation_result != EMULATE_DO_MMIO;
-       }
+       /* If guest state is invalid, start emulating */
+       if (vmx->emulation_required && emulate_invalid_guest_state)
+               return handle_invalid_guest_state(vcpu);
 
        /* CR3 accesses don't cause a VM exit in paging mode, so we need
         * to sync with the guest's real CR3. */
@@ -3419,8 +3545,8 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
                vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
 
        if (unlikely(vmx->fail)) {
-               kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
-               kvm_run->fail_entry.hardware_entry_failure_reason
+               vcpu->run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+               vcpu->run->fail_entry.hardware_entry_failure_reason
                        = vmcs_read32(VM_INSTRUCTION_ERROR);
                return 0;
        }
@@ -3453,10 +3579,10 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
        if (exit_reason < kvm_vmx_max_exit_handlers
            && kvm_vmx_exit_handlers[exit_reason])
-               return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
+               return kvm_vmx_exit_handlers[exit_reason](vcpu);
        else {
-               kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
-               kvm_run->hw.hardware_exit_reason = exit_reason;
+               vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
+               vcpu->run->hw.hardware_exit_reason = exit_reason;
        }
        return 0;
 }
@@ -3594,23 +3720,18 @@ static void fixup_rmode_irq(struct vcpu_vmx *vmx)
 #define Q "l"
 #endif
 
-static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-       if (enable_ept && is_paging(vcpu)) {
-               vmcs_writel(GUEST_CR3, vcpu->arch.cr3);
-               ept_load_pdptrs(vcpu);
-       }
        /* Record the guest's net vcpu time for enforced NMI injections. */
        if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
                vmx->entry_time = ktime_get();
 
-       /* Handle invalid guest state instead of entering VMX */
-       if (vmx->emulation_required && emulate_invalid_guest_state) {
-               handle_invalid_guest_state(vcpu, kvm_run);
+       /* Don't enter VMX if guest state is invalid; let the exit handler
+          start emulation until we arrive back at a valid state */
+       if (vmx->emulation_required && emulate_invalid_guest_state)
                return;
-       }
 
        if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
                vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
@@ -3630,7 +3751,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         */
        vmcs_writel(HOST_CR0, read_cr0());
 
-       set_debugreg(vcpu->arch.dr6, 6);
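+       /* dr6 only needs save/restore when the guest uses debug registers */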
+       if (vcpu->arch.switch_db_regs)
+               set_debugreg(vcpu->arch.dr6, 6);
 
        asm(
                /* Store host registers */
@@ -3732,7 +3854,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                                  | (1 << VCPU_EXREG_PDPTR));
        vcpu->arch.regs_dirty = 0;
 
-       get_debugreg(vcpu->arch.dr6, 6);
+       if (vcpu->arch.switch_db_regs)
+               get_debugreg(vcpu->arch.dr6, 6);
 
        vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
        if (vmx->rmode.irq.pending)
@@ -3767,7 +3890,6 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
                __clear_bit(vmx->vpid, vmx_vpid_bitmap);
        spin_unlock(&vmx_vpid_lock);
        vmx_free_vmcs(vcpu);
-       kfree(vmx->host_msrs);
        kfree(vmx->guest_msrs);
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vmx);
@@ -3794,10 +3916,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
                goto uninit_vcpu;
        }
 
-       vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
-       if (!vmx->host_msrs)
-               goto free_guest_msrs;
-
        vmx->vmcs = alloc_vmcs();
        if (!vmx->vmcs)
                goto free_msrs;
@@ -3828,8 +3946,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 free_vmcs:
        free_vmcs(vmx->vmcs);
 free_msrs:
-       kfree(vmx->host_msrs);
-free_guest_msrs:
        kfree(vmx->guest_msrs);
 uninit_vcpu:
        kvm_vcpu_uninit(&vmx->vcpu);
@@ -3885,29 +4001,85 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
        return ret;
 }
 
+#define _ER(x) { EXIT_REASON_##x, #x }
+
 static const struct trace_print_flags vmx_exit_reasons_str[] = {
-       { EXIT_REASON_EXCEPTION_NMI,           "exception" },
-       { EXIT_REASON_EXTERNAL_INTERRUPT,      "ext_irq" },
-       { EXIT_REASON_TRIPLE_FAULT,            "triple_fault" },
-       { EXIT_REASON_NMI_WINDOW,              "nmi_window" },
-       { EXIT_REASON_IO_INSTRUCTION,          "io_instruction" },
-       { EXIT_REASON_CR_ACCESS,               "cr_access" },
-       { EXIT_REASON_DR_ACCESS,               "dr_access" },
-       { EXIT_REASON_CPUID,                   "cpuid" },
-       { EXIT_REASON_MSR_READ,                "rdmsr" },
-       { EXIT_REASON_MSR_WRITE,               "wrmsr" },
-       { EXIT_REASON_PENDING_INTERRUPT,       "interrupt_window" },
-       { EXIT_REASON_HLT,                     "halt" },
-       { EXIT_REASON_INVLPG,                  "invlpg" },
-       { EXIT_REASON_VMCALL,                  "hypercall" },
-       { EXIT_REASON_TPR_BELOW_THRESHOLD,     "tpr_below_thres" },
-       { EXIT_REASON_APIC_ACCESS,             "apic_access" },
-       { EXIT_REASON_WBINVD,                  "wbinvd" },
-       { EXIT_REASON_TASK_SWITCH,             "task_switch" },
-       { EXIT_REASON_EPT_VIOLATION,           "ept_violation" },
+       _ER(EXCEPTION_NMI),
+       _ER(EXTERNAL_INTERRUPT),
+       _ER(TRIPLE_FAULT),
+       _ER(PENDING_INTERRUPT),
+       _ER(NMI_WINDOW),
+       _ER(TASK_SWITCH),
+       _ER(CPUID),
+       _ER(HLT),
+       _ER(INVLPG),
+       _ER(RDPMC),
+       _ER(RDTSC),
+       _ER(VMCALL),
+       _ER(VMCLEAR),
+       _ER(VMLAUNCH),
+       _ER(VMPTRLD),
+       _ER(VMPTRST),
+       _ER(VMREAD),
+       _ER(VMRESUME),
+       _ER(VMWRITE),
+       _ER(VMOFF),
+       _ER(VMON),
+       _ER(CR_ACCESS),
+       _ER(DR_ACCESS),
+       _ER(IO_INSTRUCTION),
+       _ER(MSR_READ),
+       _ER(MSR_WRITE),
+       _ER(MWAIT_INSTRUCTION),
+       _ER(MONITOR_INSTRUCTION),
+       _ER(PAUSE_INSTRUCTION),
+       _ER(MCE_DURING_VMENTRY),
+       _ER(TPR_BELOW_THRESHOLD),
+       _ER(APIC_ACCESS),
+       _ER(EPT_VIOLATION),
+       _ER(EPT_MISCONFIG),
+       _ER(WBINVD),
        { -1, NULL }
 };
 
+#undef _ER
+
+static int vmx_get_lpage_level(void)
+{
+       if (enable_ept && !cpu_has_vmx_ept_1g_page())
+               return PT_DIRECTORY_LEVEL;
+       else
+               /* both shadow paging and 1GB-capable EPT support 1GB pages */
+               return PT_PDPE_LEVEL;
+}
+
+static inline u32 bit(int bitno)
+{
+       return 1 << (bitno & 31);
+}
+
+static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *best;
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       u32 exec_control;
+
+       vmx->rdtscp_enabled = false;
+       if (vmx_rdtscp_supported()) {
+               exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+               if (exec_control & SECONDARY_EXEC_RDTSCP) {
+                       best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
+                       if (best && (best->edx & bit(X86_FEATURE_RDTSCP)))
+                               vmx->rdtscp_enabled = true;
+                       else {
+                               exec_control &= ~SECONDARY_EXEC_RDTSCP;
+                               vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
+                                               exec_control);
+                       }
+               }
+       }
+}
+
 static struct kvm_x86_ops vmx_x86_ops = {
        .cpu_has_kvm_support = cpu_has_kvm_support,
        .disabled_by_bios = vmx_disabled_by_bios,
@@ -3960,6 +4132,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .queue_exception = vmx_queue_exception,
        .interrupt_allowed = vmx_interrupt_allowed,
        .nmi_allowed = vmx_nmi_allowed,
+       .get_nmi_mask = vmx_get_nmi_mask,
+       .set_nmi_mask = vmx_set_nmi_mask,
        .enable_nmi_window = enable_nmi_window,
        .enable_irq_window = enable_irq_window,
        .update_cr8_intercept = update_cr8_intercept,
@@ -3969,11 +4143,21 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .get_mt_mask = vmx_get_mt_mask,
 
        .exit_reasons_str = vmx_exit_reasons_str,
+       .get_lpage_level = vmx_get_lpage_level,
+
+       .cpuid_update = vmx_cpuid_update,
+
+       .rdtscp_supported = vmx_rdtscp_supported,
 };
 
 static int __init vmx_init(void)
 {
-       int r;
+       int r, i;
+
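+       /* cache the host's EFER for update_transition_efer() */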
+       rdmsrl_safe(MSR_EFER, &host_efer);
+
+       for (i = 0; i < NR_VMX_MSR; ++i)
+               kvm_define_shared_msr(i, vmx_msr_index[i]);
 
        vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
        if (!vmx_io_bitmap_a)
@@ -4035,8 +4219,6 @@ static int __init vmx_init(void)
        if (bypass_guest_pf)
                kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
 
-       ept_sync_global();
-
        return 0;
 
 out3: