KVM: MMU: Remove user access when allowing kernel access to gpte.w=0 page
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1773017..859a01a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -26,6 +26,8 @@
 #include <linux/sched.h>
 #include <linux/moduleparam.h>
 #include <linux/ftrace_event.h>
+#include <linux/slab.h>
+#include <linux/tboot.h>
 #include "kvm_cache_regs.h"
 #include "x86.h"
 
@@ -61,6 +63,23 @@ module_param_named(unrestricted_guest,
 static int __read_mostly emulate_invalid_guest_state = 0;
 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
 
+#define KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST                          \
+       (X86_CR0_WP | X86_CR0_NE | X86_CR0_NW | X86_CR0_CD)
+#define KVM_GUEST_CR0_MASK                                             \
+       (KVM_GUEST_CR0_MASK_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
+#define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST                                \
+       (X86_CR0_WP | X86_CR0_NE)
+#define KVM_VM_CR0_ALWAYS_ON                                           \
+       (KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST | X86_CR0_PG | X86_CR0_PE)
+#define KVM_CR4_GUEST_OWNED_BITS                                     \
+       (X86_CR4_PVI | X86_CR4_DE | X86_CR4_PCE | X86_CR4_OSFXSR      \
+        | X86_CR4_OSXMMEXCPT)
+
+#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
+#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)
+
+#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))
+
 /*
  * These 2 parameters are used to config the controls for Pause-Loop Exiting:
  * ple_gap:    upper bound on the amount of time between two successive
@@ -80,12 +99,20 @@ module_param(ple_gap, int, S_IRUGO);
 static int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW;
 module_param(ple_window, int, S_IRUGO);
 
+#define NR_AUTOLOAD_MSRS 1
+
 struct vmcs {
        u32 revision_id;
        u32 abort;
        char data[0];
 };
 
+struct shared_msr_entry {
+       unsigned index;
+       u64 data;
+       u64 mask;
+};
+
 struct vcpu_vmx {
        struct kvm_vcpu       vcpu;
        struct list_head      local_vcpus_link;
@@ -93,25 +120,28 @@ struct vcpu_vmx {
        int                   launched;
        u8                    fail;
        u32                   idt_vectoring_info;
-       struct kvm_msr_entry *guest_msrs;
-       struct kvm_msr_entry *host_msrs;
+       struct shared_msr_entry *guest_msrs;
        int                   nmsrs;
        int                   save_nmsrs;
-       int                   msr_offset_efer;
 #ifdef CONFIG_X86_64
-       int                   msr_offset_kernel_gs_base;
+       u64                   msr_host_kernel_gs_base;
+       u64                   msr_guest_kernel_gs_base;
 #endif
        struct vmcs          *vmcs;
+       struct msr_autoload {
+               unsigned nr;
+               struct vmx_msr_entry guest[NR_AUTOLOAD_MSRS];
+               struct vmx_msr_entry host[NR_AUTOLOAD_MSRS];
+       } msr_autoload;
        struct {
                int           loaded;
                u16           fs_sel, gs_sel, ldt_sel;
                int           gs_ldt_reload_needed;
                int           fs_reload_needed;
-               int           guest_efer_loaded;
        } host_state;
        struct {
                int vm86_active;
-               u8 save_iopl;
+               ulong save_rflags;
                struct kvm_save_segment {
                        u16 selector;
                        unsigned long base;
@@ -132,6 +162,8 @@ struct vcpu_vmx {
        ktime_t entry_time;
        s64 vnmi_blocked_time;
        u32 exit_reason;
+
+       bool rdtscp_enabled;
 };
 
 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
@@ -194,6 +226,8 @@ static struct kvm_vmx_segment_field {
        VMX_SEGMENT_FIELD(LDTR),
 };
 
+static u64 host_efer;
+
 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
 
 /*
@@ -202,78 +236,62 @@ static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
  */
 static const u32 vmx_msr_index[] = {
 #ifdef CONFIG_X86_64
-       MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
+       MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
 #endif
-       MSR_EFER, MSR_K6_STAR,
+       MSR_EFER, MSR_TSC_AUX, MSR_K6_STAR,
 };
 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
 
-static void load_msrs(struct kvm_msr_entry *e, int n)
-{
-       int i;
-
-       for (i = 0; i < n; ++i)
-               wrmsrl(e[i].index, e[i].data);
-}
-
-static void save_msrs(struct kvm_msr_entry *e, int n)
-{
-       int i;
-
-       for (i = 0; i < n; ++i)
-               rdmsrl(e[i].index, e[i].data);
-}
-
-static inline int is_page_fault(u32 intr_info)
+static inline bool is_page_fault(u32 intr_info)
 {
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
                             INTR_INFO_VALID_MASK)) ==
                (INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
 }
 
-static inline int is_no_device(u32 intr_info)
+static inline bool is_no_device(u32 intr_info)
 {
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
                             INTR_INFO_VALID_MASK)) ==
                (INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
 }
 
-static inline int is_invalid_opcode(u32 intr_info)
+static inline bool is_invalid_opcode(u32 intr_info)
 {
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
                             INTR_INFO_VALID_MASK)) ==
                (INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
 }
 
-static inline int is_external_interrupt(u32 intr_info)
+static inline bool is_external_interrupt(u32 intr_info)
 {
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK))
                == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
 }
 
-static inline int is_machine_check(u32 intr_info)
+static inline bool is_machine_check(u32 intr_info)
 {
        return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
                             INTR_INFO_VALID_MASK)) ==
                (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK);
 }
 
-static inline int cpu_has_vmx_msr_bitmap(void)
+static inline bool cpu_has_vmx_msr_bitmap(void)
 {
        return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS;
 }
 
-static inline int cpu_has_vmx_tpr_shadow(void)
+static inline bool cpu_has_vmx_tpr_shadow(void)
 {
        return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW;
 }
 
-static inline int vm_need_tpr_shadow(struct kvm *kvm)
+static inline bool vm_need_tpr_shadow(struct kvm *kvm)
 {
        return (cpu_has_vmx_tpr_shadow()) && (irqchip_in_kernel(kvm));
 }
 
-static inline int cpu_has_secondary_exec_ctrls(void)
+static inline bool cpu_has_secondary_exec_ctrls(void)
 {
        return vmcs_config.cpu_based_exec_ctrl &
                CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
@@ -293,71 +311,80 @@ static inline bool cpu_has_vmx_flexpriority(void)
 
 static inline bool cpu_has_vmx_ept_execute_only(void)
 {
-       return !!(vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT);
+       return vmx_capability.ept & VMX_EPT_EXECUTE_ONLY_BIT;
 }
 
 static inline bool cpu_has_vmx_eptp_uncacheable(void)
 {
-       return !!(vmx_capability.ept & VMX_EPTP_UC_BIT);
+       return vmx_capability.ept & VMX_EPTP_UC_BIT;
 }
 
 static inline bool cpu_has_vmx_eptp_writeback(void)
 {
-       return !!(vmx_capability.ept & VMX_EPTP_WB_BIT);
+       return vmx_capability.ept & VMX_EPTP_WB_BIT;
 }
 
 static inline bool cpu_has_vmx_ept_2m_page(void)
 {
-       return !!(vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT);
+       return vmx_capability.ept & VMX_EPT_2MB_PAGE_BIT;
+}
+
+static inline bool cpu_has_vmx_ept_1g_page(void)
+{
+       return vmx_capability.ept & VMX_EPT_1GB_PAGE_BIT;
 }
 
-static inline int cpu_has_vmx_invept_individual_addr(void)
+static inline bool cpu_has_vmx_invept_individual_addr(void)
 {
-       return !!(vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT);
+       return vmx_capability.ept & VMX_EPT_EXTENT_INDIVIDUAL_BIT;
 }
 
-static inline int cpu_has_vmx_invept_context(void)
+static inline bool cpu_has_vmx_invept_context(void)
 {
-       return !!(vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT);
+       return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
 }
 
-static inline int cpu_has_vmx_invept_global(void)
+static inline bool cpu_has_vmx_invept_global(void)
 {
-       return !!(vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT);
+       return vmx_capability.ept & VMX_EPT_EXTENT_GLOBAL_BIT;
 }
 
-static inline int cpu_has_vmx_ept(void)
+static inline bool cpu_has_vmx_ept(void)
 {
        return vmcs_config.cpu_based_2nd_exec_ctrl &
                SECONDARY_EXEC_ENABLE_EPT;
 }
 
-static inline int cpu_has_vmx_unrestricted_guest(void)
+static inline bool cpu_has_vmx_unrestricted_guest(void)
 {
        return vmcs_config.cpu_based_2nd_exec_ctrl &
                SECONDARY_EXEC_UNRESTRICTED_GUEST;
 }
 
-static inline int cpu_has_vmx_ple(void)
+static inline bool cpu_has_vmx_ple(void)
 {
        return vmcs_config.cpu_based_2nd_exec_ctrl &
                SECONDARY_EXEC_PAUSE_LOOP_EXITING;
 }
 
-static inline int vm_need_virtualize_apic_accesses(struct kvm *kvm)
+static inline bool vm_need_virtualize_apic_accesses(struct kvm *kvm)
 {
-       return flexpriority_enabled &&
-               (cpu_has_vmx_virtualize_apic_accesses()) &&
-               (irqchip_in_kernel(kvm));
+       return flexpriority_enabled && irqchip_in_kernel(kvm);
 }
 
-static inline int cpu_has_vmx_vpid(void)
+static inline bool cpu_has_vmx_vpid(void)
 {
        return vmcs_config.cpu_based_2nd_exec_ctrl &
                SECONDARY_EXEC_ENABLE_VPID;
 }
 
-static inline int cpu_has_virtual_nmis(void)
+static inline bool cpu_has_vmx_rdtscp(void)
+{
+       return vmcs_config.cpu_based_2nd_exec_ctrl &
+               SECONDARY_EXEC_RDTSCP;
+}
+
+static inline bool cpu_has_virtual_nmis(void)
 {
        return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
 }
@@ -372,7 +399,7 @@ static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
        int i;
 
        for (i = 0; i < vmx->nmsrs; ++i)
-               if (vmx->guest_msrs[i].index == msr)
+               if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
                        return i;
        return -1;
 }
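
With the new struct shared_msr_entry, guest_msrs[i].index no longer holds a raw MSR number but a slot in vmx_msr_index[] (the slot is what the shared-MSR machinery keys on), hence the extra table lookup added above. A minimal userspace sketch of the indirection, with made-up table contents:

#include <stdio.h>

/* Illustrative stand-ins for vmx_msr_index[] and shared_msr_entry:
 * entry.index is a slot in msr_table[], not the MSR number itself. */
static const unsigned msr_table[] = { 0xc0000084, 0xc0000082, 0xc0000080 };

struct entry {
        unsigned index;
};

static int find_msr_index(const struct entry *e, int n, unsigned msr)
{
        int i;

        for (i = 0; i < n; ++i)
                if (msr_table[e[i].index] == msr)       /* double indirection */
                        return i;
        return -1;
}

int main(void)
{
        struct entry e[] = { { 2 }, { 0 } };

        printf("%d\n", find_msr_index(e, 2, 0xc0000080));       /* prints 0 */
        return 0;
}
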
@@ -403,7 +430,7 @@ static inline void __invept(int ext, u64 eptp, gpa_t gpa)
                        : : "a" (&operand), "c" (ext) : "cc", "memory");
 }
 
-static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
+static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
 {
        int i;
 
@@ -561,50 +588,81 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 {
        u32 eb;
 
-       eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR);
-       if (!vcpu->fpu_active)
-               eb |= 1u << NM_VECTOR;
-       /*
-        * Unconditionally intercept #DB so we can maintain dr6 without
-        * reading it every exit.
-        */
-       eb |= 1u << DB_VECTOR;
-       if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
-               if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
-                       eb |= 1u << BP_VECTOR;
-       }
+       eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
+            (1u << NM_VECTOR) | (1u << DB_VECTOR);
+       if ((vcpu->guest_debug &
+            (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
+           (KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP))
+               eb |= 1u << BP_VECTOR;
        if (to_vmx(vcpu)->rmode.vm86_active)
                eb = ~0;
        if (enable_ept)
                eb &= ~(1u << PF_VECTOR); /* bypass_guest_pf = 0 */
+       if (vcpu->fpu_active)
+               eb &= ~(1u << NM_VECTOR);
        vmcs_write32(EXCEPTION_BITMAP, eb);
 }
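
The rewritten update_exception_bitmap() is subtractive: it starts from the full set of always-interesting vectors (#PF, #UD, #MC, #NM, #DB) and strips what can be delegated — #PF when EPT resolves guest page faults, #NM once the guest owns the FPU — instead of accumulating bits conditionally. A self-contained sketch of the same pattern (the vector numbers are architectural, the rest is illustrative):

#include <stdio.h>

enum { DB = 1, BP = 3, UD = 6, NM = 7, PF = 14, MC = 18 };

/* Each bit n of the exception bitmap means "VM-exit on vector n". */
static unsigned exception_bitmap(int use_sw_bp, int vm86, int ept,
                                 int fpu_active)
{
        unsigned eb = (1u << PF) | (1u << UD) | (1u << MC) |
                      (1u << NM) | (1u << DB);

        if (use_sw_bp)
                eb |= 1u << BP;         /* debugger wants #BP exits */
        if (vm86)
                eb = ~0u;               /* emulated real mode: trap everything */
        if (ept)
                eb &= ~(1u << PF);      /* EPT handles page faults */
        if (fpu_active)
                eb &= ~(1u << NM);      /* guest owns the FPU, no lazy #NM */
        return eb;
}

int main(void)
{
        printf("%#x\n", exception_bitmap(0, 0, 1, 1));  /* 0x40042 */
        return 0;
}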
 
+static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr)
+{
+       unsigned i;
+       struct msr_autoload *m = &vmx->msr_autoload;
+
+       for (i = 0; i < m->nr; ++i)
+               if (m->guest[i].index == msr)
+                       break;
+
+       if (i == m->nr)
+               return;
+       --m->nr;
+       m->guest[i] = m->guest[m->nr];
+       m->host[i] = m->host[m->nr];
+       vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
+       vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+}
+
+static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
+                                 u64 guest_val, u64 host_val)
+{
+       unsigned i;
+       struct msr_autoload *m = &vmx->msr_autoload;
+
+       for (i = 0; i < m->nr; ++i)
+               if (m->guest[i].index == msr)
+                       break;
+
+       if (i == m->nr) {
+               ++m->nr;
+               vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->nr);
+               vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->nr);
+       }
+
+       m->guest[i].index = msr;
+       m->guest[i].value = guest_val;
+       m->host[i].index = msr;
+       m->host[i].value = host_val;
+}
+
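
These two helpers manage the VMCS MSR autoload lists that vmx_vcpu_setup() wires up below via VM_ENTRY_MSR_LOAD_ADDR and VM_EXIT_MSR_LOAD_ADDR: on VM entry the CPU loads every guest[] entry, on VM exit every host[] entry, so an MSR present in both arrays is swapped atomically with the world switch. Removal keeps the arrays dense by moving the last pair into the hole. A standalone model of the compaction idiom, with the VMCS count updates reduced to a comment:

#include <stdio.h>

#define NR 4    /* the kernel uses NR_AUTOLOAD_MSRS, currently 1 */

struct pair { unsigned index; unsigned long long value; };

static struct pair guest[NR], host[NR];
static unsigned nr;

/* Remove by overwriting the hole with the last element; order is lost,
 * which is fine since the CPU processes the whole list every time. */
static void clear_switch_msr(unsigned msr)
{
        unsigned i;

        for (i = 0; i < nr; ++i)
                if (guest[i].index == msr)
                        break;
        if (i == nr)
                return;
        --nr;
        guest[i] = guest[nr];
        host[i] = host[nr];
        /* the kernel writes the new count to the VMCS here */
}

static void add_switch_msr(unsigned msr, unsigned long long g,
                           unsigned long long h)
{
        unsigned i;

        for (i = 0; i < nr; ++i)
                if (guest[i].index == msr)
                        break;
        if (i == nr)
                ++nr;   /* append (NR keeps this in range for EFER alone) */
        guest[i].index = msr; guest[i].value = g;
        host[i].index = msr;  host[i].value = h;
}

int main(void)
{
        add_switch_msr(0xc0000080, 0x500, 0xd01);       /* EFER */
        add_switch_msr(0x10, 1, 2);
        clear_switch_msr(0xc0000080);
        printf("nr=%u first=%#x\n", nr, guest[0].index); /* nr=1 first=0x10 */
        return 0;
}
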
 static void reload_tss(void)
 {
        /*
         * VT restores TR but not its size.  Useless.
         */
-       struct descriptor_table gdt;
+       struct desc_ptr gdt;
        struct desc_struct *descs;
 
-       kvm_get_gdt(&gdt);
-       descs = (void *)gdt.base;
+       native_store_gdt(&gdt);
+       descs = (void *)gdt.address;
        descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
        load_TR_desc();
 }
 
-static void load_transition_efer(struct vcpu_vmx *vmx)
+static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset)
 {
-       int efer_offset = vmx->msr_offset_efer;
-       u64 host_efer;
        u64 guest_efer;
        u64 ignore_bits;
 
-       if (efer_offset < 0)
-               return;
-       host_efer = vmx->host_msrs[efer_offset].data;
-       guest_efer = vmx->guest_msrs[efer_offset].data;
+       guest_efer = vmx->vcpu.arch.efer;
 
        /*
         * NX is emulated; LMA and LME handled by hardware; SCE meaningless
@@ -617,27 +675,65 @@ static void load_transition_efer(struct vcpu_vmx *vmx)
        if (guest_efer & EFER_LMA)
                ignore_bits &= ~(u64)EFER_SCE;
 #endif
-       if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits))
-               return;
-
-       vmx->host_state.guest_efer_loaded = 1;
        guest_efer &= ~ignore_bits;
        guest_efer |= host_efer & ignore_bits;
-       wrmsrl(MSR_EFER, guest_efer);
-       vmx->vcpu.stat.efer_reload++;
+       vmx->guest_msrs[efer_offset].data = guest_efer;
+       vmx->guest_msrs[efer_offset].mask = ~ignore_bits;
+
+       clear_atomic_switch_msr(vmx, MSR_EFER);
+       /* On ept, can't emulate nx, and must switch nx atomically */
+       if (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX)) {
+               guest_efer = vmx->vcpu.arch.efer;
+               if (!(guest_efer & EFER_LMA))
+                       guest_efer &= ~EFER_LME;
+               add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer);
+               return false;
+       }
+
+       return true;
 }
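
The "can't emulate nx" comment is the crux of the split above. Without EPT, NX is emulated through the shadow page tables, so hardware never needs the guest's EFER.NX and EFER can take the cheap shared-MSR path: ignore_bits hides the bits hardware already manages (LMA/LME) or that are dormant (SCE outside long mode), and only the remainder is restored lazily on return to userspace. With EPT the guest's translations are real, so a guest/host NX mismatch forces EFER onto the atomic autoload list instead. A rough model of just that decision (illustrative, not the kernel's function):

#include <stdbool.h>
#include <stdio.h>

#define EFER_NX (1ULL << 11)

/* true: lazy shared-MSR restore suffices;
 * false: EFER must be switched atomically on every entry/exit. */
static bool efer_can_be_lazy(unsigned long long guest_efer,
                             unsigned long long host_efer, bool ept)
{
        /* Under EPT the guest's NX bit is live in hardware. */
        if (ept && ((guest_efer ^ host_efer) & EFER_NX))
                return false;
        return true;
}

int main(void)
{
        printf("%d\n", efer_can_be_lazy(0, EFER_NX, true));     /* 0 */
        printf("%d\n", efer_can_be_lazy(0, EFER_NX, false));    /* 1 */
        return 0;
}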
 
-static void reload_host_efer(struct vcpu_vmx *vmx)
+static unsigned long segment_base(u16 selector)
 {
-       if (vmx->host_state.guest_efer_loaded) {
-               vmx->host_state.guest_efer_loaded = 0;
-               load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
+       struct desc_ptr gdt;
+       struct desc_struct *d;
+       unsigned long table_base;
+       unsigned long v;
+
+       if (!(selector & ~3))
+               return 0;
+
+       native_store_gdt(&gdt);
+       table_base = gdt.address;
+
+       if (selector & 4) {           /* from ldt */
+               u16 ldt_selector = kvm_read_ldt();
+
+               if (!(ldt_selector & ~3))
+                       return 0;
+
+               table_base = segment_base(ldt_selector);
        }
+       d = (struct desc_struct *)(table_base + (selector & ~7));
+       v = get_desc_base(d);
+#ifdef CONFIG_X86_64
+       if (d->s == 0 && (d->type == 2 || d->type == 9 || d->type == 11))
+               v |= ((unsigned long)((struct ldttss_desc64 *)d)->base3) << 32;
+#endif
+       return v;
+}
+
+static inline unsigned long kvm_read_tr_base(void)
+{
+       u16 tr;
+       asm("str %0" : "=g"(tr));
+       return segment_base(tr);
 }
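
segment_base() walks the descriptor tables by hand, so it leans on the selector layout: bits 1:0 are the RPL, bit 2 picks GDT versus LDT, and bits 15:3 index 8-byte descriptors, making selector & ~7 the byte offset into the table. A selector whose index and table bit are both zero is a null selector whatever its RPL, which is what the !(selector & ~3) early returns test. A small decoder for reference:

#include <stdio.h>

/* Decode an x86 segment selector into its parts (illustrative). */
static void decode_selector(unsigned short sel)
{
        unsigned rpl   = sel & 3;        /* requested privilege level */
        unsigned ti    = (sel >> 2) & 1; /* 0 = GDT, 1 = LDT */
        unsigned index = sel >> 3;       /* descriptor slot */

        printf("sel=%#x rpl=%u table=%s index=%u offset=%u\n",
               (unsigned)sel, rpl, ti ? "LDT" : "GDT", index, sel & ~7u);
}

int main(void)
{
        decode_selector(0x10);  /* GDT slot 2, ring 0 */
        decode_selector(0x3f);  /* LDT slot 7, ring 3 */
        return 0;
}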
 
 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
+       int i;
 
        if (vmx->host_state.loaded)
                return;
@@ -674,13 +770,15 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 #endif
 
 #ifdef CONFIG_X86_64
-       if (is_long_mode(&vmx->vcpu))
-               save_msrs(vmx->host_msrs +
-                         vmx->msr_offset_kernel_gs_base, 1);
-
+       if (is_long_mode(&vmx->vcpu)) {
+               rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+               wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+       }
 #endif
-       load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
-       load_transition_efer(vmx);
+       for (i = 0; i < vmx->save_nmsrs; ++i)
+               kvm_set_shared_msr(vmx->guest_msrs[i].index,
+                                  vmx->guest_msrs[i].data,
+                                  vmx->guest_msrs[i].mask);
 }
 
 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
@@ -708,9 +806,12 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
                local_irq_restore(flags);
        }
        reload_tss();
-       save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
-       load_msrs(vmx->host_msrs, vmx->save_nmsrs);
-       reload_host_efer(vmx);
+#ifdef CONFIG_X86_64
+       if (is_long_mode(&vmx->vcpu)) {
+               rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+               wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+       }
+#endif
 }
 
 static void vmx_load_host_state(struct vcpu_vmx *vmx)
@@ -753,7 +854,7 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        }
 
        if (vcpu->cpu != cpu) {
-               struct descriptor_table dt;
+               struct desc_ptr dt;
                unsigned long sysenter_esp;
 
                vcpu->cpu = cpu;
@@ -762,8 +863,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
                 * processors.
                 */
                vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
-               kvm_get_gdt(&dt);
-               vmcs_writel(HOST_GDTR_BASE, dt.base);   /* 22.2.4 */
+               native_store_gdt(&dt);
+               vmcs_writel(HOST_GDTR_BASE, dt.address);   /* 22.2.4 */
 
                rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
                vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
@@ -787,38 +888,51 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
 
 static void vmx_fpu_activate(struct kvm_vcpu *vcpu)
 {
+       ulong cr0;
+
        if (vcpu->fpu_active)
                return;
        vcpu->fpu_active = 1;
-       vmcs_clear_bits(GUEST_CR0, X86_CR0_TS);
-       if (vcpu->arch.cr0 & X86_CR0_TS)
-               vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
+       cr0 = vmcs_readl(GUEST_CR0);
+       cr0 &= ~(X86_CR0_TS | X86_CR0_MP);
+       cr0 |= kvm_read_cr0_bits(vcpu, X86_CR0_TS | X86_CR0_MP);
+       vmcs_writel(GUEST_CR0, cr0);
        update_exception_bitmap(vcpu);
+       vcpu->arch.cr0_guest_owned_bits = X86_CR0_TS;
+       vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
 }
 
+static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
+
 static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
 {
-       if (!vcpu->fpu_active)
-               return;
-       vcpu->fpu_active = 0;
-       vmcs_set_bits(GUEST_CR0, X86_CR0_TS);
+       vmx_decache_cr0_guest_bits(vcpu);
+       vmcs_set_bits(GUEST_CR0, X86_CR0_TS | X86_CR0_MP);
        update_exception_bitmap(vcpu);
+       vcpu->arch.cr0_guest_owned_bits = 0;
+       vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
+       vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
 }
 
 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
 {
-       unsigned long rflags;
+       unsigned long rflags, save_rflags;
 
        rflags = vmcs_readl(GUEST_RFLAGS);
-       if (to_vmx(vcpu)->rmode.vm86_active)
-               rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
+       if (to_vmx(vcpu)->rmode.vm86_active) {
+               rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
+               save_rflags = to_vmx(vcpu)->rmode.save_rflags;
+               rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
+       }
        return rflags;
 }
 
 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
-       if (to_vmx(vcpu)->rmode.vm86_active)
+       if (to_vmx(vcpu)->rmode.vm86_active) {
+               to_vmx(vcpu)->rmode.save_rflags = rflags;
                rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
+       }
        vmcs_writel(GUEST_RFLAGS, rflags);
 }
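
While a real-mode guest runs under vm86, hardware must see VM=1 and IOPL=3 in guest RFLAGS, so the guest's own values of those two bits are parked in rmode.save_rflags — replacing the old IOPL-only save_iopl, which lost the guest's VM bit — and spliced back in on reads. A round-trip sketch of the merge:

#include <stdio.h>

#define X86_EFLAGS_IOPL 0x3000UL
#define X86_EFLAGS_VM   0x20000UL
#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))

static unsigned long hw_rflags;    /* models the GUEST_RFLAGS VMCS field */
static unsigned long save_rflags;  /* models vmx->rmode.save_rflags */

static void set_rflags_vm86(unsigned long rflags)
{
        save_rflags = rflags;   /* remember the guest's view */
        hw_rflags = rflags | X86_EFLAGS_IOPL | X86_EFLAGS_VM;
}

static unsigned long get_rflags_vm86(void)
{
        unsigned long rflags = hw_rflags & RMODE_GUEST_OWNED_EFLAGS_BITS;

        return rflags | (save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS);
}

int main(void)
{
        set_rflags_vm86(0x202);                 /* IF=1, IOPL=0, VM=0 */
        printf("%#lx\n", get_rflags_vm86());    /* 0x202, view preserved */
        return 0;
}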
 
@@ -828,9 +942,9 @@ static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
        int ret = 0;
 
        if (interruptibility & GUEST_INTR_STATE_STI)
-               ret |= X86_SHADOW_INT_STI;
+               ret |= KVM_X86_SHADOW_INT_STI;
        if (interruptibility & GUEST_INTR_STATE_MOV_SS)
-               ret |= X86_SHADOW_INT_MOV_SS;
+               ret |= KVM_X86_SHADOW_INT_MOV_SS;
 
        return ret & mask;
 }
@@ -842,9 +956,9 @@ static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
 
        interruptibility &= ~(GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS);
 
-       if (mask & X86_SHADOW_INT_MOV_SS)
+       if (mask & KVM_X86_SHADOW_INT_MOV_SS)
                interruptibility |= GUEST_INTR_STATE_MOV_SS;
-       if (mask & X86_SHADOW_INT_STI)
+       else if (mask & KVM_X86_SHADOW_INT_STI)
                interruptibility |= GUEST_INTR_STATE_STI;
 
        if ((interruptibility != interruptibility_old))
@@ -864,7 +978,8 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 }
 
 static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
-                               bool has_error_code, u32 error_code)
+                               bool has_error_code, u32 error_code,
+                               bool reinject)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 intr_info = nr | INTR_INFO_VALID_MASK;
@@ -898,22 +1013,22 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
 }
 
+static bool vmx_rdtscp_supported(void)
+{
+       return cpu_has_vmx_rdtscp();
+}
+
 /*
  * Swap MSR entry in host/guest MSR entry array.
  */
-#ifdef CONFIG_X86_64
 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
 {
-       struct kvm_msr_entry tmp;
+       struct shared_msr_entry tmp;
 
        tmp = vmx->guest_msrs[to];
        vmx->guest_msrs[to] = vmx->guest_msrs[from];
        vmx->guest_msrs[from] = tmp;
-       tmp = vmx->host_msrs[to];
-       vmx->host_msrs[to] = vmx->host_msrs[from];
-       vmx->host_msrs[from] = tmp;
 }
-#endif
 
 /*
  * Set up the vmcs to automatically save and restore system
@@ -922,15 +1037,13 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
  */
 static void setup_msrs(struct vcpu_vmx *vmx)
 {
-       int save_nmsrs;
+       int save_nmsrs, index;
        unsigned long *msr_bitmap;
 
        vmx_load_host_state(vmx);
        save_nmsrs = 0;
 #ifdef CONFIG_X86_64
        if (is_long_mode(&vmx->vcpu)) {
-               int index;
-
                index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
                if (index >= 0)
                        move_msr_up(vmx, index, save_nmsrs++);
@@ -940,25 +1053,23 @@ static void setup_msrs(struct vcpu_vmx *vmx)
                index = __find_msr_index(vmx, MSR_CSTAR);
                if (index >= 0)
                        move_msr_up(vmx, index, save_nmsrs++);
-               index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
-               if (index >= 0)
+               index = __find_msr_index(vmx, MSR_TSC_AUX);
+               if (index >= 0 && vmx->rdtscp_enabled)
                        move_msr_up(vmx, index, save_nmsrs++);
                /*
                 * MSR_K6_STAR is only needed on long mode guests, and only
                 * if efer.sce is enabled.
                 */
                index = __find_msr_index(vmx, MSR_K6_STAR);
-               if ((index >= 0) && (vmx->vcpu.arch.shadow_efer & EFER_SCE))
+               if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE))
                        move_msr_up(vmx, index, save_nmsrs++);
        }
 #endif
-       vmx->save_nmsrs = save_nmsrs;
+       index = __find_msr_index(vmx, MSR_EFER);
+       if (index >= 0 && update_transition_efer(vmx, index))
+               move_msr_up(vmx, index, save_nmsrs++);
 
-#ifdef CONFIG_X86_64
-       vmx->msr_offset_kernel_gs_base =
-               __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
-#endif
-       vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
+       vmx->save_nmsrs = save_nmsrs;
 
        if (cpu_has_vmx_msr_bitmap()) {
                if (is_long_mode(&vmx->vcpu))
@@ -1000,7 +1111,7 @@ static void guest_write_tsc(u64 guest_tsc, u64 host_tsc)
 static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 {
        u64 data;
-       struct kvm_msr_entry *msr;
+       struct shared_msr_entry *msr;
 
        if (!pdata) {
                printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
@@ -1015,9 +1126,13 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
        case MSR_GS_BASE:
                data = vmcs_readl(GUEST_GS_BASE);
                break;
+       case MSR_KERNEL_GS_BASE:
+               vmx_load_host_state(to_vmx(vcpu));
+               data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
+               break;
+#endif
        case MSR_EFER:
                return kvm_get_msr_common(vcpu, msr_index, pdata);
-#endif
        case MSR_IA32_TSC:
                data = guest_read_tsc();
                break;
@@ -1030,7 +1145,12 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
        case MSR_IA32_SYSENTER_ESP:
                data = vmcs_readl(GUEST_SYSENTER_ESP);
                break;
+       case MSR_TSC_AUX:
+               if (!to_vmx(vcpu)->rdtscp_enabled)
+                       return 1;
+               /* Otherwise falls through */
        default:
+               vmx_load_host_state(to_vmx(vcpu));
                msr = find_msr_entry(to_vmx(vcpu), msr_index);
                if (msr) {
                        vmx_load_host_state(to_vmx(vcpu));
@@ -1052,7 +1172,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       struct kvm_msr_entry *msr;
+       struct shared_msr_entry *msr;
        u64 host_tsc;
        int ret = 0;
 
@@ -1068,6 +1188,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
        case MSR_GS_BASE:
                vmcs_writel(GUEST_GS_BASE, data);
                break;
+       case MSR_KERNEL_GS_BASE:
+               vmx_load_host_state(vmx);
+               vmx->msr_guest_kernel_gs_base = data;
+               break;
 #endif
        case MSR_IA32_SYSENTER_CS:
                vmcs_write32(GUEST_SYSENTER_CS, data);
@@ -1088,7 +1212,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
                        vcpu->arch.pat = data;
                        break;
                }
-               /* Otherwise falls through to kvm_set_msr_common */
+               ret = kvm_set_msr_common(vcpu, msr_index, data);
+               break;
+       case MSR_TSC_AUX:
+               if (!vmx->rdtscp_enabled)
+                       return 1;
+               /* Check reserved bit, higher 32 bits should be zero */
+               if ((data >> 32) != 0)
+                       return 1;
+               /* Otherwise falls through */
        default:
                msr = find_msr_entry(vmx, msr_index);
                if (msr) {
@@ -1141,9 +1273,16 @@ static __init int vmx_disabled_by_bios(void)
        u64 msr;
 
        rdmsrl(MSR_IA32_FEATURE_CONTROL, msr);
-       return (msr & (FEATURE_CONTROL_LOCKED |
-                      FEATURE_CONTROL_VMXON_ENABLED))
-           == FEATURE_CONTROL_LOCKED;
+       if (msr & FEATURE_CONTROL_LOCKED) {
+               if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX)
+                       && tboot_enabled())
+                       return 1;
+               if (!(msr & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX)
+                       && !tboot_enabled())
+                       return 1;
+       }
+
+       return 0;
        /* locked but not enabled */
 }
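
MSR_IA32_FEATURE_CONTROL carries separate VMXON enable bits for operation inside and outside SMX, and once bit 0 locks the MSR the BIOS choice stands until reset. The rewritten check therefore reports VMX as BIOS-disabled only when the bit matching the current environment (a tboot launch implies SMX) is missing from a locked MSR; an unlocked MSR is fine, since hardware_enable() below sets and locks the needed bits itself. A compact model:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FC_LOCKED            (1ULL << 0)
#define FC_VMXON_INSIDE_SMX  (1ULL << 1)
#define FC_VMXON_OUTSIDE_SMX (1ULL << 2)

/* Models vmx_disabled_by_bios(): true when BIOS locked VMX off. */
static bool vmx_disabled(uint64_t msr, bool in_smx)
{
        if (!(msr & FC_LOCKED))
                return false;   /* unlocked: we may enable it ourselves */
        return in_smx ? !(msr & FC_VMXON_INSIDE_SMX)
                      : !(msr & FC_VMXON_OUTSIDE_SMX);
}

int main(void)
{
        uint64_t msr = FC_LOCKED | FC_VMXON_OUTSIDE_SMX;

        printf("%d\n", vmx_disabled(msr, false));       /* 0: usable */
        printf("%d\n", vmx_disabled(msr, true));        /* 1: SMX bit absent */
        return 0;
}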
 
@@ -1151,21 +1290,23 @@ static int hardware_enable(void *garbage)
 {
        int cpu = raw_smp_processor_id();
        u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
-       u64 old;
+       u64 old, test_bits;
 
        if (read_cr4() & X86_CR4_VMXE)
                return -EBUSY;
 
        INIT_LIST_HEAD(&per_cpu(vcpus_on_cpu, cpu));
        rdmsrl(MSR_IA32_FEATURE_CONTROL, old);
-       if ((old & (FEATURE_CONTROL_LOCKED |
-                   FEATURE_CONTROL_VMXON_ENABLED))
-           != (FEATURE_CONTROL_LOCKED |
-               FEATURE_CONTROL_VMXON_ENABLED))
+
+       test_bits = FEATURE_CONTROL_LOCKED;
+       test_bits |= FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
+       if (tboot_enabled())
+               test_bits |= FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX;
+
+       if ((old & test_bits) != test_bits) {
                /* enable and lock */
-               wrmsrl(MSR_IA32_FEATURE_CONTROL, old |
-                      FEATURE_CONTROL_LOCKED |
-                      FEATURE_CONTROL_VMXON_ENABLED);
+               wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
+       }
        write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
        asm volatile (ASM_VMX_VMXON_RAX
                      : : "a"(&phys_addr), "m"(phys_addr)
@@ -1247,6 +1388,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
              CPU_BASED_USE_IO_BITMAPS |
              CPU_BASED_MOV_DR_EXITING |
              CPU_BASED_USE_TSC_OFFSETING |
+             CPU_BASED_MWAIT_EXITING |
+             CPU_BASED_MONITOR_EXITING |
              CPU_BASED_INVLPG_EXITING;
        opt = CPU_BASED_TPR_SHADOW |
              CPU_BASED_USE_MSR_BITMAPS |
@@ -1266,7 +1409,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
                        SECONDARY_EXEC_ENABLE_VPID |
                        SECONDARY_EXEC_ENABLE_EPT |
                        SECONDARY_EXEC_UNRESTRICTED_GUEST |
-                       SECONDARY_EXEC_PAUSE_LOOP_EXITING;
+                       SECONDARY_EXEC_PAUSE_LOOP_EXITING |
+                       SECONDARY_EXEC_RDTSCP;
                if (adjust_vmx_controls(min2, opt2,
                                        MSR_IA32_VMX_PROCBASED_CTLS2,
                                        &_cpu_based_2nd_exec_control) < 0)
@@ -1452,8 +1596,8 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
        vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
 
        flags = vmcs_readl(GUEST_RFLAGS);
-       flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
-       flags |= (vmx->rmode.save_iopl << IOPL_SHIFT);
+       flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
+       flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
        vmcs_writel(GUEST_RFLAGS, flags);
 
        vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
@@ -1480,8 +1624,12 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
 static gva_t rmode_tss_base(struct kvm *kvm)
 {
        if (!kvm->arch.tss_addr) {
-               gfn_t base_gfn = kvm->memslots[0].base_gfn +
-                                kvm->memslots[0].npages - 3;
+               struct kvm_memslots *slots;
+               gfn_t base_gfn;
+
+               slots = kvm_memslots(kvm);
+               base_gfn = slots->memslots[0].base_gfn +
+                                slots->memslots[0].npages - 3;
                return base_gfn << PAGE_SHIFT;
        }
        return kvm->arch.tss_addr;
@@ -1522,8 +1670,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
        vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
 
        flags = vmcs_readl(GUEST_RFLAGS);
-       vmx->rmode.save_iopl
-               = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
+       vmx->rmode.save_rflags = flags;
 
        flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
 
@@ -1557,11 +1704,17 @@ continue_rmode:
 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
+       struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
 
-       vcpu->arch.shadow_efer = efer;
        if (!msr)
                return;
+
+       /*
+        * Force kernel_gs_base reloading before EFER changes, as control
+        * of this msr depends on is_long_mode().
+        */
+       vmx_load_host_state(to_vmx(vcpu));
+       vcpu->arch.efer = efer;
        if (efer & EFER_LMA) {
                vmcs_write32(VM_ENTRY_CONTROLS,
                             vmcs_read32(VM_ENTRY_CONTROLS) |
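
The ordering comment in vmx_set_efer() above matters because the lazy kernel_gs_base swap in vmx_save_host_state()/__vmx_load_host_state() is gated on is_long_mode(), and writing EFER is precisely what flips that predicate; flushing host state first keeps the save and the eventual restore under the same mode. A toy illustration of the hazard, with made-up names:

#include <stdbool.h>
#include <stdio.h>

static bool long_mode;        /* models is_long_mode(vcpu) */
static bool guest_gs_loaded;  /* models the lazily swapped MSR state */

static void load_guest_state(void) { if (long_mode) guest_gs_loaded = true; }
static void load_host_state(void)  { if (long_mode) guest_gs_loaded = false; }

static void set_efer(bool lma)
{
        load_host_state();  /* must run while long_mode still matches */
        long_mode = lma;    /* swapping these two lines leaks the guest MSR */
}

int main(void)
{
        long_mode = true;
        load_guest_state();
        set_efer(false);
        printf("guest gs still loaded: %d\n", guest_gs_loaded);  /* 0 */
        return 0;
}
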
@@ -1591,17 +1744,18 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
                             (guest_tr_ar & ~AR_TYPE_MASK)
                             | AR_TYPE_BUSY_64_TSS);
        }
-       vcpu->arch.shadow_efer |= EFER_LMA;
-       vmx_set_efer(vcpu, vcpu->arch.shadow_efer);
+       vcpu->arch.efer |= EFER_LMA;
+       vmx_set_efer(vcpu, vcpu->arch.efer);
 }
 
 static void exit_lmode(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.shadow_efer &= ~EFER_LMA;
+       vcpu->arch.efer &= ~EFER_LMA;
 
        vmcs_write32(VM_ENTRY_CONTROLS,
                     vmcs_read32(VM_ENTRY_CONTROLS)
                     & ~VM_ENTRY_IA32E_MODE);
+       vmx_set_efer(vcpu, vcpu->arch.efer);
 }
 
 #endif
@@ -1613,10 +1767,20 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu)
                ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
 }
 
+static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
+{
+       ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
+
+       vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
+       vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
+}
+
 static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
-       vcpu->arch.cr4 &= KVM_GUEST_CR4_MASK;
-       vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & ~KVM_GUEST_CR4_MASK;
+       ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
+
+       vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
+       vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
 }
 
 static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
@@ -1661,7 +1825,7 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
                             (CPU_BASED_CR3_LOAD_EXITING |
                              CPU_BASED_CR3_STORE_EXITING));
                vcpu->arch.cr0 = cr0;
-               vmx_set_cr4(vcpu, vcpu->arch.cr4);
+               vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
        } else if (!is_paging(vcpu)) {
                /* From nonpaging to paging */
                vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
@@ -1669,23 +1833,13 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
                             ~(CPU_BASED_CR3_LOAD_EXITING |
                               CPU_BASED_CR3_STORE_EXITING));
                vcpu->arch.cr0 = cr0;
-               vmx_set_cr4(vcpu, vcpu->arch.cr4);
+               vmx_set_cr4(vcpu, kvm_read_cr4(vcpu));
        }
 
        if (!(cr0 & X86_CR0_WP))
                *hw_cr0 &= ~X86_CR0_WP;
 }
 
-static void ept_update_paging_mode_cr4(unsigned long *hw_cr4,
-                                       struct kvm_vcpu *vcpu)
-{
-       if (!is_paging(vcpu)) {
-               *hw_cr4 &= ~X86_CR4_PAE;
-               *hw_cr4 |= X86_CR4_PSE;
-       } else if (!(vcpu->arch.cr4 & X86_CR4_PAE))
-               *hw_cr4 &= ~X86_CR4_PAE;
-}
-
 static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -1697,8 +1851,6 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        else
                hw_cr0 = (cr0 & ~KVM_GUEST_CR0_MASK) | KVM_VM_CR0_ALWAYS_ON;
 
-       vmx_fpu_deactivate(vcpu);
-
        if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE))
                enter_pmode(vcpu);
 
@@ -1706,7 +1858,7 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                enter_rmode(vcpu);
 
 #ifdef CONFIG_X86_64
-       if (vcpu->arch.shadow_efer & EFER_LME) {
+       if (vcpu->arch.efer & EFER_LME) {
                if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
                        enter_lmode(vcpu);
                if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
@@ -1717,12 +1869,12 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        if (enable_ept)
                ept_update_paging_mode_cr0(&hw_cr0, cr0, vcpu);
 
+       if (!vcpu->fpu_active)
+               hw_cr0 |= X86_CR0_TS | X86_CR0_MP;
+
        vmcs_writel(CR0_READ_SHADOW, cr0);
        vmcs_writel(GUEST_CR0, hw_cr0);
        vcpu->arch.cr0 = cr0;
-
-       if (!(cr0 & X86_CR0_TS) || !(cr0 & X86_CR0_PE))
-               vmx_fpu_activate(vcpu);
 }
 
 static u64 construct_eptp(unsigned long root_hpa)
@@ -1748,12 +1900,11 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
                vmcs_write64(EPT_POINTER, eptp);
                guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 :
                        vcpu->kvm->arch.ept_identity_map_addr;
+               ept_load_pdptrs(vcpu);
        }
 
        vmx_flush_tlb(vcpu);
        vmcs_writel(GUEST_CR3, guest_cr3);
-       if (vcpu->arch.cr0 & X86_CR0_PE)
-               vmx_fpu_deactivate(vcpu);
 }
 
 static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
@@ -1762,8 +1913,14 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
                    KVM_RMODE_VM_CR4_ALWAYS_ON : KVM_PMODE_VM_CR4_ALWAYS_ON);
 
        vcpu->arch.cr4 = cr4;
-       if (enable_ept)
-               ept_update_paging_mode_cr4(&hw_cr4, vcpu);
+       if (enable_ept) {
+               if (!is_paging(vcpu)) {
+                       hw_cr4 &= ~X86_CR4_PAE;
+                       hw_cr4 |= X86_CR4_PSE;
+               } else if (!(cr4 & X86_CR4_PAE)) {
+                       hw_cr4 &= ~X86_CR4_PAE;
+               }
+       }
 
        vmcs_writel(CR4_READ_SHADOW, cr4);
        vmcs_writel(GUEST_CR4, hw_cr4);
@@ -1801,7 +1958,7 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
 
 static int vmx_get_cpl(struct kvm_vcpu *vcpu)
 {
-       if (!(vcpu->arch.cr0 & X86_CR0_PE)) /* if real mode */
+       if (!is_protmode(vcpu))
                return 0;
 
        if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */
@@ -1884,28 +2041,28 @@ static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
        *l = (ar >> 13) & 1;
 }
 
-static void vmx_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
-       dt->limit = vmcs_read32(GUEST_IDTR_LIMIT);
-       dt->base = vmcs_readl(GUEST_IDTR_BASE);
+       dt->size = vmcs_read32(GUEST_IDTR_LIMIT);
+       dt->address = vmcs_readl(GUEST_IDTR_BASE);
 }
 
-static void vmx_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
-       vmcs_write32(GUEST_IDTR_LIMIT, dt->limit);
-       vmcs_writel(GUEST_IDTR_BASE, dt->base);
+       vmcs_write32(GUEST_IDTR_LIMIT, dt->size);
+       vmcs_writel(GUEST_IDTR_BASE, dt->address);
 }
 
-static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
-       dt->limit = vmcs_read32(GUEST_GDTR_LIMIT);
-       dt->base = vmcs_readl(GUEST_GDTR_BASE);
+       dt->size = vmcs_read32(GUEST_GDTR_LIMIT);
+       dt->address = vmcs_readl(GUEST_GDTR_BASE);
 }
 
-static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
+static void vmx_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
 {
-       vmcs_write32(GUEST_GDTR_LIMIT, dt->limit);
-       vmcs_writel(GUEST_GDTR_BASE, dt->base);
+       vmcs_write32(GUEST_GDTR_LIMIT, dt->size);
+       vmcs_writel(GUEST_GDTR_BASE, dt->address);
 }
 
 static bool rmode_segment_valid(struct kvm_vcpu *vcpu, int seg)
@@ -2056,7 +2213,7 @@ static bool cs_ss_rpl_check(struct kvm_vcpu *vcpu)
 static bool guest_state_valid(struct kvm_vcpu *vcpu)
 {
        /* real mode guest state checks */
-       if (!(vcpu->arch.cr0 & X86_CR0_PE)) {
+       if (!is_protmode(vcpu)) {
                if (!rmode_segment_valid(vcpu, VCPU_SREG_CS))
                        return false;
                if (!rmode_segment_valid(vcpu, VCPU_SREG_SS))
@@ -2189,7 +2346,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
        struct kvm_userspace_memory_region kvm_userspace_mem;
        int r = 0;
 
-       down_write(&kvm->slots_lock);
+       mutex_lock(&kvm->slots_lock);
        if (kvm->arch.apic_access_page)
                goto out;
        kvm_userspace_mem.slot = APIC_ACCESS_PAGE_PRIVATE_MEMSLOT;
@@ -2202,7 +2359,7 @@ static int alloc_apic_access_page(struct kvm *kvm)
 
        kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
 out:
-       up_write(&kvm->slots_lock);
+       mutex_unlock(&kvm->slots_lock);
        return r;
 }
 
@@ -2211,7 +2368,7 @@ static int alloc_identity_pagetable(struct kvm *kvm)
        struct kvm_userspace_memory_region kvm_userspace_mem;
        int r = 0;
 
-       down_write(&kvm->slots_lock);
+       mutex_lock(&kvm->slots_lock);
        if (kvm->arch.ept_identity_pagetable)
                goto out;
        kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
@@ -2226,7 +2383,7 @@ static int alloc_identity_pagetable(struct kvm *kvm)
        kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
                        kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
 out:
-       up_write(&kvm->slots_lock);
+       mutex_unlock(&kvm->slots_lock);
        return r;
 }
 
@@ -2246,6 +2403,16 @@ static void allocate_vpid(struct vcpu_vmx *vmx)
        spin_unlock(&vmx_vpid_lock);
 }
 
+static void free_vpid(struct vcpu_vmx *vmx)
+{
+       if (!enable_vpid)
+               return;
+       spin_lock(&vmx_vpid_lock);
+       if (vmx->vpid != 0)
+               __clear_bit(vmx->vpid, vmx_vpid_bitmap);
+       spin_unlock(&vmx_vpid_lock);
+}
+
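
free_vpid() supplies the release half that allocate_vpid() was missing: VPIDs come from a global bitmap guarded by vmx_vpid_lock, with vpid 0 reserved for the host and doubling as the "no VPID" value, which is why freeing checks for it. A lockless, byte-per-id toy version of the pairing (the kernel uses a real bitmap plus the spinlock):

#include <stdio.h>

#define NR_VPIDS 64     /* hardware allows up to 1 << 16 */

static unsigned char vpid_used[NR_VPIDS];

/* vpid 0 is reserved, so it can mean "allocation failed" too. */
static unsigned alloc_vpid(void)
{
        unsigned i;

        for (i = 1; i < NR_VPIDS; ++i)
                if (!vpid_used[i]) {
                        vpid_used[i] = 1;
                        return i;
                }
        return 0;
}

static void free_vpid(unsigned vpid)
{
        if (vpid)
                vpid_used[vpid] = 0;
}

int main(void)
{
        unsigned a = alloc_vpid(), b = alloc_vpid();

        free_vpid(a);
        printf("a=%u b=%u next=%u\n", a, b, alloc_vpid()); /* a=1 b=2 next=1 */
        return 0;
}
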
 static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
 {
        int f = sizeof(unsigned long);
@@ -2284,7 +2451,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
        u32 junk;
        u64 host_pat, tsc_this, tsc_base;
        unsigned long a;
-       struct descriptor_table dt;
+       struct desc_ptr dt;
        int i;
        unsigned long kvm_vmx_return;
        u32 exec_control;
@@ -2323,8 +2490,10 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
                                ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
                if (vmx->vpid == 0)
                        exec_control &= ~SECONDARY_EXEC_ENABLE_VPID;
-               if (!enable_ept)
+               if (!enable_ept) {
                        exec_control &= ~SECONDARY_EXEC_ENABLE_EPT;
+                       enable_unrestricted_guest = 0;
+               }
                if (!enable_unrestricted_guest)
                        exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
                if (!ple_gap)
@@ -2363,14 +2532,16 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 
        vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8);  /* 22.2.4 */
 
-       kvm_get_idt(&dt);
-       vmcs_writel(HOST_IDTR_BASE, dt.base);   /* 22.2.4 */
+       native_store_idt(&dt);
+       vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
 
        asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
        vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
        vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
        vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
+       vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
        vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
+       vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest));
 
        rdmsr(MSR_IA32_SYSENTER_CS, host_sysenter_cs, junk);
        vmcs_write32(HOST_IA32_SYSENTER_CS, host_sysenter_cs);
@@ -2396,18 +2567,15 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
        for (i = 0; i < NR_VMX_MSR; ++i) {
                u32 index = vmx_msr_index[i];
                u32 data_low, data_high;
-               u64 data;
                int j = vmx->nmsrs;
 
                if (rdmsr_safe(index, &data_low, &data_high) < 0)
                        continue;
                if (wrmsr_safe(index, data_low, data_high) < 0)
                        continue;
-               data = data_low | ((u64)data_high << 32);
-               vmx->host_msrs[j].index = index;
-               vmx->host_msrs[j].reserved = 0;
-               vmx->host_msrs[j].data = data;
-               vmx->guest_msrs[j] = vmx->host_msrs[j];
+               vmx->guest_msrs[j].index = i;
+               vmx->guest_msrs[j].data = 0;
+               vmx->guest_msrs[j].mask = -1ull;
                ++vmx->nmsrs;
        }
 
@@ -2417,7 +2585,10 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
        vmcs_write32(VM_ENTRY_CONTROLS, vmcs_config.vmentry_ctrl);
 
        vmcs_writel(CR0_GUEST_HOST_MASK, ~0UL);
-       vmcs_writel(CR4_GUEST_HOST_MASK, KVM_GUEST_CR4_MASK);
+       vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS;
+       if (enable_ept)
+               vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE;
+       vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
 
        tsc_base = vmx->vcpu.kvm->arch.vm_init_tsc;
        rdtscll(tsc_this);
@@ -2442,10 +2613,10 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u64 msr;
-       int ret;
+       int ret, idx;
 
        vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
-       down_read(&vcpu->kvm->slots_lock);
+       idx = srcu_read_lock(&vcpu->kvm->srcu);
        if (!init_rmode(vmx->vcpu.kvm)) {
                ret = -ENOMEM;
                goto out;
@@ -2539,7 +2710,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
                vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
 
        vmx->vcpu.arch.cr0 = X86_CR0_NW | X86_CR0_CD | X86_CR0_ET;
-       vmx_set_cr0(&vmx->vcpu, vmx->vcpu.arch.cr0); /* enter rmode */
+       vmx_set_cr0(&vmx->vcpu, kvm_read_cr0(vcpu)); /* enter rmode */
        vmx_set_cr4(&vmx->vcpu, 0);
        vmx_set_efer(&vmx->vcpu, 0);
        vmx_fpu_activate(&vmx->vcpu);
@@ -2553,7 +2724,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
        vmx->emulation_required = 0;
 
 out:
-       up_read(&vcpu->kvm->slots_lock);
+       srcu_read_unlock(&vcpu->kvm->srcu, idx);
        return ret;
 }
 
@@ -2651,8 +2822,35 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
                return 0;
 
        return  !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
-                       (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS |
-                               GUEST_INTR_STATE_NMI));
+                       (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_NMI));
+}
+
+static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
+{
+       if (!cpu_has_virtual_nmis())
+               return to_vmx(vcpu)->soft_vnmi_blocked;
+       else
+               return !!(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
+                         GUEST_INTR_STATE_NMI);
+}
+
+static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+       if (!cpu_has_virtual_nmis()) {
+               if (vmx->soft_vnmi_blocked != masked) {
+                       vmx->soft_vnmi_blocked = masked;
+                       vmx->vnmi_blocked_time = 0;
+               }
+       } else {
+               if (masked)
+                       vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+                                     GUEST_INTR_STATE_NMI);
+               else
+                       vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
+                                       GUEST_INTR_STATE_NMI);
+       }
 }
 
 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
@@ -2702,6 +2900,12 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
                kvm_queue_exception(vcpu, vec);
                return 1;
        case BP_VECTOR:
+               /*
+                * Update instruction length as we may reinject the exception
+                * from user space while in guest debugging mode.
+                */
+               to_vmx(vcpu)->vcpu.arch.event_exit_inst_len =
+                       vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
                        return 0;
                /* fall through */
@@ -2760,9 +2964,14 @@ static int handle_exception(struct kvm_vcpu *vcpu)
                return handle_machine_check(vcpu);
 
        if ((vect_info & VECTORING_INFO_VALID_MASK) &&
-                                               !is_page_fault(intr_info))
-               printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
-                      "intr info 0x%x\n", __func__, vect_info, intr_info);
+           !is_page_fault(intr_info)) {
+               vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
+               vcpu->run->internal.ndata = 2;
+               vcpu->run->internal.data[0] = vect_info;
+               vcpu->run->internal.data[1] = intr_info;
+               return 0;
+       }
 
        if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR)
                return 1;  /* already handled by vmx_vcpu_run() */
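
Instead of a printk only the host administrator might notice, an exception arriving while another event was still being vectored now surfaces to userspace as KVM_EXIT_INTERNAL_ERROR with the raw vectoring and interrupt info attached. A sketch of the userspace side, assuming a Linux build environment with <linux/kvm.h>; real code would read the mmap'ed kvm_run after KVM_RUN (vcpu setup elided):

#include <linux/kvm.h>
#include <stdio.h>

static void report_internal_error(const struct kvm_run *run)
{
        __u32 i;

        if (run->exit_reason != KVM_EXIT_INTERNAL_ERROR)
                return;
        fprintf(stderr, "internal error, suberror %u\n",
                run->internal.suberror);
        for (i = 0; i < run->internal.ndata; ++i)
                fprintf(stderr, "  data[%u] = 0x%llx\n", i,
                        (unsigned long long)run->internal.data[i]);
}

int main(void)
{
        struct kvm_run run = { .exit_reason = KVM_EXIT_INTERNAL_ERROR };

        run.internal.suberror = KVM_INTERNAL_ERROR_SIMUL_EX;
        run.internal.ndata = 2;
        run.internal.data[0] = 0x80000b0e;      /* sample vectoring info */
        run.internal.data[1] = 0x80000306;      /* sample intr info */
        report_internal_error(&run);
        return 0;
}
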
@@ -2819,6 +3028,13 @@ static int handle_exception(struct kvm_vcpu *vcpu)
                kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
                /* fall through */
        case BP_VECTOR:
+               /*
+                * Update instruction length as we may reinject #BP from
+                * user space while in guest debugging mode. Reading it for
+                * #DB as well causes no harm, it is not used in that case.
+                */
+               vmx->vcpu.arch.event_exit_inst_len =
+                       vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
                kvm_run->exit_reason = KVM_EXIT_DEBUG;
                kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
                kvm_run->debug.arch.exception = ex_no;
@@ -2850,22 +3066,20 @@ static int handle_io(struct kvm_vcpu *vcpu)
        int size, in, string;
        unsigned port;
 
-       ++vcpu->stat.io_exits;
        exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
        string = (exit_qualification & 16) != 0;
+       in = (exit_qualification & 8) != 0;
 
-       if (string) {
-               if (emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DO_MMIO)
-                       return 0;
-               return 1;
-       }
+       ++vcpu->stat.io_exits;
 
-       size = (exit_qualification & 7) + 1;
-       in = (exit_qualification & 8) != 0;
-       port = exit_qualification >> 16;
+       if (string || in)
+               return !(emulate_instruction(vcpu, 0, 0, 0) == EMULATE_DO_MMIO);
 
+       port = exit_qualification >> 16;
+       size = (exit_qualification & 7) + 1;
        skip_emulated_instruction(vcpu);
-       return kvm_emulate_pio(vcpu, in, size, port);
+
+       return kvm_fast_pio_out(vcpu, size, port);
 }
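
The reworked handle_io() decodes the exit qualification up front — bits 2:0 hold size minus one, bit 3 the direction, bit 4 the string flag, bits 31:16 the port — and only a plain OUT takes the new kvm_fast_pio_out() path; string and IN forms still bounce through the emulator. A standalone decoder:

#include <stdio.h>

/* Decode a VM-exit qualification for an I/O instruction (illustrative). */
static void decode_io_exit(unsigned long eq)
{
        int size      = (int)(eq & 7) + 1;   /* access width in bytes */
        int in        = (eq & 8) != 0;       /* direction: IN vs. OUT */
        int string    = (eq & 16) != 0;      /* INS/OUTS */
        unsigned port = (eq >> 16) & 0xffff; /* port number */

        printf("port %#x size %d %s%s\n", port, size,
               in ? "in" : "out", string ? " string" : "");
}

int main(void)
{
        decode_io_exit(0x3f8UL << 16);  /* one-byte OUT to 0x3f8 */
        return 0;
}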
 
 static void
@@ -2920,11 +3134,10 @@ static int handle_cr(struct kvm_vcpu *vcpu)
                };
                break;
        case 2: /* clts */
-               vmx_fpu_deactivate(vcpu);
-               vcpu->arch.cr0 &= ~X86_CR0_TS;
-               vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
-               vmx_fpu_activate(vcpu);
+               vmx_set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
+               trace_kvm_cr_write(0, kvm_read_cr0(vcpu));
                skip_emulated_instruction(vcpu);
+               vmx_fpu_activate(vcpu);
                return 1;
        case 1: /*mov from cr*/
                switch (cr) {
@@ -2942,7 +3155,9 @@ static int handle_cr(struct kvm_vcpu *vcpu)
                }
                break;
        case 3: /* lmsw */
-               kvm_lmsw(vcpu, (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f);
+               val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
+               trace_kvm_cr_write(0, (kvm_read_cr0(vcpu) & ~0xful) | val);
+               kvm_lmsw(vcpu, val);
 
                skip_emulated_instruction(vcpu);
                return 1;
@@ -2958,9 +3173,9 @@ static int handle_cr(struct kvm_vcpu *vcpu)
 static int handle_dr(struct kvm_vcpu *vcpu)
 {
        unsigned long exit_qualification;
-       unsigned long val;
        int dr, reg;
 
+       /* Do not handle if the CPL > 0, will trigger GP on re-entry */
        if (!kvm_require_cpl(vcpu, 0))
                return 1;
        dr = vmcs_readl(GUEST_DR7);
@@ -2992,57 +3207,20 @@ static int handle_dr(struct kvm_vcpu *vcpu)
        dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
        reg = DEBUG_REG_ACCESS_REG(exit_qualification);
        if (exit_qualification & TYPE_MOV_FROM_DR) {
-               switch (dr) {
-               case 0 ... 3:
-                       val = vcpu->arch.db[dr];
-                       break;
-               case 6:
-                       val = vcpu->arch.dr6;
-                       break;
-               case 7:
-                       val = vcpu->arch.dr7;
-                       break;
-               default:
-                       val = 0;
-               }
-               kvm_register_write(vcpu, reg, val);
-       } else {
-               val = vcpu->arch.regs[reg];
-               switch (dr) {
-               case 0 ... 3:
-                       vcpu->arch.db[dr] = val;
-                       if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
-                               vcpu->arch.eff_db[dr] = val;
-                       break;
-               case 4 ... 5:
-                       if (vcpu->arch.cr4 & X86_CR4_DE)
-                               kvm_queue_exception(vcpu, UD_VECTOR);
-                       break;
-               case 6:
-                       if (val & 0xffffffff00000000ULL) {
-                               kvm_queue_exception(vcpu, GP_VECTOR);
-                               break;
-                       }
-                       vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
-                       break;
-               case 7:
-                       if (val & 0xffffffff00000000ULL) {
-                               kvm_queue_exception(vcpu, GP_VECTOR);
-                               break;
-                       }
-                       vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
-                       if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
-                               vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
-                               vcpu->arch.switch_db_regs =
-                                       (val & DR7_BP_EN_MASK);
-                       }
-                       break;
-               }
-       }
+               unsigned long val;
+               if (!kvm_get_dr(vcpu, dr, &val))
+                       kvm_register_write(vcpu, reg, val);
+       } else
+               kvm_set_dr(vcpu, dr, vcpu->arch.regs[reg]);
        skip_emulated_instruction(vcpu);
        return 1;
 }
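
The open-coded debug-register switch is gone: kvm_get_dr() and kvm_set_dr() in common x86 code now centralize the checks that used to live here — DR4/DR5 alias to DR6/DR7 unless CR4.DE makes them #UD, and the upper 32 bits of DR6/DR7 are reserved. A rough model of those checks (GNU C range cases, as in the kernel itself):

#include <stdint.h>
#include <stdio.h>

/* Returns 0 on success, -1 where the real helpers inject #UD or #GP. */
static int set_dr(int dr, uint64_t val, int cr4_de)
{
        switch (dr) {
        case 0 ... 3:
                return 0;               /* breakpoint address registers */
        case 4: case 5:
                if (cr4_de)
                        return -1;      /* CR4.DE set: #UD */
                return set_dr(dr + 2, val, cr4_de);     /* alias to 6/7 */
        case 6: case 7:
                if (val >> 32)
                        return -1;      /* reserved upper half: #GP */
                return 0;
        default:
                return -1;
        }
}

int main(void)
{
        printf("%d\n", set_dr(5, 0, 0));                /* 0: aliases to dr7 */
        printf("%d\n", set_dr(7, 1ULL << 40, 0));       /* -1: reserved bits */
        return 0;
}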
 
+static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
+{
+       vmcs_writel(GUEST_DR7, val);
+}
+
 static int handle_cpuid(struct kvm_vcpu *vcpu)
 {
        kvm_emulate_cpuid(vcpu);
@@ -3055,6 +3233,7 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu)
        u64 data;
 
        if (vmx_get_msr(vcpu, ecx, &data)) {
+               trace_kvm_msr_read_ex(ecx);
                kvm_inject_gp(vcpu, 0);
                return 1;
        }
@@ -3074,13 +3253,13 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
        u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
                | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 
-       trace_kvm_msr_write(ecx, data);
-
        if (vmx_set_msr(vcpu, ecx, data) != 0) {
+               trace_kvm_msr_write_ex(ecx, data);
                kvm_inject_gp(vcpu, 0);
                return 1;
        }
 
+       trace_kvm_msr_write(ecx, data);
        skip_emulated_instruction(vcpu);
        return 1;
 }
@@ -3173,6 +3352,8 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long exit_qualification;
+       bool has_error_code = false;
+       u32 error_code = 0;
        u16 tss_selector;
        int reason, type, idt_v;
 
@@ -3195,6 +3376,13 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
                        kvm_clear_interrupt_queue(vcpu);
                        break;
                case INTR_TYPE_HARD_EXCEPTION:
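+                       /*
+                        * If the exception was delivered with an error code,
+                        * forward it so the task-switch emulation can push it
+                        * on the new task's stack.
+                        */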
+                       if (vmx->idt_vectoring_info &
+                           VECTORING_INFO_DELIVER_CODE_MASK) {
+                               has_error_code = true;
+                               error_code =
+                                       vmcs_read32(IDT_VECTORING_ERROR_CODE);
+                       }
+                       /* fall through */
                case INTR_TYPE_SOFT_EXCEPTION:
                        kvm_clear_exception_queue(vcpu);
                        break;
@@ -3209,8 +3397,13 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
                       type != INTR_TYPE_NMI_INTR))
                skip_emulated_instruction(vcpu);
 
-       if (!kvm_task_switch(vcpu, tss_selector, reason))
+       if (kvm_task_switch(vcpu, tss_selector, reason,
+                               has_error_code, error_code) == EMULATE_FAIL) {
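+               /* Emulation failed; report an internal error to user space */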
+               vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+               vcpu->run->internal.ndata = 0;
                return 0;
+       }
 
        /* clear all local breakpoint enable flags (L0-L3: DR7 bits 0, 2, 4, 6) */
        vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~0x55);
@@ -3365,9 +3558,9 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
                }
 
                if (err != EMULATE_DONE) {
-                       kvm_report_emulation_failure(vcpu, "emulation failure");
                        vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+                       vcpu->run->internal.ndata = 0;
                        ret = 0;
                        goto out;
                }
@@ -3395,6 +3588,12 @@ static int handle_pause(struct kvm_vcpu *vcpu)
        return 1;
 }
 
+static int handle_invalid_op(struct kvm_vcpu *vcpu)
+{
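+       /* MONITOR/MWAIT are not exposed to the guest, so they raise #UD */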
+       kvm_queue_exception(vcpu, UD_VECTOR);
+       return 1;
+}
+
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
@@ -3432,6 +3631,8 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
        [EXIT_REASON_EPT_VIOLATION]           = handle_ept_violation,
        [EXIT_REASON_EPT_MISCONFIG]           = handle_ept_misconfig,
        [EXIT_REASON_PAUSE_INSTRUCTION]       = handle_pause,
+       [EXIT_REASON_MWAIT_INSTRUCTION]       = handle_invalid_op,
+       [EXIT_REASON_MONITOR_INSTRUCTION]     = handle_invalid_op,
 };
 
 static const int kvm_vmx_max_exit_handlers =
@@ -3447,7 +3648,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
        u32 exit_reason = vmx->exit_reason;
        u32 vectoring_info = vmx->idt_vectoring_info;
 
-       trace_kvm_exit(exit_reason, kvm_rip_read(vcpu));
+       trace_kvm_exit(exit_reason, vcpu);
 
        /* If guest state is invalid, start emulating */
        if (vmx->emulation_required && emulate_invalid_guest_state)
@@ -3532,8 +3733,11 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 
        /* We need to handle NMIs before interrupts are enabled */
        if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
-           (exit_intr_info & INTR_INFO_VALID_MASK))
+           (exit_intr_info & INTR_INFO_VALID_MASK)) {
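+               /*
+                * Bracket the host NMI handler so the perf NMI code can
+                * recognize that the NMI arrived while a guest was running.
+                */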
+               kvm_before_handle_nmi(&vmx->vcpu);
                asm("int $2");
+               kvm_after_handle_nmi(&vmx->vcpu);
+       }
 
        idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
 
@@ -3638,10 +3842,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-       if (enable_ept && is_paging(vcpu)) {
-               vmcs_writel(GUEST_CR3, vcpu->arch.cr3);
-               ept_load_pdptrs(vcpu);
-       }
        /* Record the guest's net vcpu time for enforced NMI injections. */
        if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked))
                vmx->entry_time = ktime_get();
@@ -3669,9 +3869,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
         */
        vmcs_writel(HOST_CR0, read_cr0());
 
-       if (vcpu->arch.switch_db_regs)
-               set_debugreg(vcpu->arch.dr6, 6);
-
        asm(
                /* Store host registers */
                "push %%"R"dx; push %%"R"bp;"
@@ -3772,9 +3969,6 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
                                  | (1 << VCPU_EXREG_PDPTR));
        vcpu->arch.regs_dirty = 0;
 
-       if (vcpu->arch.switch_db_regs)
-               get_debugreg(vcpu->arch.dr6, 6);
-
        vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
        if (vmx->rmode.irq.pending)
                fixup_rmode_irq(vmx);
@@ -3803,12 +3997,8 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-       spin_lock(&vmx_vpid_lock);
-       if (vmx->vpid != 0)
-               __clear_bit(vmx->vpid, vmx_vpid_bitmap);
-       spin_unlock(&vmx_vpid_lock);
+       free_vpid(vmx);
        vmx_free_vmcs(vcpu);
-       kfree(vmx->host_msrs);
        kfree(vmx->guest_msrs);
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vmx);
@@ -3835,10 +4025,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
                goto uninit_vcpu;
        }
 
-       vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
-       if (!vmx->host_msrs)
-               goto free_guest_msrs;
-
        vmx->vmcs = alloc_vmcs();
        if (!vmx->vmcs)
                goto free_msrs;
@@ -3869,12 +4055,11 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 free_vmcs:
        free_vmcs(vmx->vmcs);
 free_msrs:
-       kfree(vmx->host_msrs);
-free_guest_msrs:
        kfree(vmx->guest_msrs);
 uninit_vcpu:
        kvm_vcpu_uninit(&vmx->vcpu);
 free_vcpu:
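+       /* The vpid is allocated before vcpu init, so free it on any failure */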
+       free_vpid(vmx);
        kmem_cache_free(kvm_vcpu_cache, vmx);
        return ERR_PTR(err);
 }
@@ -3910,7 +4095,7 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
         *   b. VT-d with snooping control: the snooping control feature of
         *      the VT-d engine guarantees cache coherency, so just set it
         *      to WB to stay consistent with the host (same as item 3).
-        * 3. EPT without VT-d: always map as WB and set IGMT=1 to keep
+        * 3. EPT without VT-d: always map as WB and set IPAT=1 to keep
         *    consistent with host MTRR
         */
        if (is_mmio)
@@ -3921,37 +4106,92 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
                      VMX_EPT_MT_EPTE_SHIFT;
        else
                ret = (MTRR_TYPE_WRBACK << VMX_EPT_MT_EPTE_SHIFT)
-                       | VMX_EPT_IGMT_BIT;
+                       | VMX_EPT_IPAT_BIT;
 
        return ret;
 }
 
+#define _ER(x) { EXIT_REASON_##x, #x }
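+/* _ER(FOO) expands to a { EXIT_REASON_FOO, "FOO" } pair for the table below */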
+
 static const struct trace_print_flags vmx_exit_reasons_str[] = {
-       { EXIT_REASON_EXCEPTION_NMI,           "exception" },
-       { EXIT_REASON_EXTERNAL_INTERRUPT,      "ext_irq" },
-       { EXIT_REASON_TRIPLE_FAULT,            "triple_fault" },
-       { EXIT_REASON_NMI_WINDOW,              "nmi_window" },
-       { EXIT_REASON_IO_INSTRUCTION,          "io_instruction" },
-       { EXIT_REASON_CR_ACCESS,               "cr_access" },
-       { EXIT_REASON_DR_ACCESS,               "dr_access" },
-       { EXIT_REASON_CPUID,                   "cpuid" },
-       { EXIT_REASON_MSR_READ,                "rdmsr" },
-       { EXIT_REASON_MSR_WRITE,               "wrmsr" },
-       { EXIT_REASON_PENDING_INTERRUPT,       "interrupt_window" },
-       { EXIT_REASON_HLT,                     "halt" },
-       { EXIT_REASON_INVLPG,                  "invlpg" },
-       { EXIT_REASON_VMCALL,                  "hypercall" },
-       { EXIT_REASON_TPR_BELOW_THRESHOLD,     "tpr_below_thres" },
-       { EXIT_REASON_APIC_ACCESS,             "apic_access" },
-       { EXIT_REASON_WBINVD,                  "wbinvd" },
-       { EXIT_REASON_TASK_SWITCH,             "task_switch" },
-       { EXIT_REASON_EPT_VIOLATION,           "ept_violation" },
+       _ER(EXCEPTION_NMI),
+       _ER(EXTERNAL_INTERRUPT),
+       _ER(TRIPLE_FAULT),
+       _ER(PENDING_INTERRUPT),
+       _ER(NMI_WINDOW),
+       _ER(TASK_SWITCH),
+       _ER(CPUID),
+       _ER(HLT),
+       _ER(INVLPG),
+       _ER(RDPMC),
+       _ER(RDTSC),
+       _ER(VMCALL),
+       _ER(VMCLEAR),
+       _ER(VMLAUNCH),
+       _ER(VMPTRLD),
+       _ER(VMPTRST),
+       _ER(VMREAD),
+       _ER(VMRESUME),
+       _ER(VMWRITE),
+       _ER(VMOFF),
+       _ER(VMON),
+       _ER(CR_ACCESS),
+       _ER(DR_ACCESS),
+       _ER(IO_INSTRUCTION),
+       _ER(MSR_READ),
+       _ER(MSR_WRITE),
+       _ER(MWAIT_INSTRUCTION),
+       _ER(MONITOR_INSTRUCTION),
+       _ER(PAUSE_INSTRUCTION),
+       _ER(MCE_DURING_VMENTRY),
+       _ER(TPR_BELOW_THRESHOLD),
+       _ER(APIC_ACCESS),
+       _ER(EPT_VIOLATION),
+       _ER(EPT_MISCONFIG),
+       _ER(WBINVD),
        { -1, NULL }
 };
 
-static bool vmx_gb_page_enable(void)
+#undef _ER
+
+static int vmx_get_lpage_level(void)
+{
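+       /* Without 1GB EPT pages, the largest mappable guest page is 2MB */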
+       if (enable_ept && !cpu_has_vmx_ept_1g_page())
+               return PT_DIRECTORY_LEVEL;
+       else
+               /* Shadow paging, and EPT with 1GB support, can map 1GB pages */
+               return PT_PDPE_LEVEL;
+}
+
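+/* Mask for a feature bit's position within its 32-bit CPUID register */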
+static inline u32 bit(int bitno)
+{
+       return 1 << (bitno & 31);
+}
+
+static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *best;
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       u32 exec_control;
+
+       vmx->rdtscp_enabled = false;
+       if (vmx_rdtscp_supported()) {
+               exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+               if (exec_control & SECONDARY_EXEC_RDTSCP) {
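+                       /*
+                        * Keep RDTSCP enabled only if the guest's CPUID
+                        * advertises it; with the control clear, RDTSCP
+                        * raises #UD in the guest.
+                        */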
+                       best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
+                       if (best && (best->edx & bit(X86_FEATURE_RDTSCP)))
+                               vmx->rdtscp_enabled = true;
+                       else {
+                               exec_control &= ~SECONDARY_EXEC_RDTSCP;
+                               vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
+                                               exec_control);
+                       }
+               }
+       }
+}
+
+static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
 {
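+       /* Nothing to adjust for VMX; hook satisfies the common interface */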
-       return false;
 }
 
 static struct kvm_x86_ops vmx_x86_ops = {
@@ -3980,6 +4220,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .set_segment = vmx_set_segment,
        .get_cpl = vmx_get_cpl,
        .get_cs_db_l_bits = vmx_get_cs_db_l_bits,
+       .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
        .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
        .set_cr0 = vmx_set_cr0,
        .set_cr3 = vmx_set_cr3,
@@ -3989,9 +4230,12 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .set_idt = vmx_set_idt,
        .get_gdt = vmx_get_gdt,
        .set_gdt = vmx_set_gdt,
+       .set_dr7 = vmx_set_dr7,
        .cache_reg = vmx_cache_reg,
        .get_rflags = vmx_get_rflags,
        .set_rflags = vmx_set_rflags,
+       .fpu_activate = vmx_fpu_activate,
+       .fpu_deactivate = vmx_fpu_deactivate,
 
        .tlb_flush = vmx_flush_tlb,
 
@@ -4006,6 +4250,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .queue_exception = vmx_queue_exception,
        .interrupt_allowed = vmx_interrupt_allowed,
        .nmi_allowed = vmx_nmi_allowed,
+       .get_nmi_mask = vmx_get_nmi_mask,
+       .set_nmi_mask = vmx_set_nmi_mask,
        .enable_nmi_window = enable_nmi_window,
        .enable_irq_window = enable_irq_window,
        .update_cr8_intercept = update_cr8_intercept,
@@ -4015,12 +4261,23 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .get_mt_mask = vmx_get_mt_mask,
 
        .exit_reasons_str = vmx_exit_reasons_str,
-       .gb_page_enable = vmx_gb_page_enable,
+       .get_lpage_level = vmx_get_lpage_level,
+
+       .cpuid_update = vmx_cpuid_update,
+
+       .rdtscp_supported = vmx_rdtscp_supported,
+
+       .set_supported_cpuid = vmx_set_supported_cpuid,
 };
 
 static int __init vmx_init(void)
 {
-       int r;
+       int r, i;
+
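+       /* Cache host EFER and register the shared (user-return) MSRs */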
+       rdmsrl_safe(MSR_EFER, &host_efer);
+
+       for (i = 0; i < NR_VMX_MSR; ++i)
+               kvm_define_shared_msr(i, vmx_msr_index[i]);
 
        vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
        if (!vmx_io_bitmap_a)
@@ -4058,7 +4315,8 @@ static int __init vmx_init(void)
 
        set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
 
-       r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
+       r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
+                    __alignof__(struct vcpu_vmx), THIS_MODULE);
        if (r)
                goto out3;