KVM: fix segment_base() error checking
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1de2ad7..814e72a 100644
@@ -39,6 +39,7 @@
 #include <linux/cpufreq.h>
 #include <linux/user-return-notifier.h>
 #include <linux/srcu.h>
+#include <linux/slab.h>
 #include <trace/events/kvm.h>
 #undef TRACE_INCLUDE_FILE
 #define CREATE_TRACE_POINTS
@@ -224,20 +225,22 @@ static void drop_user_return_notifiers(void *ignore)
 
 unsigned long segment_base(u16 selector)
 {
-       struct descriptor_table gdt;
+       struct desc_ptr gdt;
        struct desc_struct *d;
        unsigned long table_base;
        unsigned long v;
 
-       if (selector == 0)
+       if (!(selector & ~3))
                return 0;
 
-       kvm_get_gdt(&gdt);
-       table_base = gdt.base;
+       native_store_gdt(&gdt);
+       table_base = gdt.address;
 
        if (selector & 4) {           /* from ldt */
                u16 ldt_selector = kvm_read_ldt();
 
+               if (!(ldt_selector & ~3))
+                       return 0;
                table_base = segment_base(ldt_selector);
        }
        d = (struct desc_struct *)(table_base + (selector & ~7));
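
A note on the check above: segment_base() now treats any selector whose index
bits are zero as a null selector, whatever its RPL; the old test for
selector == 0 missed selectors 1-3, which are equally null. A minimal
user-space sketch of the predicate, assuming only the SDM selector layout
(index in bits 3-15, TI in bit 2, RPL in bits 0-1):

#include <assert.h>
#include <stdint.h>

/* Mirrors the !(selector & ~3) test in segment_base(): a selector is null
 * when its table index is zero, regardless of the RPL bits. */
static int selector_is_null(uint16_t selector)
{
	return !(selector & ~3);
}

int main(void)
{
	assert(selector_is_null(0x0000));  /* classic null selector */
	assert(selector_is_null(0x0003));  /* index 0, RPL 3 - still null */
	assert(!selector_is_null(0x0008)); /* GDT entry 1 */
	assert(!selector_is_null(0x000f)); /* GDT entry 1, RPL 3 */
	return 0;
}
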
@@ -428,41 +431,38 @@ out:
 
 void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
-       if (cr0 & CR0_RESERVED_BITS) {
-               printk(KERN_DEBUG "set_cr0: 0x%lx #GP, reserved bits 0x%lx\n",
-                      cr0, kvm_read_cr0(vcpu));
+       cr0 |= X86_CR0_ET;
+
+#ifdef CONFIG_X86_64
+       if (cr0 & 0xffffffff00000000UL) {
                kvm_inject_gp(vcpu, 0);
                return;
        }
+#endif
+
+       cr0 &= ~CR0_RESERVED_BITS;
 
        if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) {
-               printk(KERN_DEBUG "set_cr0: #GP, CD == 0 && NW == 1\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
 
        if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) {
-               printk(KERN_DEBUG "set_cr0: #GP, set PG flag "
-                      "and a clear PE flag\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
 
        if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
 #ifdef CONFIG_X86_64
-               if ((vcpu->arch.shadow_efer & EFER_LME)) {
+               if ((vcpu->arch.efer & EFER_LME)) {
                        int cs_db, cs_l;
 
                        if (!is_pae(vcpu)) {
-                               printk(KERN_DEBUG "set_cr0: #GP, start paging "
-                                      "in long mode while PAE is disabled\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                        kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
                        if (cs_l) {
-                               printk(KERN_DEBUG "set_cr0: #GP, start paging "
-                                      "in long mode while CS.L == 1\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
 
@@ -470,8 +470,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
                } else
 #endif
                if (is_pae(vcpu) && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
-                       printk(KERN_DEBUG "set_cr0: #GP, pdptrs "
-                              "reserved bits\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
@@ -479,7 +477,6 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        }
 
        kvm_x86_ops->set_cr0(vcpu, cr0);
-       vcpu->arch.cr0 = cr0;
 
        kvm_mmu_reset_context(vcpu);
        return;
@@ -498,28 +495,23 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
        unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
 
        if (cr4 & CR4_RESERVED_BITS) {
-               printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
 
        if (is_long_mode(vcpu)) {
                if (!(cr4 & X86_CR4_PAE)) {
-                       printk(KERN_DEBUG "set_cr4: #GP, clearing PAE while "
-                              "in long mode\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
                   && ((cr4 ^ old_cr4) & pdptr_bits)
                   && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
-               printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
 
        if (cr4 & X86_CR4_VMXE) {
-               printk(KERN_DEBUG "set_cr4: #GP, setting VMXE\n");
                kvm_inject_gp(vcpu, 0);
                return;
        }
@@ -540,21 +532,16 @@ void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 
        if (is_long_mode(vcpu)) {
                if (cr3 & CR3_L_MODE_RESERVED_BITS) {
-                       printk(KERN_DEBUG "set_cr3: #GP, reserved bits\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
        } else {
                if (is_pae(vcpu)) {
                        if (cr3 & CR3_PAE_RESERVED_BITS) {
-                               printk(KERN_DEBUG
-                                      "set_cr3: #GP, reserved bits\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
                        if (is_paging(vcpu) && !load_pdptrs(vcpu, cr3)) {
-                               printk(KERN_DEBUG "set_cr3: #GP, pdptrs "
-                                      "reserved bits\n");
                                kvm_inject_gp(vcpu, 0);
                                return;
                        }
@@ -586,7 +573,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr3);
 void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
        if (cr8 & CR8_RESERVED_BITS) {
-               printk(KERN_DEBUG "set_cr8: #GP, reserved bits 0x%lx\n", cr8);
                kvm_inject_gp(vcpu, 0);
                return;
        }
@@ -620,9 +606,11 @@ static inline u32 bit(int bitno)
  * kvm-specific. Those are put in the beginning of the list.
  */
 
-#define KVM_SAVE_MSRS_BEGIN    2
+#define KVM_SAVE_MSRS_BEGIN    5
 static u32 msrs_to_save[] = {
        MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
+       HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
+       HV_X64_MSR_APIC_ASSIST_PAGE,
        MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
        MSR_K6_STAR,
 #ifdef CONFIG_X86_64
@@ -640,15 +628,12 @@ static u32 emulated_msrs[] = {
 static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
        if (efer & efer_reserved_bits) {
-               printk(KERN_DEBUG "set_efer: 0x%llx #GP, reserved bits\n",
-                      efer);
                kvm_inject_gp(vcpu, 0);
                return;
        }
 
        if (is_paging(vcpu)
-           && (vcpu->arch.shadow_efer & EFER_LME) != (efer & EFER_LME)) {
-               printk(KERN_DEBUG "set_efer: #GP, change LME while paging\n");
+           && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME)) {
                kvm_inject_gp(vcpu, 0);
                return;
        }
@@ -658,7 +643,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
                feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
                if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT))) {
-                       printk(KERN_DEBUG "set_efer: #GP, enable FFXSR w/o CPUID capability\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
@@ -669,7 +653,6 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
                feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
                if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM))) {
-                       printk(KERN_DEBUG "set_efer: #GP, enable SVM w/o SVM\n");
                        kvm_inject_gp(vcpu, 0);
                        return;
                }
@@ -678,9 +661,9 @@ static void set_efer(struct kvm_vcpu *vcpu, u64 efer)
        kvm_x86_ops->set_efer(vcpu, efer);
 
        efer &= ~EFER_LMA;
-       efer |= vcpu->arch.shadow_efer & EFER_LMA;
+       efer |= vcpu->arch.efer & EFER_LMA;
 
-       vcpu->arch.shadow_efer = efer;
+       vcpu->arch.efer = efer;
 
        vcpu->arch.mmu.base_role.nxe = (efer & EFER_NX) && !tdp_enabled;
        kvm_mmu_reset_context(vcpu);
@@ -958,9 +941,13 @@ static int set_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                if (msr >= MSR_IA32_MC0_CTL &&
                    msr < MSR_IA32_MC0_CTL + 4 * bank_num) {
                        u32 offset = msr - MSR_IA32_MC0_CTL;
-                       /* only 0 or all 1s can be written to IA32_MCi_CTL */
+                       /* only 0 or all 1s can be written to IA32_MCi_CTL
+                        * some Linux kernels, though, clear bit 10 in bank 4 to
+                        * work around a BIOS/GART TBL issue on AMD K8s; ignore
+                        * this to avoid an uncaught #GP in the guest
+                        */
                        if ((offset & 0x3) == 0 &&
-                           data != 0 && data != ~(u64)0)
+                           data != 0 && (data | (1 << 10)) != ~(u64)0)
                                return -1;
                        vcpu->arch.mce_banks[offset] = data;
                        break;
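
For clarity, the relaxed IA32_MCi_CTL check above accepts a write of 0, of all
1s, or of all 1s with only bit 10 cleared (the value some Linux guests write to
bank 4 on AMD K8), and still rejects everything else. A stand-alone sketch of
the same predicate; the helper name is illustrative only:

#include <assert.h>
#include <stdint.h>

/* Mirrors the (data | (1 << 10)) != ~(u64)0 test in set_msr_mce(). */
static int mci_ctl_write_ok(uint64_t data)
{
	return data == 0 || (data | (1ULL << 10)) == ~(uint64_t)0;
}

int main(void)
{
	assert(mci_ctl_write_ok(0));
	assert(mci_ctl_write_ok(~(uint64_t)0));
	assert(mci_ctl_write_ok(~(uint64_t)0 & ~(1ULL << 10))); /* K8 quirk */
	assert(!mci_ctl_write_ok(0x1234));
	return 0;
}
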
@@ -1002,6 +989,100 @@ out:
        return r;
 }
 
+static bool kvm_hv_hypercall_enabled(struct kvm *kvm)
+{
+       return kvm->arch.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
+}
+
+static bool kvm_hv_msr_partition_wide(u32 msr)
+{
+       bool r = false;
+       switch (msr) {
+       case HV_X64_MSR_GUEST_OS_ID:
+       case HV_X64_MSR_HYPERCALL:
+               r = true;
+               break;
+       }
+
+       return r;
+}
+
+static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+{
+       struct kvm *kvm = vcpu->kvm;
+
+       switch (msr) {
+       case HV_X64_MSR_GUEST_OS_ID:
+               kvm->arch.hv_guest_os_id = data;
+               /* setting guest os id to zero disables hypercall page */
+               if (!kvm->arch.hv_guest_os_id)
+                       kvm->arch.hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
+               break;
+       case HV_X64_MSR_HYPERCALL: {
+               u64 gfn;
+               unsigned long addr;
+               u8 instructions[4];
+
+               /* if guest os id is not set, hypercall should remain disabled */
+               if (!kvm->arch.hv_guest_os_id)
+                       break;
+               if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
+                       kvm->arch.hv_hypercall = data;
+                       break;
+               }
+               gfn = data >> HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT;
+               addr = gfn_to_hva(kvm, gfn);
+               if (kvm_is_error_hva(addr))
+                       return 1;
+               kvm_x86_ops->patch_hypercall(vcpu, instructions);
+               ((unsigned char *)instructions)[3] = 0xc3; /* ret */
+               if (copy_to_user((void __user *)addr, instructions, 4))
+                       return 1;
+               kvm->arch.hv_hypercall = data;
+               break;
+       }
+       default:
+               pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
+                         "data 0x%llx\n", msr, data);
+               return 1;
+       }
+       return 0;
+}
+
+static int set_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 data)
+{
+       switch (msr) {
+       case HV_X64_MSR_APIC_ASSIST_PAGE: {
+               unsigned long addr;
+
+               if (!(data & HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE)) {
+                       vcpu->arch.hv_vapic = data;
+                       break;
+               }
+               addr = gfn_to_hva(vcpu->kvm, data >>
+                                 HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT);
+               if (kvm_is_error_hva(addr))
+                       return 1;
+               if (clear_user((void __user *)addr, PAGE_SIZE))
+                       return 1;
+               vcpu->arch.hv_vapic = data;
+               break;
+       }
+       case HV_X64_MSR_EOI:
+               return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
+       case HV_X64_MSR_ICR:
+               return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
+       case HV_X64_MSR_TPR:
+               return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
+       default:
+               pr_unimpl(vcpu, "HYPER-V unimplemented wrmsr: 0x%x "
+                         "data 0x%llx\n", msr, data);
+               return 1;
+       }
+
+       return 0;
+}
+
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
        switch (msr) {
@@ -1010,6 +1091,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                break;
        case MSR_K7_HWCR:
                data &= ~(u64)0x40;     /* ignore flush filter disable */
+               data &= ~(u64)0x100;    /* ignore ignne emulation enable */
                if (data != 0) {
                        pr_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
                                data);
@@ -1116,6 +1198,16 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
                        "0x%x data 0x%llx\n", msr, data);
                break;
+       case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
+               if (kvm_hv_msr_partition_wide(msr)) {
+                       int r;
+                       mutex_lock(&vcpu->kvm->lock);
+                       r = set_msr_hyperv_pw(vcpu, msr, data);
+                       mutex_unlock(&vcpu->kvm->lock);
+                       return r;
+               } else
+                       return set_msr_hyperv(vcpu, msr, data);
+               break;
        default:
                if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
                        return xen_hvm_config(vcpu, data);
@@ -1215,6 +1307,54 @@ static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        return 0;
 }
 
+static int get_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+{
+       u64 data = 0;
+       struct kvm *kvm = vcpu->kvm;
+
+       switch (msr) {
+       case HV_X64_MSR_GUEST_OS_ID:
+               data = kvm->arch.hv_guest_os_id;
+               break;
+       case HV_X64_MSR_HYPERCALL:
+               data = kvm->arch.hv_hypercall;
+               break;
+       default:
+               pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
+               return 1;
+       }
+
+       *pdata = data;
+       return 0;
+}
+
+static int get_msr_hyperv(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
+{
+       u64 data = 0;
+
+       switch (msr) {
+       case HV_X64_MSR_VP_INDEX: {
+               int r;
+               struct kvm_vcpu *v;
+               kvm_for_each_vcpu(r, v, vcpu->kvm)
+                       if (v == vcpu)
+                               data = r;
+               break;
+       }
+       case HV_X64_MSR_EOI:
+               return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
+       case HV_X64_MSR_ICR:
+               return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
+       case HV_X64_MSR_TPR:
+               return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
+       default:
+               pr_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
+               return 1;
+       }
+       *pdata = data;
+       return 0;
+}
+
 int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 {
        u64 data;
@@ -1266,7 +1406,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
                data |= (((uint64_t)4ULL) << 40);
                break;
        case MSR_EFER:
-               data = vcpu->arch.shadow_efer;
+               data = vcpu->arch.efer;
                break;
        case MSR_KVM_WALL_CLOCK:
                data = vcpu->kvm->arch.wall_clock;
@@ -1281,6 +1421,16 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        case MSR_IA32_MCG_STATUS:
        case MSR_IA32_MC0_CTL ... MSR_IA32_MC0_CTL + 4 * KVM_MAX_MCE_BANKS - 1:
                return get_msr_mce(vcpu, msr, pdata);
+       case HV_X64_MSR_GUEST_OS_ID ... HV_X64_MSR_SINT15:
+               if (kvm_hv_msr_partition_wide(msr)) {
+                       int r;
+                       mutex_lock(&vcpu->kvm->lock);
+                       r = get_msr_hyperv_pw(vcpu, msr, pdata);
+                       mutex_unlock(&vcpu->kvm->lock);
+                       return r;
+               } else
+                       return get_msr_hyperv(vcpu, msr, pdata);
+               break;
        default:
                if (!ignore_msrs) {
                        pr_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
@@ -1396,6 +1546,12 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_XEN_HVM:
        case KVM_CAP_ADJUST_CLOCK:
        case KVM_CAP_VCPU_EVENTS:
+       case KVM_CAP_HYPERV:
+       case KVM_CAP_HYPERV_VAPIC:
+       case KVM_CAP_HYPERV_SPIN:
+       case KVM_CAP_PCI_SEGMENT:
+       case KVM_CAP_DEBUGREGS:
+       case KVM_CAP_X86_ROBUST_SINGLESTEP:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
@@ -1947,14 +2103,20 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
 {
        vcpu_load(vcpu);
 
-       events->exception.injected = vcpu->arch.exception.pending;
+       events->exception.injected =
+               vcpu->arch.exception.pending &&
+               !kvm_exception_is_soft(vcpu->arch.exception.nr);
        events->exception.nr = vcpu->arch.exception.nr;
        events->exception.has_error_code = vcpu->arch.exception.has_error_code;
        events->exception.error_code = vcpu->arch.exception.error_code;
 
-       events->interrupt.injected = vcpu->arch.interrupt.pending;
+       events->interrupt.injected =
+               vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
        events->interrupt.nr = vcpu->arch.interrupt.nr;
-       events->interrupt.soft = vcpu->arch.interrupt.soft;
+       events->interrupt.soft = 0;
+       events->interrupt.shadow =
+               kvm_x86_ops->get_interrupt_shadow(vcpu,
+                       KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
 
        events->nmi.injected = vcpu->arch.nmi_injected;
        events->nmi.pending = vcpu->arch.nmi_pending;
@@ -1963,7 +2125,8 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
        events->sipi_vector = vcpu->arch.sipi_vector;
 
        events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
-                        | KVM_VCPUEVENT_VALID_SIPI_VECTOR);
+                        | KVM_VCPUEVENT_VALID_SIPI_VECTOR
+                        | KVM_VCPUEVENT_VALID_SHADOW);
 
        vcpu_put(vcpu);
 }
@@ -1972,7 +2135,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                                              struct kvm_vcpu_events *events)
 {
        if (events->flags & ~(KVM_VCPUEVENT_VALID_NMI_PENDING
-                             | KVM_VCPUEVENT_VALID_SIPI_VECTOR))
+                             | KVM_VCPUEVENT_VALID_SIPI_VECTOR
+                             | KVM_VCPUEVENT_VALID_SHADOW))
                return -EINVAL;
 
        vcpu_load(vcpu);
@@ -1987,6 +2151,9 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
        vcpu->arch.interrupt.soft = events->interrupt.soft;
        if (vcpu->arch.interrupt.pending && irqchip_in_kernel(vcpu->kvm))
                kvm_pic_clear_isr_ack(vcpu->kvm);
+       if (events->flags & KVM_VCPUEVENT_VALID_SHADOW)
+               kvm_x86_ops->set_interrupt_shadow(vcpu,
+                                                 events->interrupt.shadow);
 
        vcpu->arch.nmi_injected = events->nmi.injected;
        if (events->flags & KVM_VCPUEVENT_VALID_NMI_PENDING)
@@ -2001,6 +2168,36 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
        return 0;
 }
 
+static void kvm_vcpu_ioctl_x86_get_debugregs(struct kvm_vcpu *vcpu,
+                                            struct kvm_debugregs *dbgregs)
+{
+       vcpu_load(vcpu);
+
+       memcpy(dbgregs->db, vcpu->arch.db, sizeof(vcpu->arch.db));
+       dbgregs->dr6 = vcpu->arch.dr6;
+       dbgregs->dr7 = vcpu->arch.dr7;
+       dbgregs->flags = 0;
+
+       vcpu_put(vcpu);
+}
+
+static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
+                                           struct kvm_debugregs *dbgregs)
+{
+       if (dbgregs->flags)
+               return -EINVAL;
+
+       vcpu_load(vcpu);
+
+       memcpy(vcpu->arch.db, dbgregs->db, sizeof(vcpu->arch.db));
+       vcpu->arch.dr6 = dbgregs->dr6;
+       vcpu->arch.dr7 = dbgregs->dr7;
+
+       vcpu_put(vcpu);
+
+       return 0;
+}
+
 long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
 {
@@ -2179,6 +2376,29 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                r = kvm_vcpu_ioctl_x86_set_vcpu_events(vcpu, &events);
                break;
        }
+       case KVM_GET_DEBUGREGS: {
+               struct kvm_debugregs dbgregs;
+
+               kvm_vcpu_ioctl_x86_get_debugregs(vcpu, &dbgregs);
+
+               r = -EFAULT;
+               if (copy_to_user(argp, &dbgregs,
+                                sizeof(struct kvm_debugregs)))
+                       break;
+               r = 0;
+               break;
+       }
+       case KVM_SET_DEBUGREGS: {
+               struct kvm_debugregs dbgregs;
+
+               r = -EFAULT;
+               if (copy_from_user(&dbgregs, argp,
+                                  sizeof(struct kvm_debugregs)))
+                       break;
+
+               r = kvm_vcpu_ioctl_x86_set_debugregs(vcpu, &dbgregs);
+               break;
+       }
        default:
                r = -EINVAL;
        }
@@ -2368,18 +2588,18 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
        r = 0;
        switch (chip->chip_id) {
        case KVM_IRQCHIP_PIC_MASTER:
-               spin_lock(&pic_irqchip(kvm)->lock);
+               raw_spin_lock(&pic_irqchip(kvm)->lock);
                memcpy(&pic_irqchip(kvm)->pics[0],
                        &chip->chip.pic,
                        sizeof(struct kvm_pic_state));
-               spin_unlock(&pic_irqchip(kvm)->lock);
+               raw_spin_unlock(&pic_irqchip(kvm)->lock);
                break;
        case KVM_IRQCHIP_PIC_SLAVE:
-               spin_lock(&pic_irqchip(kvm)->lock);
+               raw_spin_lock(&pic_irqchip(kvm)->lock);
                memcpy(&pic_irqchip(kvm)->pics[1],
                        &chip->chip.pic,
                        sizeof(struct kvm_pic_state));
-               spin_unlock(&pic_irqchip(kvm)->lock);
+               raw_spin_unlock(&pic_irqchip(kvm)->lock);
                break;
        case KVM_IRQCHIP_IOAPIC:
                r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
@@ -2459,8 +2679,9 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                                      struct kvm_dirty_log *log)
 {
-       int r, n, i;
+       int r, i;
        struct kvm_memory_slot *memslot;
+       unsigned long n;
        unsigned long is_dirty = 0;
        unsigned long *dirty_bitmap = NULL;
 
@@ -2475,7 +2696,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
        if (!memslot->dirty_bitmap)
                goto out;
 
-       n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+       n = kvm_dirty_bitmap_bytes(memslot);
 
        r = -ENOMEM;
        dirty_bitmap = vmalloc(n);
@@ -2597,6 +2818,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
                if (vpic) {
                        r = kvm_ioapic_init(kvm);
                        if (r) {
+                               kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
+                                                         &vpic->dev);
                                kfree(vpic);
                                goto create_irqchip_unlock;
                        }
@@ -2608,10 +2831,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
                r = kvm_setup_default_irq_routing(kvm);
                if (r) {
                        mutex_lock(&kvm->irq_lock);
-                       kfree(kvm->arch.vpic);
-                       kfree(kvm->arch.vioapic);
-                       kvm->arch.vpic = NULL;
-                       kvm->arch.vioapic = NULL;
+                       kvm_ioapic_destroy(kvm);
+                       kvm_destroy_pic(kvm);
                        mutex_unlock(&kvm->irq_lock);
                }
        create_irqchip_unlock:
@@ -2865,14 +3086,41 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
        return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
 }
 
-static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
-                              struct kvm_vcpu *vcpu)
+gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+{
+       u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+       return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+}
+
+gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+{
+       u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+       access |= PFERR_FETCH_MASK;
+       return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+}
+
+gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+{
+       u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+       access |= PFERR_WRITE_MASK;
+       return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+}
+
+/* used to access any guest's mapped memory without checking CPL */
+gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+{
+       return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error);
+}
+
+static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
+                                     struct kvm_vcpu *vcpu, u32 access,
+                                     u32 *error)
 {
        void *data = val;
        int r = X86EMUL_CONTINUE;
 
        while (bytes) {
-               gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+               gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error);
                unsigned offset = addr & (PAGE_SIZE-1);
                unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
                int ret;
@@ -2895,14 +3143,37 @@ out:
        return r;
 }
 
+/* used for instruction fetching */
+static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
+                               struct kvm_vcpu *vcpu, u32 *error)
+{
+       u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+       return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
+                                         access | PFERR_FETCH_MASK, error);
+}
+
+static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
+                              struct kvm_vcpu *vcpu, u32 *error)
+{
+       u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+       return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
+                                         error);
+}
+
+static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
+                              struct kvm_vcpu *vcpu, u32 *error)
+{
+       return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
+}
+
 static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
-                               struct kvm_vcpu *vcpu)
+                               struct kvm_vcpu *vcpu, u32 *error)
 {
        void *data = val;
        int r = X86EMUL_CONTINUE;
 
        while (bytes) {
-               gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+               gpa_t gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error);
                unsigned offset = addr & (PAGE_SIZE-1);
                unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
                int ret;
@@ -2932,6 +3203,7 @@ static int emulator_read_emulated(unsigned long addr,
                                  struct kvm_vcpu *vcpu)
 {
        gpa_t                 gpa;
+       u32 error_code;
 
        if (vcpu->mmio_read_completed) {
                memcpy(val, vcpu->mmio_data, bytes);
@@ -2941,17 +3213,20 @@ static int emulator_read_emulated(unsigned long addr,
                return X86EMUL_CONTINUE;
        }
 
-       gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+       gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, &error_code);
+
+       if (gpa == UNMAPPED_GVA) {
+               kvm_inject_page_fault(vcpu, addr, error_code);
+               return X86EMUL_PROPAGATE_FAULT;
+       }
 
        /* For APIC access vmexit */
        if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
                goto mmio;
 
-       if (kvm_read_guest_virt(addr, val, bytes, vcpu)
+       if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
                                == X86EMUL_CONTINUE)
                return X86EMUL_CONTINUE;
-       if (gpa == UNMAPPED_GVA)
-               return X86EMUL_PROPAGATE_FAULT;
 
 mmio:
        /*
@@ -2990,11 +3265,12 @@ static int emulator_write_emulated_onepage(unsigned long addr,
                                           struct kvm_vcpu *vcpu)
 {
        gpa_t                 gpa;
+       u32 error_code;
 
-       gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+       gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, &error_code);
 
        if (gpa == UNMAPPED_GVA) {
-               kvm_inject_page_fault(vcpu, addr, 2);
+               kvm_inject_page_fault(vcpu, addr, error_code);
                return X86EMUL_PROPAGATE_FAULT;
        }
 
@@ -3058,7 +3334,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
                char *kaddr;
                u64 val;
 
-               gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+               gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
 
                if (gpa == UNMAPPED_GVA ||
                   (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -3096,34 +3372,20 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
 int emulate_clts(struct kvm_vcpu *vcpu)
 {
        kvm_x86_ops->set_cr0(vcpu, kvm_read_cr0_bits(vcpu, ~X86_CR0_TS));
+       kvm_x86_ops->fpu_activate(vcpu);
        return X86EMUL_CONTINUE;
 }
 
 int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
 {
-       struct kvm_vcpu *vcpu = ctxt->vcpu;
-
-       switch (dr) {
-       case 0 ... 3:
-               *dest = kvm_x86_ops->get_dr(vcpu, dr);
-               return X86EMUL_CONTINUE;
-       default:
-               pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
-               return X86EMUL_UNHANDLEABLE;
-       }
+       return kvm_x86_ops->get_dr(ctxt->vcpu, dr, dest);
 }
 
 int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
 {
        unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U;
-       int exception;
 
-       kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask, &exception);
-       if (exception) {
-               /* FIXME: better handling */
-               return X86EMUL_UNHANDLEABLE;
-       }
-       return X86EMUL_CONTINUE;
+       return kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask);
 }
 
 void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
@@ -3137,7 +3399,7 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
 
        rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
 
-       kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);
+       kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu, NULL);
 
        printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
               context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
@@ -3145,7 +3407,8 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
 EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
 
 static struct x86_emulate_ops emulate_ops = {
-       .read_std            = kvm_read_guest_virt,
+       .read_std            = kvm_read_guest_virt_system,
+       .fetch               = kvm_fetch_guest_virt,
        .read_emulated       = emulator_read_emulated,
        .write_emulated      = emulator_write_emulated,
        .cmpxchg_emulated    = emulator_cmpxchg_emulated,
@@ -3186,10 +3449,11 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
                kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 
                vcpu->arch.emulate_ctxt.vcpu = vcpu;
-               vcpu->arch.emulate_ctxt.eflags = kvm_get_rflags(vcpu);
+               vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
                vcpu->arch.emulate_ctxt.mode =
+                       (!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
                        (vcpu->arch.emulate_ctxt.eflags & X86_EFLAGS_VM)
-                       ? X86EMUL_MODE_REAL : cs_l
+                       ? X86EMUL_MODE_VM86 : cs_l
                        ? X86EMUL_MODE_PROT64 : cs_db
                        ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
 
@@ -3246,7 +3510,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
        if (vcpu->arch.pio.string)
                return EMULATE_DO_MMIO;
 
-       if ((r || vcpu->mmio_is_write) && run) {
+       if (r || vcpu->mmio_is_write) {
                run->exit_reason = KVM_EXIT_MMIO;
                run->mmio.phys_addr = vcpu->mmio_phys_addr;
                memcpy(run->mmio.data, vcpu->mmio_data, 8);
@@ -3264,7 +3528,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
                return EMULATE_DO_MMIO;
        }
 
-       kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+       kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
 
        if (vcpu->mmio_is_write) {
                vcpu->mmio_needed = 0;
@@ -3281,12 +3545,17 @@ static int pio_copy_data(struct kvm_vcpu *vcpu)
        gva_t q = vcpu->arch.pio.guest_gva;
        unsigned bytes;
        int ret;
+       u32 error_code;
 
        bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
        if (vcpu->arch.pio.in)
-               ret = kvm_write_guest_virt(q, p, bytes, vcpu);
+               ret = kvm_write_guest_virt(q, p, bytes, vcpu, &error_code);
        else
-               ret = kvm_read_guest_virt(q, p, bytes, vcpu);
+               ret = kvm_read_guest_virt(q, p, bytes, vcpu, &error_code);
+
+       if (ret == X86EMUL_PROPAGATE_FAULT)
+               kvm_inject_page_fault(vcpu, q, error_code);
+
        return ret;
 }
 
@@ -3307,7 +3576,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
                if (io->in) {
                        r = pio_copy_data(vcpu);
                        if (r)
-                               return r;
+                               goto out;
                }
 
                delta = 1;
@@ -3334,7 +3603,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
                        kvm_register_write(vcpu, VCPU_REGS_RSI, val);
                }
        }
-
+out:
        io->count -= io->cur_count;
        io->cur_count = 0;
 
@@ -3377,6 +3646,8 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, int size, unsigned port)
 {
        unsigned long val;
 
+       trace_kvm_pio(!in, port, size, 1);
+
        vcpu->run->exit_reason = KVM_EXIT_IO;
        vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
        vcpu->run->io.size = vcpu->arch.pio.size = size;
@@ -3388,11 +3659,10 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, int in, int size, unsigned port)
        vcpu->arch.pio.down = 0;
        vcpu->arch.pio.rep = 0;
 
-       trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
-                     size, 1);
-
-       val = kvm_register_read(vcpu, VCPU_REGS_RAX);
-       memcpy(vcpu->arch.pio_data, &val, 4);
+       if (!vcpu->arch.pio.in) {
+               val = kvm_register_read(vcpu, VCPU_REGS_RAX);
+               memcpy(vcpu->arch.pio_data, &val, 4);
+       }
 
        if (!kernel_pio(vcpu, vcpu->arch.pio_data)) {
                complete_pio(vcpu);
@@ -3409,6 +3679,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
        unsigned now, in_page;
        int ret = 0;
 
+       trace_kvm_pio(!in, port, size, count);
+
        vcpu->run->exit_reason = KVM_EXIT_IO;
        vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
        vcpu->run->io.size = vcpu->arch.pio.size = size;
@@ -3420,9 +3692,6 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
        vcpu->arch.pio.down = down;
        vcpu->arch.pio.rep = rep;
 
-       trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
-                     size, count);
-
        if (!count) {
                kvm_x86_ops->skip_emulated_instruction(vcpu);
                return 1;
@@ -3454,10 +3723,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
        if (!vcpu->arch.pio.in) {
                /* string PIO write */
                ret = pio_copy_data(vcpu);
-               if (ret == X86EMUL_PROPAGATE_FAULT) {
-                       kvm_inject_gp(vcpu, 0);
+               if (ret == X86EMUL_PROPAGATE_FAULT)
                        return 1;
-               }
                if (ret == 0 && !pio_string_write(vcpu)) {
                        complete_pio(vcpu);
                        if (vcpu->arch.pio.count == 0)
@@ -3616,11 +3883,76 @@ static inline gpa_t hc_gpa(struct kvm_vcpu *vcpu, unsigned long a0,
                return a0 | ((gpa_t)a1 << 32);
 }
 
+int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
+{
+       u64 param, ingpa, outgpa, ret;
+       uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
+       bool fast, longmode;
+       int cs_db, cs_l;
+
+       /*
+        * hypercall generates #UD from non-zero CPL or real mode,
+        * per the Hyper-V spec
+        */
+       if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
+               kvm_queue_exception(vcpu, UD_VECTOR);
+               return 0;
+       }
+
+       kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+       longmode = is_long_mode(vcpu) && cs_l == 1;
+
+       if (!longmode) {
+               param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
+                       (kvm_register_read(vcpu, VCPU_REGS_RAX) & 0xffffffff);
+               ingpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RBX) << 32) |
+                       (kvm_register_read(vcpu, VCPU_REGS_RCX) & 0xffffffff);
+               outgpa = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDI) << 32) |
+                       (kvm_register_read(vcpu, VCPU_REGS_RSI) & 0xffffffff);
+       }
+#ifdef CONFIG_X86_64
+       else {
+               param = kvm_register_read(vcpu, VCPU_REGS_RCX);
+               ingpa = kvm_register_read(vcpu, VCPU_REGS_RDX);
+               outgpa = kvm_register_read(vcpu, VCPU_REGS_R8);
+       }
+#endif
+
+       code = param & 0xffff;
+       fast = (param >> 16) & 0x1;
+       rep_cnt = (param >> 32) & 0xfff;
+       rep_idx = (param >> 48) & 0xfff;
+
+       trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
+
+       switch (code) {
+       case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
+               kvm_vcpu_on_spin(vcpu);
+               break;
+       default:
+               res = HV_STATUS_INVALID_HYPERCALL_CODE;
+               break;
+       }
+
+       ret = res | (((u64)rep_done & 0xfff) << 32);
+       if (longmode) {
+               kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
+       } else {
+               kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
+               kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
+       }
+
+       return 1;
+}
+
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 {
        unsigned long nr, a0, a1, a2, a3, ret;
        int r = 1;
 
+       if (kvm_hv_hypercall_enabled(vcpu->kvm))
+               return kvm_hv_hypercall(vcpu);
+
        nr = kvm_register_read(vcpu, VCPU_REGS_RAX);
        a0 = kvm_register_read(vcpu, VCPU_REGS_RBX);
        a1 = kvm_register_read(vcpu, VCPU_REGS_RCX);
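
For reference, kvm_hv_hypercall() above unpacks the Hyper-V hypercall input
value as: call code in bits 0-15, fast flag in bit 16, rep count in bits 32-43,
rep start index in bits 48-59; the result it builds puts the status in the low
bits and rep_done in bits 32-43. A stand-alone sketch of the input decoding,
assuming only that bit layout (struct and function names are illustrative):

#include <stdint.h>
#include <stdio.h>

struct hv_call_input {
	uint16_t code;    /* bits 0-15:  hypercall code */
	int      fast;    /* bit 16:     register-based (fast) convention */
	uint16_t rep_cnt; /* bits 32-43: rep count */
	uint16_t rep_idx; /* bits 48-59: rep start index */
};

static struct hv_call_input hv_decode_input(uint64_t param)
{
	struct hv_call_input in;

	in.code    = param & 0xffff;
	in.fast    = (param >> 16) & 0x1;
	in.rep_cnt = (param >> 32) & 0xfff;
	in.rep_idx = (param >> 48) & 0xfff;
	return in;
}

int main(void)
{
	/* made-up example values, packed the same way a guest would */
	uint64_t param = ((uint64_t)1 << 48) | ((uint64_t)3 << 32) |
			 ((uint64_t)1 << 16) | 0x5;
	struct hv_call_input in = hv_decode_input(param);

	printf("code=%#x fast=%d rep_cnt=%u rep_idx=%u\n",
	       (unsigned)in.code, in.fast,
	       (unsigned)in.rep_cnt, (unsigned)in.rep_idx);
	return 0;
}
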
@@ -3663,10 +3995,8 @@ EXPORT_SYMBOL_GPL(kvm_emulate_hypercall);
 int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
 {
        char instruction[3];
-       int ret = 0;
        unsigned long rip = kvm_rip_read(vcpu);
 
-
        /*
         * Blow out the MMU to ensure that no other VCPU has an active mapping
         * to ensure that the updated hypercall appears atomically across all
@@ -3675,11 +4005,8 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
        kvm_mmu_zap_all(vcpu->kvm);
 
        kvm_x86_ops->patch_hypercall(vcpu, instruction);
-       if (emulator_write_emulated(rip, instruction, 3, vcpu)
-           != X86EMUL_CONTINUE)
-               ret = -EFAULT;
 
-       return ret;
+       return emulator_write_emulated(rip, instruction, 3, vcpu);
 }
 
 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
@@ -3689,14 +4016,14 @@ static u64 mk_cr_64(u64 curr_cr, u32 new_val)
 
 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
 {
-       struct descriptor_table dt = { limit, base };
+       struct desc_ptr dt = { limit, base };
 
        kvm_x86_ops->set_gdt(vcpu, &dt);
 }
 
 void realmode_lidt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)
 {
-       struct descriptor_table dt = { limit, base };
+       struct desc_ptr dt = { limit, base };
 
        kvm_x86_ops->set_idt(vcpu, &dt);
 }
@@ -4015,7 +4342,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        preempt_disable();
 
        kvm_x86_ops->prepare_guest_switch(vcpu);
-       kvm_load_guest_fpu(vcpu);
+       if (vcpu->fpu_active)
+               kvm_load_guest_fpu(vcpu);
 
        local_irq_disable();
 
@@ -4200,7 +4528,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                kvm_set_cr8(vcpu, kvm_run->cr8);
 
        if (vcpu->arch.pio.cur_count) {
+               vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
                r = complete_pio(vcpu);
+               srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
                if (r)
                        goto out;
        }
@@ -4318,7 +4648,7 @@ EXPORT_SYMBOL_GPL(kvm_get_cs_db_l_bits);
 int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
 {
-       struct descriptor_table dt;
+       struct desc_ptr dt;
 
        vcpu_load(vcpu);
 
@@ -4333,18 +4663,18 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
        kvm_get_segment(vcpu, &sregs->ldt, VCPU_SREG_LDTR);
 
        kvm_x86_ops->get_idt(vcpu, &dt);
-       sregs->idt.limit = dt.limit;
-       sregs->idt.base = dt.base;
+       sregs->idt.limit = dt.size;
+       sregs->idt.base = dt.address;
        kvm_x86_ops->get_gdt(vcpu, &dt);
-       sregs->gdt.limit = dt.limit;
-       sregs->gdt.base = dt.base;
+       sregs->gdt.limit = dt.size;
+       sregs->gdt.base = dt.address;
 
        sregs->cr0 = kvm_read_cr0(vcpu);
        sregs->cr2 = vcpu->arch.cr2;
        sregs->cr3 = vcpu->arch.cr3;
        sregs->cr4 = kvm_read_cr4(vcpu);
        sregs->cr8 = kvm_get_cr8(vcpu);
-       sregs->efer = vcpu->arch.shadow_efer;
+       sregs->efer = vcpu->arch.efer;
        sregs->apic_base = kvm_get_apic_base(vcpu);
 
        memset(sregs->interrupt_bitmap, 0, sizeof sregs->interrupt_bitmap);
@@ -4409,7 +4739,7 @@ static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
 
 static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
                                          u16 selector,
-                                         struct descriptor_table *dtable)
+                                         struct desc_ptr *dtable)
 {
        if (selector & 1 << 2) {
                struct kvm_segment kvm_seg;
@@ -4417,10 +4747,10 @@ static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
                kvm_get_segment(vcpu, &kvm_seg, VCPU_SREG_LDTR);
 
                if (kvm_seg.unusable)
-                       dtable->limit = 0;
+                       dtable->size = 0;
                else
-                       dtable->limit = kvm_seg.limit;
-               dtable->base = kvm_seg.base;
+                       dtable->size = kvm_seg.limit;
+               dtable->address = kvm_seg.base;
        }
        else
                kvm_x86_ops->get_gdt(vcpu, dtable);
@@ -4430,38 +4760,55 @@ static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
 static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
                                         struct desc_struct *seg_desc)
 {
-       struct descriptor_table dtable;
+       struct desc_ptr dtable;
        u16 index = selector >> 3;
+       int ret;
+       u32 err;
+       gva_t addr;
 
        get_segment_descriptor_dtable(vcpu, selector, &dtable);
 
-       if (dtable.limit < index * 8 + 7) {
+       if (dtable.size < index * 8 + 7) {
                kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
-               return 1;
+               return X86EMUL_PROPAGATE_FAULT;
        }
-       return kvm_read_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
+       addr = dtable.address + index * 8;
+       ret = kvm_read_guest_virt_system(addr, seg_desc, sizeof(*seg_desc),
+                                        vcpu,  &err);
+       if (ret == X86EMUL_PROPAGATE_FAULT)
+               kvm_inject_page_fault(vcpu, addr, err);
+
+       return ret;
 }
 
 /* allowed just for 8 bytes segments */
 static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
                                         struct desc_struct *seg_desc)
 {
-       struct descriptor_table dtable;
+       struct desc_ptr dtable;
        u16 index = selector >> 3;
 
        get_segment_descriptor_dtable(vcpu, selector, &dtable);
 
-       if (dtable.limit < index * 8 + 7)
+       if (dtable.size < index * 8 + 7)
                return 1;
-       return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
+       return kvm_write_guest_virt(dtable.address + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
 }
 
-static gpa_t get_tss_base_addr(struct kvm_vcpu *vcpu,
+static gpa_t get_tss_base_addr_write(struct kvm_vcpu *vcpu,
+                              struct desc_struct *seg_desc)
+{
+       u32 base_addr = get_desc_base(seg_desc);
+
+       return kvm_mmu_gva_to_gpa_write(vcpu, base_addr, NULL);
+}
+
+static gpa_t get_tss_base_addr_read(struct kvm_vcpu *vcpu,
                             struct desc_struct *seg_desc)
 {
        u32 base_addr = get_desc_base(seg_desc);
 
-       return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
+       return kvm_mmu_gva_to_gpa_read(vcpu, base_addr, NULL);
 }
 
 static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
@@ -4472,18 +4819,6 @@ static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
        return kvm_seg.selector;
 }
 
-static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
-                                               u16 selector,
-                                               struct kvm_segment *kvm_seg)
-{
-       struct desc_struct seg_desc;
-
-       if (load_guest_segment_descriptor(vcpu, selector, &seg_desc))
-               return 1;
-       seg_desct_to_kvm_desct(&seg_desc, selector, kvm_seg);
-       return 0;
-}
-
 static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
 {
        struct kvm_segment segvar = {
@@ -4501,7 +4836,7 @@ static int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int se
                .unusable = 0,
        };
        kvm_x86_ops->set_segment(vcpu, &segvar, seg);
-       return 0;
+       return X86EMUL_CONTINUE;
 }
 
 static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
@@ -4511,35 +4846,112 @@ static int is_vm86_segment(struct kvm_vcpu *vcpu, int seg)
                (kvm_get_rflags(vcpu) & X86_EFLAGS_VM);
 }
 
-static void kvm_check_segment_descriptor(struct kvm_vcpu *vcpu, int seg,
-                                        u16 selector)
-{
-       /* NULL selector is not valid for CS and SS */
-       if (seg == VCPU_SREG_CS || seg == VCPU_SREG_SS)
-               if (!selector)
-                       kvm_queue_exception_e(vcpu, TS_VECTOR, selector >> 3);
-}
-
-int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
-                               int type_bits, int seg)
+int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg)
 {
        struct kvm_segment kvm_seg;
+       struct desc_struct seg_desc;
+       u8 dpl, rpl, cpl;
+       unsigned err_vec = GP_VECTOR;
+       u32 err_code = 0;
+       bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
+       int ret;
 
-       if (is_vm86_segment(vcpu, seg) || !(kvm_read_cr0_bits(vcpu, X86_CR0_PE)))
+       if (is_vm86_segment(vcpu, seg) || !is_protmode(vcpu))
                return kvm_load_realmode_segment(vcpu, selector, seg);
-       if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
-               return 1;
 
-       kvm_check_segment_descriptor(vcpu, seg, selector);
-       kvm_seg.type |= type_bits;
+       /* NULL selector is not valid for TR, CS and SS */
+       if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
+           && null_selector)
+               goto exception;
+
+       /* TR should be in GDT only */
+       if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
+               goto exception;
+
+       ret = load_guest_segment_descriptor(vcpu, selector, &seg_desc);
+       if (ret)
+               return ret;
+
+       seg_desct_to_kvm_desct(&seg_desc, selector, &kvm_seg);
+
+       if (null_selector) { /* for NULL selector skip all following checks */
+               kvm_seg.unusable = 1;
+               goto load;
+       }
+
+       err_code = selector & 0xfffc;
+       err_vec = GP_VECTOR;
+
+       /* can't load a system descriptor into a segment register */
+       if (seg <= VCPU_SREG_GS && !kvm_seg.s)
+               goto exception;
+
+       if (!kvm_seg.present) {
+               err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
+               goto exception;
+       }
+
+       rpl = selector & 3;
+       dpl = kvm_seg.dpl;
+       cpl = kvm_x86_ops->get_cpl(vcpu);
 
-       if (seg != VCPU_SREG_SS && seg != VCPU_SREG_CS &&
-           seg != VCPU_SREG_LDTR)
-               if (!kvm_seg.s)
-                       kvm_seg.unusable = 1;
+       switch (seg) {
+       case VCPU_SREG_SS:
+               /*
+                * segment is not a writable data segment, or the selector's
+                * RPL != CPL, or the descriptor's DPL != CPL
+                */
+               if (rpl != cpl || (kvm_seg.type & 0xa) != 0x2 || dpl != cpl)
+                       goto exception;
+               break;
+       case VCPU_SREG_CS:
+               if (!(kvm_seg.type & 8))
+                       goto exception;
+
+               if (kvm_seg.type & 4) {
+                       /* conforming */
+                       if (dpl > cpl)
+                               goto exception;
+               } else {
+                       /* nonconforming */
+                       if (rpl > cpl || dpl != cpl)
+                               goto exception;
+               }
+               /* CS(RPL) <- CPL */
+               selector = (selector & 0xfffc) | cpl;
+               break;
+       case VCPU_SREG_TR:
+               if (kvm_seg.s || (kvm_seg.type != 1 && kvm_seg.type != 9))
+                       goto exception;
+               break;
+       case VCPU_SREG_LDTR:
+               if (kvm_seg.s || kvm_seg.type != 2)
+                       goto exception;
+               break;
+       default: /*  DS, ES, FS, or GS */
+               /*
+                * segment is not a data or readable code segment or
+                * ((segment is a data or nonconforming code segment)
+                * and (both RPL and CPL > DPL))
+                */
+               if ((kvm_seg.type & 0xa) == 0x8 ||
+                   (((kvm_seg.type & 0xc) != 0xc) && (rpl > dpl && cpl > dpl)))
+                       goto exception;
+               break;
+       }
 
+       if (!kvm_seg.unusable && kvm_seg.s) {
+               /* mark segment as accessed */
+               kvm_seg.type |= 1;
+               seg_desc.type |= 1;
+               save_guest_segment_descriptor(vcpu, selector, &seg_desc);
+       }
+load:
        kvm_set_segment(vcpu, &kvm_seg, seg);
-       return 0;
+       return X86EMUL_CONTINUE;
+exception:
+       kvm_queue_exception_e(vcpu, err_vec, err_code);
+       return X86EMUL_PROPAGATE_FAULT;
 }
 
 static void save_state_to_tss32(struct kvm_vcpu *vcpu,
@@ -4565,6 +4977,14 @@ static void save_state_to_tss32(struct kvm_vcpu *vcpu,
        tss->ldt_selector = get_segment_selector(vcpu, VCPU_SREG_LDTR);
 }
 
+static void kvm_load_segment_selector(struct kvm_vcpu *vcpu, u16 sel, int seg)
+{
+       struct kvm_segment kvm_seg;
+       kvm_get_segment(vcpu, &kvm_seg, seg);
+       kvm_seg.selector = sel;
+       kvm_set_segment(vcpu, &kvm_seg, seg);
+}
+
 static int load_state_from_tss32(struct kvm_vcpu *vcpu,
                                  struct tss_segment_32 *tss)
 {
@@ -4582,25 +5002,41 @@ static int load_state_from_tss32(struct kvm_vcpu *vcpu,
        kvm_register_write(vcpu, VCPU_REGS_RSI, tss->esi);
        kvm_register_write(vcpu, VCPU_REGS_RDI, tss->edi);
 
-       if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, 0, VCPU_SREG_LDTR))
+       /*
+        * SDM says that segment selectors are loaded before segment
+        * descriptors
+        */
+       kvm_load_segment_selector(vcpu, tss->ldt_selector, VCPU_SREG_LDTR);
+       kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES);
+       kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS);
+       kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS);
+       kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS);
+       kvm_load_segment_selector(vcpu, tss->fs, VCPU_SREG_FS);
+       kvm_load_segment_selector(vcpu, tss->gs, VCPU_SREG_GS);
+
+       /*
+        * Now load segment descriptors. If a fault happens at this stage
+        * it is handled in the context of the new task
+        */
+       if (kvm_load_segment_descriptor(vcpu, tss->ldt_selector, VCPU_SREG_LDTR))
                return 1;
 
-       if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
+       if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES))
                return 1;
 
-       if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
+       if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS))
                return 1;
 
-       if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
+       if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS))
                return 1;
 
-       if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
+       if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS))
                return 1;
 
-       if (kvm_load_segment_descriptor(vcpu, tss->fs, 1, VCPU_SREG_FS))
+       if (kvm_load_segment_descriptor(vcpu, tss->fs, VCPU_SREG_FS))
                return 1;
 
-       if (kvm_load_segment_descriptor(vcpu, tss->gs, 1, VCPU_SREG_GS))
+       if (kvm_load_segment_descriptor(vcpu, tss->gs, VCPU_SREG_GS))
                return 1;
        return 0;
 }
@@ -4640,19 +5076,33 @@ static int load_state_from_tss16(struct kvm_vcpu *vcpu,
        kvm_register_write(vcpu, VCPU_REGS_RSI, tss->si);
        kvm_register_write(vcpu, VCPU_REGS_RDI, tss->di);
 
-       if (kvm_load_segment_descriptor(vcpu, tss->ldt, 0, VCPU_SREG_LDTR))
+       /*
+        * SDM says that segment selectors are loaded before segment
+        * descriptors
+        */
+       kvm_load_segment_selector(vcpu, tss->ldt, VCPU_SREG_LDTR);
+       kvm_load_segment_selector(vcpu, tss->es, VCPU_SREG_ES);
+       kvm_load_segment_selector(vcpu, tss->cs, VCPU_SREG_CS);
+       kvm_load_segment_selector(vcpu, tss->ss, VCPU_SREG_SS);
+       kvm_load_segment_selector(vcpu, tss->ds, VCPU_SREG_DS);
+
+       /*
+        * Now load segment descriptors. If a fault happens at this stage
+        * it is handled in the context of the new task
+        */
+       if (kvm_load_segment_descriptor(vcpu, tss->ldt, VCPU_SREG_LDTR))
                return 1;
 
-       if (kvm_load_segment_descriptor(vcpu, tss->es, 1, VCPU_SREG_ES))
+       if (kvm_load_segment_descriptor(vcpu, tss->es, VCPU_SREG_ES))
                return 1;
 
-       if (kvm_load_segment_descriptor(vcpu, tss->cs, 9, VCPU_SREG_CS))
+       if (kvm_load_segment_descriptor(vcpu, tss->cs, VCPU_SREG_CS))
                return 1;
 
-       if (kvm_load_segment_descriptor(vcpu, tss->ss, 1, VCPU_SREG_SS))
+       if (kvm_load_segment_descriptor(vcpu, tss->ss, VCPU_SREG_SS))
                return 1;
 
-       if (kvm_load_segment_descriptor(vcpu, tss->ds, 1, VCPU_SREG_DS))
+       if (kvm_load_segment_descriptor(vcpu, tss->ds, VCPU_SREG_DS))
                return 1;
        return 0;
 }
@@ -4674,7 +5124,7 @@ static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
                            sizeof tss_segment_16))
                goto out;
 
-       if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+       if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
                           &tss_segment_16, sizeof tss_segment_16))
                goto out;
 
@@ -4682,7 +5132,7 @@ static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
                tss_segment_16.prev_task_link = old_tss_sel;
 
                if (kvm_write_guest(vcpu->kvm,
-                                   get_tss_base_addr(vcpu, nseg_desc),
+                                   get_tss_base_addr_write(vcpu, nseg_desc),
                                    &tss_segment_16.prev_task_link,
                                    sizeof tss_segment_16.prev_task_link))
                        goto out;
@@ -4713,7 +5163,7 @@ static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
                            sizeof tss_segment_32))
                goto out;
 
-       if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+       if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
                           &tss_segment_32, sizeof tss_segment_32))
                goto out;
 
@@ -4721,7 +5171,7 @@ static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
                tss_segment_32.prev_task_link = old_tss_sel;
 
                if (kvm_write_guest(vcpu->kvm,
-                                   get_tss_base_addr(vcpu, nseg_desc),
+                                   get_tss_base_addr_write(vcpu, nseg_desc),
                                    &tss_segment_32.prev_task_link,
                                    sizeof tss_segment_32.prev_task_link))
                        goto out;
@@ -4743,8 +5193,9 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
        int ret = 0;
        u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
        u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
+       u32 desc_limit;
 
-       old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
+       old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);
 
        /* FIXME: Handle errors. Failure to read either TSS or their
         * descriptors should generate a pagefault.
@@ -4765,7 +5216,10 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
                }
        }
 
-       if (!nseg_desc.p || get_desc_limit(&nseg_desc) < 0x67) {
+       desc_limit = get_desc_limit(&nseg_desc);
+       if (!nseg_desc.p ||
+           ((desc_limit < 0x67 && (nseg_desc.type & 8)) ||
+            desc_limit < 0x2b)) {
                kvm_queue_exception_e(vcpu, TS_VECTOR, tss_selector & 0xfffc);
                return 1;
        }
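
The limit test above encodes the minimum TSS sizes from the SDM: a 32-bit TSS descriptor (type with bit 3 set, types 9/11) must have a limit of at least 0x67, while a 16-bit TSS (types 1/3) needs at least 0x2b. A standalone sketch of the same predicate (hypothetical helper, not KVM code):

	/* Returns non-zero if a present TSS descriptor has a large-enough limit. */
	static int tss_limit_ok(unsigned int type, unsigned int limit)
	{
		if (type & 8)			/* 32-bit TSS: at least 0x68 bytes */
			return limit >= 0x67;
		return limit >= 0x2b;		/* 16-bit TSS: at least 0x2c bytes */
	}
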
@@ -4817,15 +5271,15 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 {
        int mmu_reset_needed = 0;
        int pending_vec, max_bits;
-       struct descriptor_table dt;
+       struct desc_ptr dt;
 
        vcpu_load(vcpu);
 
-       dt.limit = sregs->idt.limit;
-       dt.base = sregs->idt.base;
+       dt.size = sregs->idt.limit;
+       dt.address = sregs->idt.base;
        kvm_x86_ops->set_idt(vcpu, &dt);
-       dt.limit = sregs->gdt.limit;
-       dt.base = sregs->gdt.base;
+       dt.size = sregs->gdt.limit;
+       dt.address = sregs->gdt.base;
        kvm_x86_ops->set_gdt(vcpu, &dt);
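
The IDT/GDT pointers above are now built as struct desc_ptr, whose fields are named size and address rather than the old limit/base. A local illustration of that shape (it mirrors, but does not reproduce, the kernel definition):

	struct dt_ptr {
		unsigned short size;		/* table limit (offset of last valid byte) */
		unsigned long address;		/* linear base address */
	} __attribute__((packed));

	static void fill_dt_ptr(struct dt_ptr *dt, unsigned short limit, unsigned long base)
	{
		dt->size = limit;
		dt->address = base;
	}
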
 
        vcpu->arch.cr2 = sregs->cr2;
@@ -4834,7 +5288,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
        kvm_set_cr8(vcpu, sregs->cr8);
 
-       mmu_reset_needed |= vcpu->arch.shadow_efer != sregs->efer;
+       mmu_reset_needed |= vcpu->arch.efer != sregs->efer;
        kvm_x86_ops->set_efer(vcpu, sregs->efer);
        kvm_set_apic_base(vcpu, sregs->apic_base);
 
@@ -4877,7 +5331,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        /* Older userspace won't unhalt the vcpu on reset. */
        if (kvm_vcpu_is_bsp(vcpu) && kvm_rip_read(vcpu) == 0xfff0 &&
            sregs->cs.selector == 0xf000 && sregs->cs.base == 0xffff0000 &&
-           !(kvm_read_cr0_bits(vcpu, X86_CR0_PE)))
+           !is_protmode(vcpu))
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 
        vcpu_put(vcpu);
@@ -4924,11 +5378,9 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
        }
 
-       if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
-               vcpu->arch.singlestep_cs =
-                       get_segment_selector(vcpu, VCPU_SREG_CS);
-               vcpu->arch.singlestep_rip = kvm_rip_read(vcpu);
-       }
+       if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+               vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
+                       get_segment_base(vcpu, VCPU_SREG_CS);
 
        /*
         * Trigger an rflags update that will inject or remove the trace
@@ -4979,7 +5431,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 
        vcpu_load(vcpu);
        idx = srcu_read_lock(&vcpu->kvm->srcu);
-       gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
+       gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        tr->physical_address = gpa;
        tr->valid = gpa != UNMAPPED_GVA;
@@ -5061,14 +5513,14 @@ EXPORT_SYMBOL_GPL(fx_init);
 
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 {
-       if (!vcpu->fpu_active || vcpu->guest_fpu_loaded)
+       if (vcpu->guest_fpu_loaded)
                return;
 
        vcpu->guest_fpu_loaded = 1;
        kvm_fx_save(&vcpu->arch.host_fx_image);
        kvm_fx_restore(&vcpu->arch.guest_fx_image);
+       trace_kvm_fpu(1);
 }
-EXPORT_SYMBOL_GPL(kvm_load_guest_fpu);
 
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 {
@@ -5080,8 +5532,8 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
        kvm_fx_restore(&vcpu->arch.host_fx_image);
        ++vcpu->stat.fpu_reload;
        set_bit(KVM_REQ_DEACTIVATE_FPU, &vcpu->requests);
+       trace_kvm_fpu(0);
 }
-EXPORT_SYMBOL_GPL(kvm_put_guest_fpu);
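
kvm_load_guest_fpu()/kvm_put_guest_fpu() above form a lazy FPU switch: guest state is restored at most once per load/put cycle, the guest_fpu_loaded flag guards against redundant work, and the new tracepoint marks each transition. A simplified sketch of the guard (hypothetical type, save/restore elided):

	struct fpu_ctx {
		int guest_loaded;
	};

	static void lazy_fpu_load(struct fpu_ctx *c)
	{
		if (c->guest_loaded)
			return;			/* guest state already live, nothing to do */
		c->guest_loaded = 1;
		/* save host FPU image, restore guest FPU image */
	}

	static void lazy_fpu_put(struct fpu_ctx *c)
	{
		if (!c->guest_loaded)
			return;
		c->guest_loaded = 0;
		/* save guest FPU image, restore host FPU image */
	}
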
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
@@ -5312,6 +5764,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
                put_page(kvm->arch.apic_access_page);
        if (kvm->arch.ept_identity_pagetable)
                put_page(kvm->arch.ept_identity_pagetable);
+       cleanup_srcu_struct(&kvm->srcu);
        kfree(kvm->arch.aliases);
        kfree(kvm);
 }
@@ -5418,13 +5871,22 @@ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
        return kvm_x86_ops->interrupt_allowed(vcpu);
 }
 
+bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip)
+{
+       unsigned long current_rip = kvm_rip_read(vcpu) +
+               get_segment_base(vcpu, VCPU_SREG_CS);
+
+       return current_rip == linear_rip;
+}
+EXPORT_SYMBOL_GPL(kvm_is_linear_rip);
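+
+Together with the singlestep_rip change above, kvm_is_linear_rip() replaces the old (CS selector, RIP) pair with a single linear address, CS base plus RIP, so callers only have to remember one value. A minimal sketch of the arm/compare pair (hypothetical names, not the KVM API):
+
+	static unsigned long linear_rip(unsigned long cs_base, unsigned long rip)
+	{
+		return cs_base + rip;		/* linear address of the current instruction */
+	}
+
+	/* Arm: saved = linear_rip(cs_base, rip); later, check we are still there. */
+	static int at_saved_rip(unsigned long saved, unsigned long cs_base, unsigned long rip)
+	{
+		return linear_rip(cs_base, rip) == saved;
+	}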
+
 unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
 {
        unsigned long rflags;
 
        rflags = kvm_x86_ops->get_rflags(vcpu);
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
-               rflags &= ~(unsigned long)(X86_EFLAGS_TF | X86_EFLAGS_RF);
+               rflags &= ~X86_EFLAGS_TF;
        return rflags;
 }
 EXPORT_SYMBOL_GPL(kvm_get_rflags);
@@ -5432,10 +5894,8 @@ EXPORT_SYMBOL_GPL(kvm_get_rflags);
 void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
-           vcpu->arch.singlestep_cs ==
-                       get_segment_selector(vcpu, VCPU_SREG_CS) &&
-           vcpu->arch.singlestep_rip == kvm_rip_read(vcpu))
-               rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
+           kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
+               rflags |= X86_EFLAGS_TF;
        kvm_x86_ops->set_rflags(vcpu, rflags);
 }
 EXPORT_SYMBOL_GPL(kvm_set_rflags);
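
Together, kvm_get_rflags()/kvm_set_rflags() keep the host-injected trap flag invisible to the guest: TF is masked out on read while KVM_GUESTDBG_SINGLESTEP is active, and re-added on write only while the guest is still at the armed linear RIP. A standalone sketch of that policy (X86_EFLAGS_TF is bit 8; the dbg structure below is hypothetical):

	#define SKETCH_EFLAGS_TF	(1UL << 8)

	struct dbg_state {
		int singlestep;			/* host-requested single-stepping active */
		unsigned long singlestep_rip;	/* linear RIP at which TF was armed */
	};

	static unsigned long dbg_get_rflags(const struct dbg_state *d, unsigned long hw)
	{
		if (d->singlestep)
			hw &= ~SKETCH_EFLAGS_TF;	/* hide the injected trap flag from the guest */
		return hw;
	}

	static unsigned long dbg_set_rflags(const struct dbg_state *d, unsigned long rflags,
					    unsigned long cur_linear_rip)
	{
		if (d->singlestep && cur_linear_rip == d->singlestep_rip)
			rflags |= SKETCH_EFLAGS_TF;	/* still at the stepped instruction, keep TF */
		return rflags;
	}
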
@@ -5451,3 +5911,4 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_vmexit_inject);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intr_vmexit);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_invlpga);
 EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_skinit);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_nested_intercepts);