KVM: Do not report TPR write to userspace if new value is bigger than or equal to a previous...
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 0fbbde5..e283a63 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -19,6 +19,7 @@
 #include "irq.h"
 #include "mmu.h"
 #include "kvm_cache_regs.h"
+#include "x86.h"
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -38,9 +39,6 @@ MODULE_LICENSE("GPL");
 #define IOPM_ALLOC_ORDER 2
 #define MSRPM_ALLOC_ORDER 1
 
-#define DR7_GD_MASK (1 << 13)
-#define DR6_BD_MASK (1 << 13)
-
 #define SEG_TYPE_LDT 2
 #define SEG_TYPE_BUSY_TSS16 3
 
@@ -72,7 +70,6 @@ module_param(npt, int, S_IRUGO);
 static int nested = 0;
 module_param(nested, int, S_IRUGO);
 
-static void kvm_reput_irq(struct vcpu_svm *svm);
 static void svm_flush_tlb(struct kvm_vcpu *vcpu);
 
 static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override);
@@ -135,24 +132,6 @@ static inline u32 svm_has(u32 feat)
        return svm_features & feat;
 }
 
-static inline u8 pop_irq(struct kvm_vcpu *vcpu)
-{
-       int word_index = __ffs(vcpu->arch.irq_summary);
-       int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
-       int irq = word_index * BITS_PER_LONG + bit_index;
-
-       clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
-       if (!vcpu->arch.irq_pending[word_index])
-               clear_bit(word_index, &vcpu->arch.irq_summary);
-       return irq;
-}
-
-static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
-{
-       set_bit(irq, vcpu->arch.irq_pending);
-       set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
-}
-
 static inline void clgi(void)
 {
        asm volatile (__ex(SVM_CLGI));
@@ -181,32 +160,6 @@ static inline void kvm_write_cr2(unsigned long val)
        asm volatile ("mov %0, %%cr2" :: "r" (val));
 }
 
-static inline unsigned long read_dr6(void)
-{
-       unsigned long dr6;
-
-       asm volatile ("mov %%dr6, %0" : "=r" (dr6));
-       return dr6;
-}
-
-static inline void write_dr6(unsigned long val)
-{
-       asm volatile ("mov %0, %%dr6" :: "r" (val));
-}
-
-static inline unsigned long read_dr7(void)
-{
-       unsigned long dr7;
-
-       asm volatile ("mov %%dr7, %0" : "=r" (dr7));
-       return dr7;
-}
-
-static inline void write_dr7(unsigned long val)
-{
-       asm volatile ("mov %0, %%dr7" :: "r" (val));
-}
-
 static inline void force_new_asid(struct kvm_vcpu *vcpu)
 {
        to_svm(vcpu)->asid_generation--;
@@ -243,13 +196,6 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
        svm->vmcb->control.event_inj_err = error_code;
 }
 
-static bool svm_exception_injected(struct kvm_vcpu *vcpu)
-{
-       struct vcpu_svm *svm = to_svm(vcpu);
-
-       return !(svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID);
-}
-
 static int is_external_interrupt(u32 info)
 {
        info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
@@ -279,7 +225,7 @@ static int has_svm(void)
        const char *msg;
 
        if (!cpu_has_svm(&msg)) {
-               printk(KERN_INFO "has_svn: %s\n", msg);
+               printk(KERN_INFO "has_svm: %s\n", msg);
                return 0;
        }
 
@@ -440,12 +386,14 @@ static __init int svm_hardware_setup(void)
 
        iopm_va = page_address(iopm_pages);
        memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
-       clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
        iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
 
        if (boot_cpu_has(X86_FEATURE_NX))
                kvm_enable_efer_bits(EFER_NX);
 
+       if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
+               kvm_enable_efer_bits(EFER_FFXSR);
+
        if (nested) {
                printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
                kvm_enable_efer_bits(EFER_SVME);
@@ -695,7 +643,6 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
        clear_page(svm->vmcb);
        svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
        svm->asid_generation = 0;
-       memset(svm->db_regs, 0, sizeof(svm->db_regs));
        init_vmcb(svm);
 
        fx_init(&svm->vcpu);
@@ -823,22 +770,42 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
        var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
        var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
 
-       /*
-        * SVM always stores 0 for the 'G' bit in the CS selector in
-        * the VMCB on a VMEXIT. This hurts cross-vendor migration:
-        * Intel's VMENTRY has a check on the 'G' bit.
+       /* AMD's VMCB does not have an explicit unusable field, so emulate it
+        * for cross-vendor migration purposes by deriving it from "not present"
         */
-       if (seg == VCPU_SREG_CS)
-               var->g = s->limit > 0xfffff;
+       var->unusable = !var->present || (var->type == 0);
 
-       /*
-        * Work around a bug where the busy flag in the tr selector
-        * isn't exposed
-        */
-       if (seg == VCPU_SREG_TR)
+       switch (seg) {
+       case VCPU_SREG_CS:
+               /*
+                * SVM always stores 0 for the 'G' bit in the CS selector in
+                * the VMCB on a VMEXIT. This hurts cross-vendor migration:
+                * Intel's VMENTRY has a check on the 'G' bit.
+                */
+               var->g = s->limit > 0xfffff;
+               break;
+       case VCPU_SREG_TR:
+               /*
+                * Work around a bug where the busy flag in the tr selector
+                * isn't exposed
+                */
                var->type |= 0x2;
-
-       var->unusable = !var->present;
+               break;
+       case VCPU_SREG_DS:
+       case VCPU_SREG_ES:
+       case VCPU_SREG_FS:
+       case VCPU_SREG_GS:
+               /*
+                * The accessed bit must always be set in the segment
+                * descriptor cache: even when it is cleared in the
+                * descriptor itself, the cached bit always reads as 1. Since
+                * Intel has a check on this, set it here to support
+                * cross-vendor migration.
+                */
+               if (!var->unusable)
+                       var->type |= 0x1;
+               break;
+       }
 }
 
 static int svm_get_cpl(struct kvm_vcpu *vcpu)
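
A standalone sketch of the three cross-vendor fixups above. The packed attrib
layout (type in bits 0-3, P in bit 7) follows the SVM_SELECTOR_* definitions
in asm/svm.h; the struct and helper are illustrative stand-ins, not the
kernel's types:

    #include <stdio.h>
    #include <stdint.h>

    struct seg {
            uint16_t attrib;        /* packed VMCB attribute word */
            uint32_t limit;
            int type, present, unusable, g;
    };

    static void fixup_segment(struct seg *s, int is_cs, int is_tr, int is_data)
    {
            s->type = s->attrib & 0xf;
            s->present = (s->attrib >> 7) & 1;
            /* the VMCB has no unusable bit; derive it from P and a null type */
            s->unusable = !s->present || s->type == 0;
            if (is_cs)              /* SVM stores G as 0 for CS on #VMEXIT */
                    s->g = s->limit > 0xfffff;
            if (is_tr)              /* expose the hidden busy flag */
                    s->type |= 0x2;
            if (is_data && !s->unusable)
                    s->type |= 0x1; /* accessed bit is always cached as 1 */
    }

    int main(void)
    {
            struct seg cs = { .attrib = 0x9b, .limit = 0xffffffffu };
            fixup_segment(&cs, 1, 0, 0);
            printf("cs: type=%x g=%d unusable=%d\n", cs.type, cs.g, cs.unusable);
            return 0;
    }
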
@@ -968,19 +935,44 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
 
 }
 
-static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
+static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
 {
-       return -EOPNOTSUPP;
+       int old_debug = vcpu->guest_debug;
+       struct vcpu_svm *svm = to_svm(vcpu);
+
+       vcpu->guest_debug = dbg->control;
+
+       svm->vmcb->control.intercept_exceptions &=
+               ~((1 << DB_VECTOR) | (1 << BP_VECTOR));
+       if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
+               if (vcpu->guest_debug &
+                   (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
+                       svm->vmcb->control.intercept_exceptions |=
+                               1 << DB_VECTOR;
+               if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
+                       svm->vmcb->control.intercept_exceptions |=
+                               1 << BP_VECTOR;
+       } else
+               vcpu->guest_debug = 0;
+
+       if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
+               svm->vmcb->save.dr7 = dbg->arch.debugreg[7];
+       else
+               svm->vmcb->save.dr7 = vcpu->arch.dr7;
+
+       if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+               svm->vmcb->save.rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
+       else if (old_debug & KVM_GUESTDBG_SINGLESTEP)
+               svm->vmcb->save.rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
+
+       return 0;
 }
 
 static int svm_get_irq(struct kvm_vcpu *vcpu)
 {
-       struct vcpu_svm *svm = to_svm(vcpu);
-       u32 exit_int_info = svm->vmcb->control.exit_int_info;
-
-       if (is_external_interrupt(exit_int_info))
-               return exit_int_info & SVM_EVTINJ_VEC_MASK;
-       return -1;
+       if (!vcpu->arch.interrupt.pending)
+               return -1;
+       return vcpu->arch.interrupt.nr;
 }
 
 static void load_host_msrs(struct kvm_vcpu *vcpu)
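
The flag-to-intercept mapping that the new svm_guest_debug() installs can be
modelled in isolation. The KVM_GUESTDBG_* values are the uapi constants of
this period, and the helper is a sketch, not the kernel function:

    #include <stdio.h>
    #include <stdint.h>

    #define KVM_GUESTDBG_ENABLE     0x00000001
    #define KVM_GUESTDBG_SINGLESTEP 0x00000002
    #define KVM_GUESTDBG_USE_SW_BP  0x00010000
    #define KVM_GUESTDBG_USE_HW_BP  0x00020000
    #define DB_VECTOR 1
    #define BP_VECTOR 3

    /* returns the intercept_exceptions bits that debugging needs */
    static uint32_t debug_intercepts(uint32_t control)
    {
            uint32_t mask = 0;

            if (!(control & KVM_GUESTDBG_ENABLE))
                    return 0;
            /* single-stepping and hardware breakpoints both raise #DB */
            if (control & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
                    mask |= 1u << DB_VECTOR;
            /* software breakpoints (int3) raise #BP */
            if (control & KVM_GUESTDBG_USE_SW_BP)
                    mask |= 1u << BP_VECTOR;
            return mask;
    }

    int main(void)
    {
            printf("intercepts=%#x\n",
                   debug_intercepts(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP));
            return 0;
    }
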
@@ -1012,7 +1004,29 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
 
 static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
 {
-       unsigned long val = to_svm(vcpu)->db_regs[dr];
+       struct vcpu_svm *svm = to_svm(vcpu);
+       unsigned long val;
+
+       switch (dr) {
+       case 0 ... 3:
+               val = vcpu->arch.db[dr];
+               break;
+       case 6:
+               if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
+                       val = vcpu->arch.dr6;
+               else
+                       val = svm->vmcb->save.dr6;
+               break;
+       case 7:
+               if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
+                       val = vcpu->arch.dr7;
+               else
+                       val = svm->vmcb->save.dr7;
+               break;
+       default:
+               val = 0;
+       }
+
        KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
        return val;
 }
@@ -1022,33 +1036,40 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
-       *exception = 0;
+       KVMTRACE_2D(DR_WRITE, vcpu, (u32)dr, (u32)value, handler);
 
-       if (svm->vmcb->save.dr7 & DR7_GD_MASK) {
-               svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
-               svm->vmcb->save.dr6 |= DR6_BD_MASK;
-               *exception = DB_VECTOR;
-               return;
-       }
+       *exception = 0;
 
        switch (dr) {
        case 0 ... 3:
-               svm->db_regs[dr] = value;
+               vcpu->arch.db[dr] = value;
+               if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
+                       vcpu->arch.eff_db[dr] = value;
                return;
        case 4 ... 5:
-               if (vcpu->arch.cr4 & X86_CR4_DE) {
+               if (vcpu->arch.cr4 & X86_CR4_DE)
                        *exception = UD_VECTOR;
+               return;
+       case 6:
+               if (value & 0xffffffff00000000ULL) {
+                       *exception = GP_VECTOR;
                        return;
                }
-       case 7: {
-               if (value & ~((1ULL << 32) - 1)) {
+               vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1;
+               return;
+       case 7:
+               if (value & 0xffffffff00000000ULL) {
                        *exception = GP_VECTOR;
                        return;
                }
-               svm->vmcb->save.dr7 = value;
+               vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
+               if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
+                       svm->vmcb->save.dr7 = vcpu->arch.dr7;
+                       vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK);
+               }
                return;
-       }
        default:
+               /* FIXME: Possible case? */
                printk(KERN_DEBUG "%s: unexpected dr %u\n",
                       __func__, dr);
                *exception = UD_VECTOR;
@@ -1058,17 +1079,8 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
 
 static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-       u32 exit_int_info = svm->vmcb->control.exit_int_info;
-       struct kvm *kvm = svm->vcpu.kvm;
        u64 fault_address;
        u32 error_code;
-       bool event_injection = false;
-
-       if (!irqchip_in_kernel(kvm) &&
-           is_external_interrupt(exit_int_info)) {
-               event_injection = true;
-               push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
-       }
 
        fault_address  = svm->vmcb->control.exit_info_2;
        error_code = svm->vmcb->control.exit_info_1;
@@ -1088,12 +1100,35 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
         */
        if (npt_enabled)
                svm_flush_tlb(&svm->vcpu);
-
-       if (!npt_enabled && event_injection)
-               kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
+       else {
+               if (svm->vcpu.arch.interrupt.pending ||
+                               svm->vcpu.arch.exception.pending)
+                       kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
+       }
        return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
 }
 
+static int db_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+       if (!(svm->vcpu.guest_debug &
+             (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
+               kvm_queue_exception(&svm->vcpu, DB_VECTOR);
+               return 1;
+       }
+       kvm_run->exit_reason = KVM_EXIT_DEBUG;
+       kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
+       kvm_run->debug.arch.exception = DB_VECTOR;
+       return 0;
+}
+
+static int bp_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+       kvm_run->exit_reason = KVM_EXIT_DEBUG;
+       kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
+       kvm_run->debug.arch.exception = BP_VECTOR;
+       return 0;
+}
+
 static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
        int er;
@@ -1143,7 +1178,7 @@ static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
        u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
-       int size, down, in, string, rep;
+       int size, in, string;
        unsigned port;
 
        ++svm->vcpu.stat.io_exits;
@@ -1162,8 +1197,6 @@ static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
        in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
        port = io_info >> 16;
        size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
-       rep = (io_info & SVM_IOIO_REP_MASK) != 0;
-       down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;
 
        skip_emulated_instruction(&svm->vcpu);
        return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
@@ -1774,17 +1807,35 @@ static int task_switch_interception(struct vcpu_svm *svm,
                                    struct kvm_run *kvm_run)
 {
        u16 tss_selector;
+       int reason;
+       int int_type = svm->vmcb->control.exit_int_info &
+               SVM_EXITINTINFO_TYPE_MASK;
+       int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
 
        tss_selector = (u16)svm->vmcb->control.exit_info_1;
+
        if (svm->vmcb->control.exit_info_2 &
            (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
-               return kvm_task_switch(&svm->vcpu, tss_selector,
-                                      TASK_SWITCH_IRET);
-       if (svm->vmcb->control.exit_info_2 &
-           (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
-               return kvm_task_switch(&svm->vcpu, tss_selector,
-                                      TASK_SWITCH_JMP);
-       return kvm_task_switch(&svm->vcpu, tss_selector, TASK_SWITCH_CALL);
+               reason = TASK_SWITCH_IRET;
+       else if (svm->vmcb->control.exit_info_2 &
+                (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
+               reason = TASK_SWITCH_JMP;
+       else if (svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID)
+               reason = TASK_SWITCH_GATE;
+       else
+               reason = TASK_SWITCH_CALL;
+
+       if (reason != TASK_SWITCH_GATE ||
+           int_type == SVM_EXITINTINFO_TYPE_SOFT ||
+           (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
+            (int_vec == OF_VECTOR || int_vec == BP_VECTOR))) {
+               if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0,
+                                       EMULTYPE_SKIP) != EMULATE_DONE)
+                       return 0;
+       }
+
+       return kvm_task_switch(&svm->vcpu, tss_selector, reason);
 }
 
 static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
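
The reason decoding above can be read in isolation: exit_info_2 carries the
IRET and JMP flags, and a valid exit_int_info means the switch was triggered
by event delivery through a task gate. A sketch, assuming the
SVM_EXITINFOSHIFT_TS_REASON_* bit positions (36 and 38) from asm/svm.h:

    #include <stdio.h>
    #include <stdint.h>

    enum ts_reason { TS_IRET, TS_JMP, TS_GATE, TS_CALL };

    #define TS_REASON_IRET_SHIFT 36
    #define TS_REASON_JMP_SHIFT  38
    #define EXITINTINFO_VALID    (1u << 31)

    static enum ts_reason decode_reason(uint64_t exit_info_2,
                                        uint32_t exit_int_info)
    {
            if (exit_info_2 & (1ULL << TS_REASON_IRET_SHIFT))
                    return TS_IRET;
            if (exit_info_2 & (1ULL << TS_REASON_JMP_SHIFT))
                    return TS_JMP;
            if (exit_int_info & EXITINTINFO_VALID)
                    return TS_GATE; /* caused by a delivered event */
            return TS_CALL;
    }

    int main(void)
    {
            /* IRET-initiated switch, no event pending: prints 0 (TS_IRET) */
            printf("reason=%d\n", decode_reason(1ULL << 36, 0));
            return 0;
    }
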
@@ -1811,9 +1862,13 @@ static int emulate_on_interception(struct vcpu_svm *svm,
 
 static int cr8_write_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
+       u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
+       /* instruction emulation calls kvm_set_cr8() */
        emulate_instruction(&svm->vcpu, NULL, 0, 0, 0);
        if (irqchip_in_kernel(svm->vcpu.kvm))
                return 1;
+       if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
+               return 1;
        kvm_run->exit_reason = KVM_EXIT_SET_TPR;
        return 0;
 }
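
This hunk is the change the title describes: with a userspace irqchip, a TPR
write is only worth a KVM_EXIT_SET_TPR exit when it lowers the task priority,
because only a lower TPR can make a previously masked interrupt deliverable.
The decision reduces to one comparison (sketch):

    #include <stdio.h>

    /* returns 1 when userspace must be told about the CR8 write */
    static int tpr_write_needs_exit(unsigned char prev, unsigned char next)
    {
            return next < prev;
    }

    int main(void)
    {
            printf("%d %d\n", tpr_write_needs_exit(8, 4),   /* 1: lowered */
                              tpr_write_needs_exit(4, 8));  /* 0: raised */
            return 0;
    }
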
@@ -1880,6 +1935,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
        case MSR_VM_CR:
                *data = 0;
                break;
+       case MSR_IA32_UCODE_REV:
+               *data = 0x01000065;
+               break;
        default:
                return kvm_get_msr_common(vcpu, ecx, data);
        }
@@ -2019,8 +2077,9 @@ static int interrupt_window_interception(struct vcpu_svm *svm,
         * If the user space waits to inject interrupts, exit as soon as
         * possible
         */
-       if (kvm_run->request_interrupt_window &&
-           !svm->vcpu.arch.irq_summary) {
+       if (!irqchip_in_kernel(svm->vcpu.kvm) &&
+           kvm_run->request_interrupt_window &&
+           !kvm_cpu_has_interrupt(&svm->vcpu)) {
                ++svm->vcpu.stat.irq_window_exits;
                kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
                return 0;
@@ -2050,6 +2109,8 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
        [SVM_EXIT_WRITE_DR3]                    = emulate_on_interception,
        [SVM_EXIT_WRITE_DR5]                    = emulate_on_interception,
        [SVM_EXIT_WRITE_DR7]                    = emulate_on_interception,
+       [SVM_EXIT_EXCP_BASE + DB_VECTOR]        = db_interception,
+       [SVM_EXIT_EXCP_BASE + BP_VECTOR]        = bp_interception,
        [SVM_EXIT_EXCP_BASE + UD_VECTOR]        = ud_interception,
        [SVM_EXIT_EXCP_BASE + PF_VECTOR]        = pf_interception,
        [SVM_EXIT_EXCP_BASE + NM_VECTOR]        = nm_interception,
@@ -2121,7 +2182,6 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
                }
        }
 
-       kvm_reput_irq(svm);
 
        if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
                kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
@@ -2184,13 +2244,19 @@ static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
                ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
 }
 
+static void svm_queue_irq(struct vcpu_svm *svm, unsigned nr)
+{
+       svm->vmcb->control.event_inj = nr |
+               SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
+}
+
 static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
        nested_svm_intr(svm);
 
-       svm_inject_irq(svm, irq);
+       svm_queue_irq(svm, irq);
 }
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu)
@@ -2214,104 +2280,56 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
                vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
 }
 
-static void svm_intr_assist(struct kvm_vcpu *vcpu)
+static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *vmcb = svm->vmcb;
-       int intr_vector = -1;
-
-       if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) &&
-           ((vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK) == 0)) {
-               intr_vector = vmcb->control.exit_int_info &
-                             SVM_EVTINJ_VEC_MASK;
-               vmcb->control.exit_int_info = 0;
-               svm_inject_irq(svm, intr_vector);
-               goto out;
-       }
-
-       if (vmcb->control.int_ctl & V_IRQ_MASK)
-               goto out;
-
-       if (!kvm_cpu_has_interrupt(vcpu))
-               goto out;
-
-       if (nested_svm_intr(svm))
-               goto out;
-
-       if (!(svm->vcpu.arch.hflags & HF_GIF_MASK))
-               goto out;
-
-       if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
-           (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
-           (vmcb->control.event_inj & SVM_EVTINJ_VALID)) {
-               /* unable to deliver irq, set pending irq */
-               svm_set_vintr(svm);
-               svm_inject_irq(svm, 0x0);
-               goto out;
-       }
-       /* Okay, we can deliver the interrupt: grab it and update PIC state. */
-       intr_vector = kvm_cpu_get_interrupt(vcpu);
-       svm_inject_irq(svm, intr_vector);
-out:
-       update_cr8_intercept(vcpu);
+       return (vmcb->save.rflags & X86_EFLAGS_IF) &&
+               !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
+               (svm->vcpu.arch.hflags & HF_GIF_MASK);
 }
 
-static void kvm_reput_irq(struct vcpu_svm *svm)
+static void enable_irq_window(struct kvm_vcpu *vcpu)
 {
-       struct vmcb_control_area *control = &svm->vmcb->control;
-
-       if ((control->int_ctl & V_IRQ_MASK)
-           && !irqchip_in_kernel(svm->vcpu.kvm)) {
-               control->int_ctl &= ~V_IRQ_MASK;
-               push_irq(&svm->vcpu, control->int_vector);
-       }
-
-       svm->vcpu.arch.interrupt_window_open =
-               !(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
-                (svm->vcpu.arch.hflags & HF_GIF_MASK);
+       svm_set_vintr(to_svm(vcpu));
+       svm_inject_irq(to_svm(vcpu), 0x0);
 }
 
-static void svm_do_inject_vector(struct vcpu_svm *svm)
+static void svm_intr_inject(struct kvm_vcpu *vcpu)
 {
-       struct kvm_vcpu *vcpu = &svm->vcpu;
-       int word_index = __ffs(vcpu->arch.irq_summary);
-       int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
-       int irq = word_index * BITS_PER_LONG + bit_index;
+       /* try to reinject previous events if any */
+       if (vcpu->arch.interrupt.pending) {
+               svm_queue_irq(to_svm(vcpu), vcpu->arch.interrupt.nr);
+               return;
+       }
 
-       clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
-       if (!vcpu->arch.irq_pending[word_index])
-               clear_bit(word_index, &vcpu->arch.irq_summary);
-       svm_inject_irq(svm, irq);
+       /* try to inject new event if pending */
+       if (kvm_cpu_has_interrupt(vcpu)) {
+               if (vcpu->arch.interrupt_window_open) {
+                       kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu));
+                       svm_queue_irq(to_svm(vcpu), vcpu->arch.interrupt.nr);
+               }
+       }
 }
 
-static void do_interrupt_requests(struct kvm_vcpu *vcpu,
-                                      struct kvm_run *kvm_run)
+static void svm_intr_assist(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
-       struct vmcb_control_area *control = &svm->vmcb->control;
+       bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
+               kvm_run->request_interrupt_window;
 
        if (nested_svm_intr(svm))
-               return;
+               goto out;
 
-       svm->vcpu.arch.interrupt_window_open =
-               (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
-                (svm->vmcb->save.rflags & X86_EFLAGS_IF) &&
-                (svm->vcpu.arch.hflags & HF_GIF_MASK));
+       svm->vcpu.arch.interrupt_window_open = svm_interrupt_allowed(vcpu);
 
-       if (svm->vcpu.arch.interrupt_window_open && svm->vcpu.arch.irq_summary)
-               /*
-                * If interrupts enabled, and not blocked by sti or mov ss. Good.
-                */
-               svm_do_inject_vector(svm);
+       svm_intr_inject(vcpu);
 
-       /*
-        * Interrupts blocked.  Wait for unblock.
-        */
-       if (!svm->vcpu.arch.interrupt_window_open &&
-           (svm->vcpu.arch.irq_summary || kvm_run->request_interrupt_window))
-               svm_set_vintr(svm);
-       else
-               svm_clear_vintr(svm);
+       if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
+               enable_irq_window(vcpu);
+
+out:
+       update_cr8_intercept(vcpu);
 }
 
 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
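
The rework above separates "may I inject?" (svm_interrupt_allowed) from "what
do I inject?" (svm_intr_inject): a left-over event is reinjected first, a new
vector is pulled from the irqchip only while the window is open, and otherwise
a virtual-interrupt window exit is requested. A simplified standalone model of
that flow (all names here are illustrative):

    #include <stdio.h>

    struct vcpu_model {
            int pending_nr;      /* event awaiting reinjection, -1 if none */
            int irqchip_vector;  /* vector asserted by irqchip, -1 if none */
            int window_open;     /* IF set, no interrupt shadow, GIF set */
            int injected;        /* what would land in event_inj */
            int vintr;           /* virtual interrupt window requested */
    };

    static void intr_assist(struct vcpu_model *v)
    {
            v->injected = -1;
            v->vintr = 0;
            if (v->pending_nr >= 0) {       /* reinject previous event */
                    v->injected = v->pending_nr;
                    return;
            }
            if (v->irqchip_vector >= 0) {
                    if (v->window_open)
                            v->injected = v->irqchip_vector;
                    else                    /* exit again once it opens */
                            v->vintr = 1;
            }
    }

    int main(void)
    {
            struct vcpu_model v = { .pending_nr = -1, .irqchip_vector = 32,
                                    .window_open = 0, .injected = -1 };
            intr_assist(&v);
            printf("injected=%d vintr=%d\n", v.injected, v.vintr); /* -1 1 */
            return 0;
    }
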
@@ -2319,22 +2337,6 @@ static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
        return 0;
 }
 
-static void save_db_regs(unsigned long *db_regs)
-{
-       asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
-       asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
-       asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
-       asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
-}
-
-static void load_db_regs(unsigned long *db_regs)
-{
-       asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
-       asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
-       asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
-       asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
-}
-
 static void svm_flush_tlb(struct kvm_vcpu *vcpu)
 {
        force_new_asid(vcpu);
@@ -2350,7 +2352,7 @@ static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
 
        if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
                int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
-               kvm_lapic_set_tpr(vcpu, cr8);
+               kvm_set_cr8(vcpu, cr8);
        }
 }
 
@@ -2359,14 +2361,51 @@ static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 cr8;
 
-       if (!irqchip_in_kernel(vcpu->kvm))
-               return;
-
        cr8 = kvm_get_cr8(vcpu);
        svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
        svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
 }
 
+static void svm_complete_interrupts(struct vcpu_svm *svm)
+{
+       u8 vector;
+       int type;
+       u32 exitintinfo = svm->vmcb->control.exit_int_info;
+
+       svm->vcpu.arch.nmi_injected = false;
+       kvm_clear_exception_queue(&svm->vcpu);
+       kvm_clear_interrupt_queue(&svm->vcpu);
+
+       if (!(exitintinfo & SVM_EXITINTINFO_VALID))
+               return;
+
+       vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
+       type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
+
+       switch (type) {
+       case SVM_EXITINTINFO_TYPE_NMI:
+               svm->vcpu.arch.nmi_injected = true;
+               break;
+       case SVM_EXITINTINFO_TYPE_EXEPT:
+               /* In case of a software exception do not reinject an exception
+                  vector, but re-execute the instruction instead */
+               if (vector == BP_VECTOR || vector == OF_VECTOR)
+                       break;
+               if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
+                       u32 err = svm->vmcb->control.exit_int_info_err;
+                       kvm_queue_exception_e(&svm->vcpu, vector, err);
+               } else
+                       kvm_queue_exception(&svm->vcpu, vector);
+               break;
+       case SVM_EXITINTINFO_TYPE_INTR:
+               kvm_queue_interrupt(&svm->vcpu, vector);
+               break;
+       default:
+               break;
+       }
+}
+
 #ifdef CONFIG_X86_64
 #define R "r"
 #else
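
svm_complete_interrupts() parses exit_int_info with the same layout as the
EVTINJ field: vector in bits 0-7, type in bits 8-10, error-code-valid in bit
11, valid in bit 31. A standalone decoder sketch (constants renamed to avoid
suggesting kernel headers):

    #include <stdio.h>
    #include <stdint.h>

    #define EII_VEC_MASK   0xffu
    #define EII_TYPE_MASK  (7u << 8)
    #define EII_TYPE_EXEPT (3u << 8)
    #define EII_VALID_ERR  (1u << 11)
    #define EII_VALID      (1u << 31)

    static void decode(uint32_t eii)
    {
            if (!(eii & EII_VALID)) {
                    printf("no event was pending at #VMEXIT\n");
                    return;
            }
            printf("vector=%u type=%#x err_valid=%d\n",
                   (unsigned)(eii & EII_VEC_MASK),
                   (unsigned)(eii & EII_TYPE_MASK),
                   !!(eii & EII_VALID_ERR));
    }

    int main(void)
    {
            /* a #PF (vector 14) that pushed an error code */
            decode(EII_VALID | EII_TYPE_EXEPT | EII_VALID_ERR | 14);
            return 0;
    }
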
@@ -2393,20 +2432,12 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        gs_selector = kvm_read_gs();
        ldt_selector = kvm_read_ldt();
        svm->host_cr2 = kvm_read_cr2();
-       svm->host_dr6 = read_dr6();
-       svm->host_dr7 = read_dr7();
        if (!is_nested(svm))
                svm->vmcb->save.cr2 = vcpu->arch.cr2;
        /* required for live migration with NPT */
        if (npt_enabled)
                svm->vmcb->save.cr3 = vcpu->arch.cr3;
 
-       if (svm->vmcb->save.dr7 & 0xff) {
-               write_dr7(0);
-               save_db_regs(svm->host_db_regs);
-               load_db_regs(svm->db_regs);
-       }
-
        clgi();
 
        local_irq_enable();
@@ -2482,16 +2513,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 #endif
                );
 
-       if ((svm->vmcb->save.dr7 & 0xff))
-               load_db_regs(svm->host_db_regs);
-
        vcpu->arch.cr2 = svm->vmcb->save.cr2;
        vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
        vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
        vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 
-       write_dr6(svm->host_dr6);
-       write_dr7(svm->host_dr7);
        kvm_write_cr2(svm->host_cr2);
 
        kvm_load_fs(fs_selector);
@@ -2508,6 +2534,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        sync_cr8_to_lapic(vcpu);
 
        svm->next_rip = 0;
+
+       svm_complete_interrupts(svm);
 }
 
 #undef R
@@ -2627,9 +2655,8 @@ static struct kvm_x86_ops svm_x86_ops = {
        .get_irq = svm_get_irq,
        .set_irq = svm_set_irq,
        .queue_exception = svm_queue_exception,
-       .exception_injected = svm_exception_injected,
        .inject_pending_irq = svm_intr_assist,
-       .inject_pending_vectors = do_interrupt_requests,
+       .interrupt_allowed = svm_interrupt_allowed,
 
        .set_tss_addr = svm_set_tss_addr,
        .get_tdp_level = get_npt_level,