KVM: VMX: Check cpl before emulating debug register access
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 0ba706e..f381201 100644
@@ -540,10 +540,12 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
        eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR);
        if (!vcpu->fpu_active)
                eb |= 1u << NM_VECTOR;
+       /*
+        * Unconditionally intercept #DB so we can maintain dr6 without
+        * reading it every exit.
+        */
+       eb |= 1u << DB_VECTOR;
        if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
-               if (vcpu->guest_debug &
-                   (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
-                       eb |= 1u << DB_VECTOR;
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
                        eb |= 1u << BP_VECTOR;
        }
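
The guest_debug flags tested in this hunk are set from userspace through
the KVM_SET_GUEST_DEBUG vcpu ioctl. A minimal sketch of enabling software
breakpoints (vcpu_fd and error handling are assumed):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static int enable_sw_breakpoints(int vcpu_fd)
	{
		struct kvm_guest_debug dbg = {
			.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP,
		};

		/* After this patch #DB is intercepted unconditionally; only
		 * the #BP intercept still depends on KVM_GUESTDBG_USE_SW_BP. */
		return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
	}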
@@ -571,12 +573,15 @@ static void reload_tss(void)
 static void load_transition_efer(struct vcpu_vmx *vmx)
 {
        int efer_offset = vmx->msr_offset_efer;
-       u64 host_efer = vmx->host_msrs[efer_offset].data;
-       u64 guest_efer = vmx->guest_msrs[efer_offset].data;
+       u64 host_efer;
+       u64 guest_efer;
        u64 ignore_bits;
 
        if (efer_offset < 0)
                return;
+       host_efer = vmx->host_msrs[efer_offset].data;
+       guest_efer = vmx->guest_msrs[efer_offset].data;
+
        /*
         * NX is emulated; LMA and LME handled by hardware; SCE meaningless
         * outside long mode
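
The reordering above is a bounds fix: msr_offset_efer is -1 when no EFER
slot exists, and the old code indexed host_msrs[]/guest_msrs[] with that
sentinel before the guard could reject it. A reduced sketch of the safe
pattern (struct and names are illustrative, not the kernel code):

	#include <linux/types.h>

	struct msr_slot { u64 data; };

	static u64 slot_data(const struct msr_slot *slots, int offset)
	{
		if (offset < 0)			/* sentinel: not found */
			return 0;
		return slots[offset].data;	/* safe to index now */
	}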
@@ -778,7 +783,12 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
 
 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
 {
-       return vmcs_readl(GUEST_RFLAGS);
+       unsigned long rflags;
+
+       rflags = vmcs_readl(GUEST_RFLAGS);
+       if (to_vmx(vcpu)->rmode.vm86_active)
+               rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
+       return rflags;
 }
 
 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
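
vmx_get_rflags needs this masking because real-mode guests are emulated
by running them in vm86 mode, which forces IOPL=3 and EFLAGS.VM into the
hardware image; those bits must not leak out to the guest or userspace.
The write side mirrors it; a paraphrased sketch of the matching setter,
assuming it reinstates the bits while rmode.vm86_active is set:

	static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
	{
		/* Force the vm86 bits back into the hardware image so
		 * real-mode emulation keeps working. */
		if (to_vmx(vcpu)->rmode.vm86_active)
			rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
		vmcs_writel(GUEST_RFLAGS, rflags);
	}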
@@ -997,9 +1007,9 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
                data = vmcs_readl(GUEST_SYSENTER_ESP);
                break;
        default:
-               vmx_load_host_state(to_vmx(vcpu));
                msr = find_msr_entry(to_vmx(vcpu), msr_index);
                if (msr) {
+                       vmx_load_host_state(to_vmx(vcpu));
                        data = msr->data;
                        break;
                }
@@ -1056,9 +1066,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
                }
                /* Otherwise falls through to kvm_set_msr_common */
        default:
-               vmx_load_host_state(vmx);
                msr = find_msr_entry(vmx, msr_index);
                if (msr) {
+                       vmx_load_host_state(vmx);
                        msr->data = data;
                        break;
                }
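
Both MSR hunks make the same micro-optimization: vmx_load_host_state()
is comparatively expensive, and only MSRs kept in the vmx-private save
area need it, so the call moves inside the found-entry branch and common
MSRs fall through to kvm_{get,set}_msr_common() without it. For
reference, a paraphrased sketch of the lookup whose result gates that
branch:

	static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
	{
		int i;

		i = __find_msr_index(vmx, msr);	/* -1 when not private */
		if (i >= 0)
			return &vmx->guest_msrs[i];
		return NULL;
	}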
@@ -1254,12 +1264,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
        if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
                /* CR3 accesses and invlpg don't need to cause VM Exits when EPT
                   enabled */
-               min &= ~(CPU_BASED_CR3_LOAD_EXITING |
-                        CPU_BASED_CR3_STORE_EXITING |
-                        CPU_BASED_INVLPG_EXITING);
-               if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
-                                       &_cpu_based_exec_control) < 0)
-                       return -EIO;
+               _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
+                                            CPU_BASED_CR3_STORE_EXITING |
+                                            CPU_BASED_INVLPG_EXITING);
                rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
                      vmx_capability.ept, vmx_capability.vpid);
        }
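
The old code cleared the CR3/invlpg exits from min and reran
adjust_vmx_controls(); masking the already-validated
_cpu_based_exec_control is equivalent on EPT-capable hardware, where
those bits are allowed to be clear, and it avoids a second trip through
the capability MSR. A paraphrased sketch of that helper shows why the
direct mask is safe, the value having been derived from the same MSR:

	static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
					      u32 msr, u32 *result)
	{
		u32 low, high;
		u32 ctl = ctl_min | ctl_opt;

		rdmsr(msr, low, high);
		ctl &= high;		/* 0 in high word => must be zero */
		ctl |= low;		/* 1 in low word  => must be one  */

		if (ctl_min & ~ctl)	/* a required bit is unsupported */
			return -EIO;

		*result = ctl;
		return 0;
	}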
@@ -1634,7 +1641,6 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
                              CPU_BASED_CR3_STORE_EXITING));
                vcpu->arch.cr0 = cr0;
                vmx_set_cr4(vcpu, vcpu->arch.cr4);
-               *hw_cr0 &= ~X86_CR0_WP;
        } else if (!is_paging(vcpu)) {
                /* From nonpaging to paging */
                vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
@@ -1643,9 +1649,10 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
                               CPU_BASED_CR3_STORE_EXITING));
                vcpu->arch.cr0 = cr0;
                vmx_set_cr4(vcpu, vcpu->arch.cr4);
-               if (!(vcpu->arch.cr0 & X86_CR0_WP))
-                       *hw_cr0 &= ~X86_CR0_WP;
        }
+
+       if (!(cr0 & X86_CR0_WP))
+               *hw_cr0 &= ~X86_CR0_WP;
 }
 
 static void ept_update_paging_mode_cr4(unsigned long *hw_cr4,
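
Hoisting the WP handling out of the two transition branches means the
hardware cr0 image now tracks the guest's WP bit on every cr0 write, not
only on writes that happen to switch paging mode. Reduced to a sketch
(helper name hypothetical):

	/* With EPT, hardware cr0 keeps WP only when the guest asked
	 * for it, independent of any paging transition. */
	static unsigned long apply_guest_wp(unsigned long hw_cr0,
					    unsigned long cr0)
	{
		if (!(cr0 & X86_CR0_WP))
			hw_cr0 &= ~X86_CR0_WP;
		return hw_cr0;
	}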
@@ -2927,6 +2934,8 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        unsigned long val;
        int dr, reg;
 
+       if (!kvm_require_cpl(vcpu, 0))
+               return 1;
        dr = vmcs_readl(GUEST_DR7);
        if (dr & DR7_GD) {
                /*
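
This hunk is the change named in the subject line: debug-register moves
are privileged instructions, so handle_dr must not emulate them for
guest code running above CPL 0. kvm_require_cpl() is a small helper
added to x86.c alongside this patch; paraphrased, its shape is:

	int kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
	{
		if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
			return true;
		/* Privilege check failed: inject #GP(0) into the guest. */
		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
		return false;
	}

Returning 1 from handle_dr after a failed check marks the exit as
handled, so the queued #GP is delivered on the next VM entry instead of
the access being emulated.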
@@ -3627,7 +3636,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         */
        vmcs_writel(HOST_CR0, read_cr0());
 
-       set_debugreg(vcpu->arch.dr6, 6);
+       if (vcpu->arch.switch_db_regs)
+               set_debugreg(vcpu->arch.dr6, 6);
 
        asm(
                /* Store host registers */
@@ -3729,7 +3739,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                                  | (1 << VCPU_EXREG_PDPTR));
        vcpu->arch.regs_dirty = 0;
 
-       get_debugreg(vcpu->arch.dr6, 6);
+       if (vcpu->arch.switch_db_regs)
+               get_debugreg(vcpu->arch.dr6, 6);
 
        vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
        if (vmx->rmode.irq.pending)
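
The last two hunks make the dr6 save/restore around VM entry conditional
on switch_db_regs, so vcpus that never arm debug registers skip both
debug-register accesses on every exit. A hypothetical sketch of how such
a flag could be maintained when the guest's dr7 changes (DR7_BP_EN_MASK
covers the low dr7 enable bits):

	/* Assumption for illustration, not part of this patch: any
	 * enabled breakpoint means guest and host debug state can
	 * diverge, so entry/exit must swap dr6. */
	vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK) != 0;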