KVM: VMX: Check cpl before emulating debug register access
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c6256b9..f381201 100644
@@ -540,10 +540,12 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
        eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR);
        if (!vcpu->fpu_active)
                eb |= 1u << NM_VECTOR;
+       /*
+        * Unconditionally intercept #DB so we can maintain dr6 without
+        * reading it every exit.
+        */
+       eb |= 1u << DB_VECTOR;
        if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
-               if (vcpu->guest_debug &
-                   (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
-                       eb |= 1u << DB_VECTOR;
                if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
                        eb |= 1u << BP_VECTOR;
        }
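
The hunk above changes when #DB exits are taken: instead of intercepting only while a host-side debugger is attached, the bit is now always set, which lets KVM keep a software copy of dr6 current without reading the hardware register on every exit. A standalone sketch of the resulting bitmap logic for just this hunk (vector numbers are architectural; the function name is illustrative):

#include <stdint.h>

/* Architectural x86 exception vectors used by the bitmap. */
enum { DB_VECTOR = 1, BP_VECTOR = 3, UD_VECTOR = 6,
       NM_VECTOR = 7, PF_VECTOR = 14, MC_VECTOR = 18 };

/* Illustrative rebuild of the logic after this change: #DB is
 * intercepted unconditionally; #BP only when a host debugger
 * asked for software breakpoints. */
static uint32_t exception_bitmap(int fpu_active, int want_sw_bp)
{
        uint32_t eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) |
                      (1u << MC_VECTOR) | (1u << DB_VECTOR);

        if (!fpu_active)
                eb |= 1u << NM_VECTOR; /* lazy FPU: trap #NM */
        if (want_sw_bp)
                eb |= 1u << BP_VECTOR; /* trap int3 for the debugger */
        return eb;
}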
@@ -571,12 +573,15 @@ static void reload_tss(void)
 static void load_transition_efer(struct vcpu_vmx *vmx)
 {
        int efer_offset = vmx->msr_offset_efer;
-       u64 host_efer = vmx->host_msrs[efer_offset].data;
-       u64 guest_efer = vmx->guest_msrs[efer_offset].data;
+       u64 host_efer;
+       u64 guest_efer;
        u64 ignore_bits;
 
        if (efer_offset < 0)
                return;
+       host_efer = vmx->host_msrs[efer_offset].data;
+       guest_efer = vmx->guest_msrs[efer_offset].data;
+
        /*
         * NX is emulated; LMA and LME handled by hardware; SCE meaningless
         * outside long mode
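
The reordering above is a bounds fix: the old code indexed host_msrs[] and guest_msrs[] before checking efer_offset, so on a CPU where no EFER slot exists (efer_offset == -1) it read one element before each array. Reduced to its essence (types here are hypothetical):

struct msr_slot { unsigned long long data; };

/* Before: the array access is evaluated before the validity
 * check, so slot == -1 is an out-of-bounds read. */
unsigned long long broken(struct msr_slot *msrs, int slot)
{
        unsigned long long v = msrs[slot].data; /* OOB if slot < 0 */
        if (slot < 0)
                return 0;
        return v;
}

/* After: validate the index first, then dereference. */
unsigned long long fixed(struct msr_slot *msrs, int slot)
{
        if (slot < 0)
                return 0;
        return msrs[slot].data;
}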
@@ -778,7 +783,12 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
 
 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
 {
-       return vmcs_readl(GUEST_RFLAGS);
+       unsigned long rflags;
+
+       rflags = vmcs_readl(GUEST_RFLAGS);
+       if (to_vmx(vcpu)->rmode.vm86_active)
+               rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
+       return rflags;
 }
 
 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
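
vmx emulates guest real mode by actually running the CPU in virtual-8086 mode, so the raw GUEST_RFLAGS carries VM and IOPL=3 that belong to the emulation trick, not to the guest; the new read path strips them so callers (including vmx_get_cpl() further down) see the flags the guest thinks it has. A standalone sketch using the architectural flag values:

#define X86_EFLAGS_IOPL 0x3000UL /* I/O privilege level, bits 12-13 */
#define X86_EFLAGS_VM  0x20000UL /* virtual-8086 mode, bit 17 */

/* Flags as the guest should see them, given the raw VMCS value
 * and whether real mode is being faked via vm86. */
static unsigned long guest_visible_rflags(unsigned long hw, int vm86)
{
        if (vm86)
                hw &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
        return hw;
}
/* e.g. guest_visible_rflags(0x23202, 1) == 0x202 */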
@@ -997,9 +1007,9 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
                data = vmcs_readl(GUEST_SYSENTER_ESP);
                break;
        default:
-               vmx_load_host_state(to_vmx(vcpu));
                msr = find_msr_entry(to_vmx(vcpu), msr_index);
                if (msr) {
+                       vmx_load_host_state(to_vmx(vcpu));
                        data = msr->data;
                        break;
                }
@@ -1056,9 +1066,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
                }
                /* Otherwise falls through to kvm_set_msr_common */
        default:
-               vmx_load_host_state(vmx);
                msr = find_msr_entry(vmx, msr_index);
                if (msr) {
+                       vmx_load_host_state(vmx);
                        msr->data = data;
                        break;
                }
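
Both MSR hunks make the same micro-optimization: vmx_load_host_state() syncs the live MSR values into the shadow arrays, so it is only needed on the path that actually touches msr->data; MSRs that fall through to kvm_get/set_msr_common() never look at those arrays and were paying for the sync needlessly. An annotated excerpt of the new write path (sketch, mirroring the hunk above):

msr = find_msr_entry(vmx, msr_index);  /* cheap table lookup */
if (msr) {
        /* Only now do the host-state reload, so the shadowed
         * msr->data is guaranteed to be current before we
         * overwrite it. */
        vmx_load_host_state(vmx);
        msr->data = data;
        break;
}
/* The fallthrough path never reads the shadow arrays, so it
 * no longer triggers the reload at all. */
ret = kvm_set_msr_common(vcpu, msr_index, data);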
@@ -1254,12 +1264,9 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
        if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
                /* CR3 accesses and invlpg don't need to cause VM Exits when EPT
                   enabled */
-               min &= ~(CPU_BASED_CR3_LOAD_EXITING |
-                        CPU_BASED_CR3_STORE_EXITING |
-                        CPU_BASED_INVLPG_EXITING);
-               if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
-                                       &_cpu_based_exec_control) < 0)
-                       return -EIO;
+               _cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
+                                            CPU_BASED_CR3_STORE_EXITING |
+                                            CPU_BASED_INVLPG_EXITING);
                rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
                      vmx_capability.ept, vmx_capability.vpid);
        }
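
The old approach (clear the bits from `min`, then call adjust_vmx_controls() again) could silently fail: the adjust step ORs back every bit the capability MSR marks as must-be-one, and MSR_IA32_VMX_PROCBASED_CTLS reports the CR3-exiting bits that way on many CPUs, so the second pass would re-set exactly the bits being removed. Masking the already-computed control word expresses the intent directly. A sketch of the adjust semantics, reconstructed from the capability-MSR layout (low half = must-be-one, high half = may-be-one):

#include <stdint.h>

static int adjust_controls(uint32_t min, uint32_t opt,
                           uint64_t cap_msr, uint32_t *result)
{
        uint32_t must_be_one = (uint32_t)cap_msr;         /* low 32  */
        uint32_t may_be_one  = (uint32_t)(cap_msr >> 32); /* high 32 */
        uint32_t ctl = min | opt;

        ctl &= may_be_one;   /* drop what the CPU cannot do */
        ctl |= must_be_one;  /* re-add what it insists on --
                              * this is what undid the clear */

        if ((ctl & min) != min)
                return -1;   /* a required feature is missing */
        *result = ctl;
        return 0;
}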
@@ -1634,7 +1641,6 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
                              CPU_BASED_CR3_STORE_EXITING));
                vcpu->arch.cr0 = cr0;
                vmx_set_cr4(vcpu, vcpu->arch.cr4);
-               *hw_cr0 &= ~X86_CR0_WP;
        } else if (!is_paging(vcpu)) {
                /* From nonpaging to paging */
                vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
@@ -1643,9 +1649,10 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
                               CPU_BASED_CR3_STORE_EXITING));
                vcpu->arch.cr0 = cr0;
                vmx_set_cr4(vcpu, vcpu->arch.cr4);
-               if (!(vcpu->arch.cr0 & X86_CR0_WP))
-                       *hw_cr0 &= ~X86_CR0_WP;
        }
+
+       if (!(cr0 & X86_CR0_WP))
+               *hw_cr0 &= ~X86_CR0_WP;
 }
 
 static void ept_update_paging_mode_cr4(unsigned long *hw_cr4,
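
The WP handling is hoisted out of the transition branches for two reasons: the first branch used to clear CR0.WP unconditionally, ignoring the guest's setting, and when neither transition applied (paging staying on) the bit was never adjusted at all, so a guest clearing WP mid-flight kept running with it set. The consolidated rule just mirrors the guest, which is safe here because with EPT there are no shadow page tables that require forcing WP on. The rule in isolation (sketch):

/* One rule, applied on every CR0 update regardless of which
 * paging transition (if any) is in progress. */
static void mirror_guest_wp(unsigned long guest_cr0,
                            unsigned long *hw_cr0)
{
        if (!(guest_cr0 & X86_CR0_WP))
                *hw_cr0 &= ~X86_CR0_WP;
}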
@@ -1719,7 +1726,7 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
                eptp = construct_eptp(cr3);
                vmcs_write64(EPT_POINTER, eptp);
                guest_cr3 = is_paging(vcpu) ? vcpu->arch.cr3 :
-                       VMX_EPT_IDENTITY_PAGETABLE_ADDR;
+                       vcpu->kvm->arch.ept_identity_map_addr;
        }
 
        vmx_flush_tlb(vcpu);
@@ -1773,16 +1780,13 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
 
 static int vmx_get_cpl(struct kvm_vcpu *vcpu)
 {
-       struct kvm_segment kvm_seg;
-
        if (!(vcpu->arch.cr0 & X86_CR0_PE)) /* if real mode */
                return 0;
 
        if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */
                return 3;
 
-       vmx_get_segment(vcpu, &kvm_seg, VCPU_SREG_CS);
-       return kvm_seg.selector & 3;
+       return vmcs_read16(GUEST_CS_SELECTOR) & 3;
 }
 
 static u32 vmx_segment_access_rights(struct kvm_segment *var)
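
The simplification rests on how CPL is defined: outside real mode and virtual-8086 mode it is just the RPL field, the low two bits of the CS selector, so a single 16-bit VMCS read replaces materializing an entire kvm_segment. The derivation as a standalone function (constants are architectural):

#include <stdint.h>

#define X86_CR0_PE    0x1UL
#define X86_EFLAGS_VM 0x20000UL

/* Real mode is effectively ring 0; vm86 always runs at ring 3;
 * otherwise CPL == RPL == low two bits of the CS selector. */
static int current_privilege_level(unsigned long cr0,
                                   unsigned long rflags,
                                   uint16_t cs_selector)
{
        if (!(cr0 & X86_CR0_PE))
                return 0;
        if (rflags & X86_EFLAGS_VM)
                return 3;
        return cs_selector & 3;
}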
@@ -2122,7 +2126,7 @@ static int init_rmode_identity_map(struct kvm *kvm)
        if (likely(kvm->arch.ept_identity_pagetable_done))
                return 1;
        ret = 0;
-       identity_map_pfn = VMX_EPT_IDENTITY_PAGETABLE_ADDR >> PAGE_SHIFT;
+       identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT;
        r = kvm_clear_guest_page(kvm, identity_map_pfn, 0, PAGE_SIZE);
        if (r < 0)
                goto out;
@@ -2191,14 +2195,15 @@ static int alloc_identity_pagetable(struct kvm *kvm)
                goto out;
        kvm_userspace_mem.slot = IDENTITY_PAGETABLE_PRIVATE_MEMSLOT;
        kvm_userspace_mem.flags = 0;
-       kvm_userspace_mem.guest_phys_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR;
+       kvm_userspace_mem.guest_phys_addr =
+               kvm->arch.ept_identity_map_addr;
        kvm_userspace_mem.memory_size = PAGE_SIZE;
        r = __kvm_set_memory_region(kvm, &kvm_userspace_mem, 0);
        if (r)
                goto out;
 
        kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
-                       VMX_EPT_IDENTITY_PAGETABLE_ADDR >> PAGE_SHIFT);
+                       kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
 out:
        up_write(&kvm->slots_lock);
        return r;
@@ -2929,6 +2934,8 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        unsigned long val;
        int dr, reg;
 
+       if (!kvm_require_cpl(vcpu, 0))
+               return 1;
        dr = vmcs_readl(GUEST_DR7);
        if (dr & DR7_GD) {
                /*
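
This is the fix named in the subject line: mov to/from a debug register is privileged, so when a guest's userspace executes one the correct response is to inject #GP into the guest rather than emulate the access. kvm_require_cpl() lives in the common x86 code; a reconstructed sketch of its expected behavior (not quoted from the tree):

/* Returns nonzero when the vcpu's CPL is at most required_cpl;
 * otherwise queues #GP(0) for injection on the next guest entry
 * and returns 0, so callers simply resume the guest. */
int kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl)
{
        if (kvm_x86_ops->get_cpl(vcpu) <= required_cpl)
                return 1;
        kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
        return 0;
}

Note the interplay with the vmx_get_cpl() change above: the CPL test now costs one VMCS field read instead of a full segment fetch, which matters because it runs on every debug-register exit.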
@@ -3132,7 +3139,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                printk(KERN_ERR
                       "Fail to handle apic access vmexit! Offset is 0x%lx\n",
                       offset);
-               return -ENOTSUPP;
+               return -ENOEXEC;
        }
        return 1;
 }
@@ -3201,7 +3208,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
        if (exit_qualification & (1 << 6)) {
                printk(KERN_ERR "EPT: GPA exceeds GAW!\n");
-               return -ENOTSUPP;
+               return -EINVAL;
        }
 
        gla_validity = (exit_qualification >> 7) & 0x3;
@@ -3629,7 +3636,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         */
        vmcs_writel(HOST_CR0, read_cr0());
 
-       set_debugreg(vcpu->arch.dr6, 6);
+       if (vcpu->arch.switch_db_regs)
+               set_debugreg(vcpu->arch.dr6, 6);
 
        asm(
                /* Store host registers */
@@ -3731,7 +3739,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                                  | (1 << VCPU_EXREG_PDPTR));
        vcpu->arch.regs_dirty = 0;
 
-       get_debugreg(vcpu->arch.dr6, 6);
+       if (vcpu->arch.switch_db_regs)
+               get_debugreg(vcpu->arch.dr6, 6);
 
        vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
        if (vmx->rmode.irq.pending)
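
Both vcpu_run hunks gate the dr6 swap on vcpu->arch.switch_db_regs, a flag the common code maintains so the serializing debug-register moves are skipped on the hot path while no breakpoints are armed. A hypothetical sketch of how such a flag could be maintained on the dr7 write path (the helper name and mask are assumptions, not quoted from the tree):

#define DR7_BP_EN_MASK 0x000000ff /* L0/G0..L3/G3 enable bits */

/* Hypothetical: arm the expensive save/restore only while the
 * guest actually has a breakpoint enabled in dr7. */
static void guest_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
{
        vcpu->arch.dr7 = val;
        vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK) != 0;
}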
@@ -3814,9 +3823,13 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
                if (alloc_apic_access_page(kvm) != 0)
                        goto free_vmcs;
 
-       if (enable_ept)
+       if (enable_ept) {
+               if (!kvm->arch.ept_identity_map_addr)
+                       kvm->arch.ept_identity_map_addr =
+                               VMX_EPT_IDENTITY_PAGETABLE_ADDR;
                if (alloc_identity_pagetable(kvm) != 0)
                        goto free_vmcs;
+       }
 
        return &vmx->vcpu;
 
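The identity-map address used for real-mode EPT setup is now a per-VM value, defaulted at first-vCPU creation to the old compile-time VMX_EPT_IDENTITY_PAGETABLE_ADDR. The point of making it configurable is that userspace may relocate the page so it does not collide with guest RAM; assuming the companion KVM_SET_IDENTITY_MAP_ADDR vm ioctl from the same series, usage looks like this sketch (the address is only an example, error handling elided):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Must run before the first KVM_CREATE_VCPU, or the in-kernel
 * default above has already been latched. */
static int relocate_identity_map(int vm_fd)
{
        __u64 addr = 0xfeffc000; /* example address, page aligned */
        return ioctl(vm_fd, KVM_SET_IDENTITY_MAP_ADDR, &addr);
}
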
@@ -3903,6 +3916,11 @@ static const struct trace_print_flags vmx_exit_reasons_str[] = {
        { -1, NULL }
 };
 
+static bool vmx_gb_page_enable(void)
+{
+       return false;
+}
+
 static struct kvm_x86_ops vmx_x86_ops = {
        .cpu_has_kvm_support = cpu_has_kvm_support,
        .disabled_by_bios = vmx_disabled_by_bios,
@@ -3964,6 +3982,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .get_mt_mask = vmx_get_mt_mask,
 
        .exit_reasons_str = vmx_exit_reasons_str,
+       .gb_page_enable = vmx_gb_page_enable,
 };
 
 static int __init vmx_init(void)