KVM: Don't follow an atomic operation by a non-atomic one
author Avi Kivity <avi@redhat.com>
Mon, 15 Mar 2010 11:59:55 +0000 (13:59 +0200)
committer Avi Kivity <avi@redhat.com>
Mon, 17 May 2010 09:15:40 +0000 (12:15 +0300)
Currently, an emulated atomic operation is immediately followed by a
non-atomic write of the same data so that kvm_mmu_pte_write() can be
invoked.  This updates the mmu but defeats the whole point of doing the
operation atomically.

Fix by performing only the atomic operation and the mmu update, avoiding
the non-atomic write.
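
A condensed sketch of the post-patch flow, matching the diff below: the
write path grows an mmu_only flag, and when it is set only the mmu update
is performed while the write to guest memory is skipped.

	if (!exchanged)
		return X86EMUL_CMPXCHG_FAILED;
	/* guest memory was already updated by the atomic exchange;
	 * mmu_only == true propagates the write to the shadow mmu only
	 */
	return __emulator_write_emulated(addr, new, bytes, vcpu, true);

Ordinary writes keep the old behaviour: emulator_write_emulated() is now a
thin wrapper that passes mmu_only == false.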

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
arch/x86/kvm/x86.c

index 855f3ea..dd4a7ad 100644
@@ -3234,7 +3234,8 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 static int emulator_write_emulated_onepage(unsigned long addr,
                                           const void *val,
                                           unsigned int bytes,
-                                          struct kvm_vcpu *vcpu)
+                                          struct kvm_vcpu *vcpu,
+                                          bool mmu_only)
 {
        gpa_t                 gpa;
        u32 error_code;
@@ -3250,6 +3251,10 @@ static int emulator_write_emulated_onepage(unsigned long addr,
        if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
                goto mmio;
 
+       if (mmu_only) {
+               kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
+               return X86EMUL_CONTINUE;
+       }
        if (emulator_write_phys(vcpu, gpa, val, bytes))
                return X86EMUL_CONTINUE;
 
@@ -3270,24 +3275,35 @@ mmio:
        return X86EMUL_CONTINUE;
 }
 
-int emulator_write_emulated(unsigned long addr,
+int __emulator_write_emulated(unsigned long addr,
                                   const void *val,
                                   unsigned int bytes,
-                                  struct kvm_vcpu *vcpu)
+                                  struct kvm_vcpu *vcpu,
+                                  bool mmu_only)
 {
        /* Crossing a page boundary? */
        if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
                int rc, now;
 
                now = -addr & ~PAGE_MASK;
-               rc = emulator_write_emulated_onepage(addr, val, now, vcpu);
+               rc = emulator_write_emulated_onepage(addr, val, now, vcpu,
+                                                    mmu_only);
                if (rc != X86EMUL_CONTINUE)
                        return rc;
                addr += now;
                val += now;
                bytes -= now;
        }
-       return emulator_write_emulated_onepage(addr, val, bytes, vcpu);
+       return emulator_write_emulated_onepage(addr, val, bytes, vcpu,
+                                              mmu_only);
+}
+
+int emulator_write_emulated(unsigned long addr,
+                                  const void *val,
+                                  unsigned int bytes,
+                                  struct kvm_vcpu *vcpu)
+{
+       return __emulator_write_emulated(addr, val, bytes, vcpu, false);
 }
 EXPORT_SYMBOL_GPL(emulator_write_emulated);
 
@@ -3351,6 +3367,8 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
        if (!exchanged)
                return X86EMUL_CMPXCHG_FAILED;
 
+       return __emulator_write_emulated(addr, new, bytes, vcpu, true);
+
 emul_write:
        printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
 
@@ -4005,7 +4023,7 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
 
        kvm_x86_ops->patch_hypercall(vcpu, instruction);
 
-       return emulator_write_emulated(rip, instruction, 3, vcpu);
+       return __emulator_write_emulated(rip, instruction, 3, vcpu, false);
 }
 
 static u64 mk_cr_64(u64 curr_cr, u32 new_val)