NET: turn local_save_flags() + local_irq_disable() into local_irq_save()
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index cec1010..b10972e 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -230,6 +230,7 @@ static int kvm_dev_open(struct inode *inode, struct file *filp)
                struct kvm_vcpu *vcpu = &kvm->vcpus[i];
 
                mutex_init(&vcpu->mutex);
+               vcpu->kvm = kvm;
                vcpu->mmu.root_hpa = INVALID_PAGE;
                INIT_LIST_HEAD(&vcpu->free_pages);
        }
@@ -271,8 +272,10 @@ static void kvm_free_physmem(struct kvm *kvm)
 
 static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
 {
-       kvm_arch_ops->vcpu_free(vcpu);
+       vcpu_load(vcpu->kvm, vcpu_slot(vcpu));
        kvm_mmu_destroy(vcpu);
+       vcpu_put(vcpu);
+       kvm_arch_ops->vcpu_free(vcpu);
 }
 
 static void kvm_free_vcpus(struct kvm *kvm)
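
The two hunks above reorder vcpu teardown: vcpu->kvm is now set at open time so that kvm_free_vcpu() can load the vcpu, destroy its MMU state while it is resident on a CPU, and only then hand it to the arch-specific free hook. A stand-alone toy model of that ordering (placeholder names, not the real KVM helpers):

#include <stdio.h>

/* Toy model only: vcpu_load/vcpu_put/kvm_mmu_destroy/arch_vcpu_free are stand-ins. */
struct vcpu { int loaded; };

static void vcpu_load(struct vcpu *v) { v->loaded = 1; }
static void vcpu_put(struct vcpu *v)  { v->loaded = 0; }

static void kvm_mmu_destroy(struct vcpu *v)
{
	/* MMU teardown may touch per-cpu state, so it runs while loaded. */
	printf("destroying mmu, loaded=%d\n", v->loaded);
}

static void arch_vcpu_free(struct vcpu *v)
{
	printf("arch free, loaded=%d\n", v->loaded);
}

int main(void)
{
	struct vcpu v = { 0 };

	vcpu_load(&v);
	kvm_mmu_destroy(&v);   /* runs with the vcpu loaded */
	vcpu_put(&v);
	arch_vcpu_free(&v);    /* runs after the vcpu is put */
	return 0;
}
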
@@ -462,7 +465,19 @@ void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 
        vcpu->cr3 = cr3;
        spin_lock(&vcpu->kvm->lock);
-       vcpu->mmu.new_cr3(vcpu);
+       /*
+        * Does the new cr3 value map to physical memory? (Note, we
+        * catch an invalid cr3 even in real-mode, because it would
+        * cause trouble later on when we turn on paging anyway.)
+        *
+        * A real CPU would silently accept an invalid cr3 and would
+        * attempt to use it - with largely undefined (and often hard
+        * to debug) behavior on the guest side.
+        */
+       if (unlikely(!gfn_to_memslot(vcpu->kvm, cr3 >> PAGE_SHIFT)))
+               inject_gp(vcpu);
+       else
+               vcpu->mmu.new_cr3(vcpu);
        spin_unlock(&vcpu->kvm->lock);
 }
 EXPORT_SYMBOL_GPL(set_cr3);
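
The added check rejects a cr3 that points outside guest physical memory instead of letting the MMU chase an unbacked frame. Roughly, and ignoring everything the real gfn_to_memslot() does beyond a range test, the validation amounts to the following self-contained sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Simplified memory slot: a contiguous range of guest frame numbers. */
struct memslot {
	uint64_t base_gfn;
	uint64_t npages;
};

static bool gfn_is_backed(const struct memslot *slots, int nslots, uint64_t gfn)
{
	for (int i = 0; i < nslots; ++i)
		if (gfn >= slots[i].base_gfn &&
		    gfn < slots[i].base_gfn + slots[i].npages)
			return true;
	return false;
}

int main(void)
{
	struct memslot slots[] = { { .base_gfn = 0, .npages = 1024 } }; /* 4 MB of guest RAM */
	unsigned long long good_cr3 = 0x2000, bad_cr3 = 0x800000;

	/* Mirrors the new set_cr3() logic: a cr3 whose frame has no slot gets a #GP. */
	printf("cr3 %#llx: %s\n", good_cr3,
	       gfn_is_backed(slots, 1, good_cr3 >> PAGE_SHIFT) ? "ok" : "#GP");
	printf("cr3 %#llx: %s\n", bad_cr3,
	       gfn_is_backed(slots, 1, bad_cr3 >> PAGE_SHIFT) ? "ok" : "#GP");
	return 0;
}
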
@@ -530,7 +545,6 @@ static int kvm_dev_ioctl_create_vcpu(struct kvm *kvm, int n)
        vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
 
        vcpu->cpu = -1;  /* First load will set up TR */
-       vcpu->kvm = kvm;
        r = kvm_arch_ops->vcpu_create(vcpu);
        if (r < 0)
                goto out_free_vcpus;
@@ -702,6 +716,13 @@ out:
        return r;
 }
 
+static void do_remove_write_access(struct kvm_vcpu *vcpu, int slot)
+{
+       spin_lock(&vcpu->kvm->lock);
+       kvm_mmu_slot_remove_write_access(vcpu, slot);
+       spin_unlock(&vcpu->kvm->lock);
+}
+
 /*
  * Get (and clear) the dirty memory log for a memory slot.
  */
@@ -711,6 +732,7 @@ static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm,
        struct kvm_memory_slot *memslot;
        int r, i;
        int n;
+       int cleared;
        unsigned long any = 0;
 
        spin_lock(&kvm->lock);
@@ -741,15 +763,17 @@ static int kvm_dev_ioctl_get_dirty_log(struct kvm *kvm,
 
 
        if (any) {
-               spin_lock(&kvm->lock);
-               kvm_mmu_slot_remove_write_access(kvm, log->slot);
-               spin_unlock(&kvm->lock);
-               memset(memslot->dirty_bitmap, 0, n);
+               cleared = 0;
                for (i = 0; i < KVM_MAX_VCPUS; ++i) {
                        struct kvm_vcpu *vcpu = vcpu_load(kvm, i);
 
                        if (!vcpu)
                                continue;
+                       if (!cleared) {
+                               do_remove_write_access(vcpu, log->slot);
+                               memset(memslot->dirty_bitmap, 0, n);
+                               cleared = 1;
+                       }
                        kvm_arch_ops->tlb_flush(vcpu);
                        vcpu_put(vcpu);
                }
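
The dirty-log path now removes write access and clears the bitmap exactly once, inside the same loop that flushes every resident vcpu's TLB, rather than doing the bitmap work before the loop. A self-contained toy model of that control flow (stand-in types, not kernel code):

#include <stdio.h>
#include <string.h>

#define MAX_VCPUS 4

struct vcpu { int present; int tlb_flushes; };

/* Toy model of the reworked path: clear once, flush every present vcpu. */
static void get_dirty_log(struct vcpu *vcpus, unsigned char *bitmap, size_t n)
{
	int cleared = 0;

	for (int i = 0; i < MAX_VCPUS; ++i) {
		if (!vcpus[i].present)
			continue;
		if (!cleared) {
			/* stands in for do_remove_write_access() + memset() */
			memset(bitmap, 0, n);
			cleared = 1;
		}
		vcpus[i].tlb_flushes++;   /* stands in for kvm_arch_ops->tlb_flush() */
	}
}

int main(void)
{
	struct vcpu vcpus[MAX_VCPUS] = { { .present = 1 }, { .present = 1 } };
	unsigned char bitmap[8] = { 0xff };

	get_dirty_log(vcpus, bitmap, sizeof(bitmap));
	printf("bitmap[0]=%#x flushes: %d %d\n", bitmap[0],
	       vcpus[0].tlb_flushes, vcpus[1].tlb_flushes);
	return 0;
}
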
@@ -936,6 +960,30 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
        return emulator_write_emulated(addr, new, bytes, ctxt);
 }
 
+#ifdef CONFIG_X86_32
+
+static int emulator_cmpxchg8b_emulated(unsigned long addr,
+                                      unsigned long old_lo,
+                                      unsigned long old_hi,
+                                      unsigned long new_lo,
+                                      unsigned long new_hi,
+                                      struct x86_emulate_ctxt *ctxt)
+{
+       static int reported;
+       int r;
+
+       if (!reported) {
+               reported = 1;
+               printk(KERN_WARNING "kvm: emulating exchange8b as write\n");
+       }
+       r = emulator_write_emulated(addr, new_lo, 4, ctxt);
+       if (r != X86EMUL_CONTINUE)
+               return r;
+       return emulator_write_emulated(addr+4, new_hi, 4, ctxt);
+}
+
+#endif
+
 static unsigned long get_segment_base(struct kvm_vcpu *vcpu, int seg)
 {
        return kvm_arch_ops->get_segment_base(vcpu, seg);
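
On 32-bit hosts the emulator has no 8-byte cmpxchg primitive, so a guest cmpxchg8b hitting emulated memory is degraded to two plain 4-byte writes of the new value; the compare and the atomicity are dropped, which is why the code warns the first time. A rough userspace illustration of that degradation (write_emulated() below is only a stand-in for emulator_write_emulated()):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for emulator_write_emulated(): just copies bytes into "guest" memory. */
static int write_emulated(uint8_t *addr, uint32_t val, int bytes)
{
	memcpy(addr, &val, bytes);
	return 0;
}

/* Sketch of the fallback: the 8-byte compare-exchange becomes two plain
 * 4-byte writes of the new value; the compare is not performed at all. */
static int cmpxchg8b_as_write(uint8_t *addr, uint32_t new_lo, uint32_t new_hi)
{
	if (write_emulated(addr, new_lo, 4))
		return -1;
	return write_emulated(addr + 4, new_hi, 4);
}

int main(void)
{
	uint64_t guest_word = 0x1122334455667788ULL;

	cmpxchg8b_as_write((uint8_t *)&guest_word, 0xdeadbeef, 0x01234567);
	printf("result: %#llx\n", (unsigned long long)guest_word);
	return 0;
}
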
@@ -1010,6 +1058,9 @@ struct x86_emulate_ops emulate_ops = {
        .read_emulated       = emulator_read_emulated,
        .write_emulated      = emulator_write_emulated,
        .cmpxchg_emulated    = emulator_cmpxchg_emulated,
+#ifdef CONFIG_X86_32
+       .cmpxchg8b_emulated  = emulator_cmpxchg8b_emulated,
+#endif
 };
 
 int emulate_instruction(struct kvm_vcpu *vcpu,
@@ -1175,6 +1226,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        case MSR_IA32_APICBASE:
                data = vcpu->apic_base;
                break;
+       case MSR_IA32_MISC_ENABLE:
+               data = vcpu->ia32_misc_enable_msr;
+               break;
 #ifdef CONFIG_X86_64
        case MSR_EFER:
                data = vcpu->shadow_efer;
@@ -1246,6 +1300,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
        case MSR_IA32_APICBASE:
                vcpu->apic_base = data;
                break;
+       case MSR_IA32_MISC_ENABLE:
+               vcpu->ia32_misc_enable_msr = data;
+               break;
        default:
                printk(KERN_ERR "kvm: unhandled wrmsr: 0x%x\n", msr);
                return 1;
@@ -1549,6 +1606,10 @@ static u32 msrs_to_save[] = {
 
 static unsigned num_msrs_to_save;
 
+static u32 emulated_msrs[] = {
+       MSR_IA32_MISC_ENABLE,
+};
+
 static __init void kvm_init_msr_list(void)
 {
        u32 dummy[2];
@@ -1874,7 +1935,7 @@ static long kvm_dev_ioctl(struct file *filp,
                if (copy_from_user(&msr_list, user_msr_list, sizeof msr_list))
                        goto out;
                n = msr_list.nmsrs;
-               msr_list.nmsrs = num_msrs_to_save;
+               msr_list.nmsrs = num_msrs_to_save + ARRAY_SIZE(emulated_msrs);
                if (copy_to_user(user_msr_list, &msr_list, sizeof msr_list))
                        goto out;
                r = -E2BIG;
@@ -1884,7 +1945,13 @@ static long kvm_dev_ioctl(struct file *filp,
                if (copy_to_user(user_msr_list->indices, &msrs_to_save,
                                 num_msrs_to_save * sizeof(u32)))
                        goto out;
+               if (copy_to_user(user_msr_list->indices
+                                + num_msrs_to_save * sizeof(u32),
+                                &emulated_msrs,
+                                ARRAY_SIZE(emulated_msrs) * sizeof(u32)))
+                       goto out;
                r = 0;
+               break;
        }
        default:
                ;
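
The nmsrs accounting above matters to userspace, which typically issues KVM_GET_MSR_INDEX_LIST twice: once to learn the count (the ioctl fails with E2BIG but writes back the required nmsrs), then again with a buffer large enough for both the saved and the emulated MSRs. A minimal sketch of that convention, assuming a /dev/kvm built from this code:

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct kvm_msr_list probe = { .nmsrs = 0 };
	struct kvm_msr_list *list;
	int fd = open("/dev/kvm", O_RDWR);

	if (fd < 0) {
		perror("open /dev/kvm");
		return 1;
	}

	/* First call fails with E2BIG but reports the required count. */
	ioctl(fd, KVM_GET_MSR_INDEX_LIST, &probe);

	list = malloc(sizeof(*list) + probe.nmsrs * sizeof(__u32));
	if (!list)
		return 1;
	list->nmsrs = probe.nmsrs;

	/* Second call fills in both the saved and the emulated MSR indices. */
	if (ioctl(fd, KVM_GET_MSR_INDEX_LIST, list) == 0)
		for (unsigned i = 0; i < list->nmsrs; ++i)
			printf("msr %#x\n", list->indices[i]);

	free(list);
	close(fd);
	return 0;
}
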