Merge remote branch 'tip/perf/core'
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 58a295c..848c814 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -40,6 +40,7 @@
 #include <linux/user-return-notifier.h>
 #include <linux/srcu.h>
 #include <linux/slab.h>
+#include <linux/perf_event.h>
 #include <trace/events/kvm.h>
 
 #define CREATE_TRACE_POINTS
@@ -487,7 +488,6 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
        }
        kvm_x86_ops->set_cr4(vcpu, cr4);
        vcpu->arch.cr4 = cr4;
-       vcpu->arch.mmu.base_role.cr4_pge = (cr4 & X86_CR4_PGE) && !tdp_enabled;
        kvm_mmu_reset_context(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_set_cr4);
@@ -2496,7 +2496,7 @@ gfn_t unalias_gfn_instantiation(struct kvm *kvm, gfn_t gfn)
        struct kvm_mem_alias *alias;
        struct kvm_mem_aliases *aliases;
 
-       aliases = rcu_dereference(kvm->arch.aliases);
+       aliases = kvm_aliases(kvm);
 
        for (i = 0; i < aliases->naliases; ++i) {
                alias = &aliases->aliases[i];
@@ -2515,7 +2515,7 @@ gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
        struct kvm_mem_alias *alias;
        struct kvm_mem_aliases *aliases;
 
-       aliases = rcu_dereference(kvm->arch.aliases);
+       aliases = kvm_aliases(kvm);
 
        for (i = 0; i < aliases->naliases; ++i) {
                alias = &aliases->aliases[i];
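kvm_aliases() is not defined in this hunk; from the surrounding merge it is presumably a small wrapper that replaces the bare rcu_dereference() with a lockdep-checked variant, roughly along these lines (a sketch, not the exact helper from the commit):

static struct kvm_mem_aliases *kvm_aliases(struct kvm *kvm)
{
	/* safe under srcu read-side or with slots_lock held */
	return rcu_dereference_check(kvm->arch.aliases,
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}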
@@ -3955,6 +3955,51 @@ static void kvm_timer_init(void)
        }
 }
 
+static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
+
+static int kvm_is_in_guest(void)
+{
+       return percpu_read(current_vcpu) != NULL;
+}
+
+static int kvm_is_user_mode(void)
+{
+       int user_mode = 3;
+
+       if (percpu_read(current_vcpu))
+               user_mode = kvm_x86_ops->get_cpl(percpu_read(current_vcpu));
+
+       return user_mode != 0;
+}
+
+static unsigned long kvm_get_guest_ip(void)
+{
+       unsigned long ip = 0;
+
+       if (percpu_read(current_vcpu))
+               ip = kvm_rip_read(percpu_read(current_vcpu));
+
+       return ip;
+}
+
+static struct perf_guest_info_callbacks kvm_guest_cbs = {
+       .is_in_guest            = kvm_is_in_guest,
+       .is_user_mode           = kvm_is_user_mode,
+       .get_guest_ip           = kvm_get_guest_ip,
+};
+
+void kvm_before_handle_nmi(struct kvm_vcpu *vcpu)
+{
+       percpu_write(current_vcpu, vcpu);
+}
+EXPORT_SYMBOL_GPL(kvm_before_handle_nmi);
+
+void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
+{
+       percpu_write(current_vcpu, NULL);
+}
+EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
+
 int kvm_arch_init(void *opaque)
 {
        int r;
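The two bracketing helpers exported above are not called in this file; the vendor modules (vmx.c/svm.c) are expected to wrap the reflected PMU NMI with them so that a perf sample taken inside the NMI handler is attributed to the guest. A minimal sketch of the VMX side, assuming exit_intr_info has already been read from the VMCS:

	if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
	    (exit_intr_info & INTR_INFO_VALID_MASK)) {
		kvm_before_handle_nmi(&vmx->vcpu);
		asm("int $2");		/* re-raise the host NMI */
		kvm_after_handle_nmi(&vmx->vcpu);
	}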
@@ -3991,6 +4036,8 @@ int kvm_arch_init(void *opaque)
 
        kvm_timer_init();
 
+       perf_register_guest_info_callbacks(&kvm_guest_cbs);
+
        return 0;
 
 out:
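Registering kvm_guest_cbs only publishes the callbacks; the perf side is expected to consult them when classifying a sample. A rough sketch of that consumer, assuming the PERF_RECORD_MISC_GUEST_* flags introduced on the tip/perf/core side of this merge:

unsigned long perf_misc_flags(struct pt_regs *regs)
{
	/* samples taken while a vcpu is loaded are tagged as guest hits */
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		return perf_guest_cbs->is_user_mode() ?
			PERF_RECORD_MISC_GUEST_USER :
			PERF_RECORD_MISC_GUEST_KERNEL;

	return user_mode(regs) ? PERF_RECORD_MISC_USER :
				 PERF_RECORD_MISC_KERNEL;
}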
@@ -3999,6 +4046,8 @@ out:
 
 void kvm_arch_exit(void)
 {
+       perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
+
        if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
                cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block,
                                            CPUFREQ_TRANSITION_NOTIFIER);
@@ -4800,10 +4849,11 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
                                   tss_selector, reason, has_error_code,
                                   error_code);
 
-       if (ret == X86EMUL_CONTINUE)
-               kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+       if (ret)
+               return EMULATE_FAIL;
 
-       return (ret != X86EMUL_CONTINUE);
+       kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+       return EMULATE_DONE;
 }
 EXPORT_SYMBOL_GPL(kvm_task_switch);
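With this change kvm_task_switch() reports an emulation_result instead of a bare 0/1, so callers are expected to compare against EMULATE_FAIL rather than testing for non-zero. A hypothetical caller in an exit handler might look like this (names and error plumbing are illustrative only):

	if (kvm_task_switch(vcpu, tss_selector, reason,
			    has_error_code, error_code) == EMULATE_FAIL) {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return 0;	/* drop back to userspace */
	}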