x86, perf events: Check if we have APIC enabled
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 9961d84..18f05ec 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -124,7 +124,7 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
        .enabled = 1,
 };
 
-static const struct event_constraint *event_constraint;
+static const struct event_constraint *event_constraints;
 
 /*
  * Not sure about some of these
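(The point of this rename shows up in the intel_get_event_idx() hunk further
down: a local variable of the same name was shadowing this file-scope
pointer; see the sketch there.)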
@@ -245,7 +245,7 @@ static u64 __read_mostly hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
 
-static const u64 nehalem_hw_cache_event_ids
+static __initconst u64 nehalem_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
@@ -336,7 +336,7 @@ static const u64 nehalem_hw_cache_event_ids
  },
 };
 
-static const u64 core2_hw_cache_event_ids
+static __initconst u64 core2_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
@@ -427,7 +427,7 @@ static const u64 core2_hw_cache_event_ids
  },
 };
 
-static const u64 atom_hw_cache_event_ids
+static __initconst u64 atom_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
@@ -536,7 +536,7 @@ static u64 intel_pmu_raw_event(u64 hw_event)
        return hw_event & CORE_EVNTSEL_MASK;
 }
 
-static const u64 amd_hw_cache_event_ids
+static __initconst u64 amd_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
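The four hw_cache_event_ids tables above are read exactly once, when the
matching *_pmu_init() routine copies the selected table into the live
hw_cache_event_ids array, so `const` becomes `__initconst` and the tables
are discarded with the rest of init memory after boot. A minimal sketch of
the pattern, with hypothetical names (note that later kernels require the
spelling `__initconst const`):

    #include <linux/init.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /* init-only template: freed by free_initmem() once boot is done */
    static __initconst const u64 example_ids[2] = { 0x3c, 0xc0 };

    /* the live copy the rest of the kernel keeps using */
    static u64 live_ids[2];

    static __init int example_pmu_init(void)
    {
            /* copy while .init data is still mapped; nothing may hold
             * a pointer into example_ids after initmem is freed */
            memcpy(live_ids, example_ids, sizeof(live_ids));
            return 0;
    }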
@@ -1442,12 +1442,12 @@ intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
        const struct event_constraint *event_constraint;
        int i, code;
 
-       if (!event_constraint)
+       if (!event_constraints)
                goto skip;
 
        code = hwc->config & CORE_EVNTSEL_EVENT_MASK;
 
-       for_each_event_constraint(event_constraint, event_constraint) {
+       for_each_event_constraint(event_constraint, event_constraints) {
                if (code == event_constraint->code) {
                        for_each_bit(i, event_constraint->idxmsk, X86_PMC_IDX_MAX) {
                                if (!test_and_set_bit(i, cpuc->used_mask))
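The rename in the first hunk exists for the sake of this function:
intel_get_event_idx() declares a local `event_constraint` cursor that
shadowed the old file-scope pointer of the same name, so the
`if (!event_constraint)` guard tested an uninitialized local instead of
checking whether a constraint table had been installed at all. The global
gets the plural name `event_constraints`; the local cursor stays. The
hazard in isolation (hypothetical names):

    /* file-scope table pointer, set during init, possibly NULL */
    static const int *constraints;

    static int find(int code)
    {
            const int *constraints;         /* BUG: shadows the global */

            if (!constraints)               /* reads the uninitialized
                                             * local, not the global   */
                    return -1;
            /* ... iterate with the cursor ... */
            return 0;
    }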
@@ -1632,6 +1632,7 @@ static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
 
        data.period     = event->hw.last_period;
        data.addr       = 0;
+       data.raw        = NULL;
        regs.ip         = 0;
 
        /*
@@ -1749,6 +1750,7 @@ static int p6_pmu_handle_irq(struct pt_regs *regs)
        u64 val;
 
        data.addr = 0;
+       data.raw = NULL;
 
        cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -1794,6 +1796,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
        u64 ack, status;
 
        data.addr = 0;
+       data.raw = NULL;
 
        cpuc = &__get_cpu_var(cpu_hw_events);
 
@@ -1857,6 +1860,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
        u64 val;
 
        data.addr = 0;
+       data.raw = NULL;
 
        cpuc = &__get_cpu_var(cpu_hw_events);
 
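Four interrupt/drain paths build a `struct perf_sample_data` on the stack
and previously initialized only `addr`; the sample output code also follows
`raw` whenever the event asked for PERF_SAMPLE_RAW, so stack garbage there
became a wild pointer dereference. Each site now NULLs it explicitly.
Roughly the consumer being guarded against, as this era's
perf_prepare_sample() had it (a sketch, not part of this diff):

    if (sample_type & PERF_SAMPLE_RAW) {
            int size = sizeof(u32);

            if (data->raw)                  /* uninitialized ->raw on the
                                             * stack == wild read here */
                    size += data->raw->size;
            header->size += size;
    }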
@@ -1964,7 +1968,7 @@ static __read_mostly struct notifier_block perf_event_nmi_notifier = {
        .priority               = 1
 };
 
-static struct x86_pmu p6_pmu = {
+static __initconst struct x86_pmu p6_pmu = {
        .name                   = "p6",
        .handle_irq             = p6_pmu_handle_irq,
        .disable_all            = p6_pmu_disable_all,
@@ -1992,7 +1996,7 @@ static struct x86_pmu p6_pmu = {
        .get_event_idx          = intel_get_event_idx,
 };
 
-static struct x86_pmu intel_pmu = {
+static __initconst struct x86_pmu intel_pmu = {
        .name                   = "Intel",
        .handle_irq             = intel_pmu_handle_irq,
        .disable_all            = intel_pmu_disable_all,
@@ -2016,7 +2020,7 @@ static struct x86_pmu intel_pmu = {
        .get_event_idx          = intel_get_event_idx,
 };
 
-static struct x86_pmu amd_pmu = {
+static __initconst struct x86_pmu amd_pmu = {
        .name                   = "AMD",
        .handle_irq             = amd_pmu_handle_irq,
        .disable_all            = amd_pmu_disable_all,
@@ -2037,7 +2041,7 @@ static struct x86_pmu amd_pmu = {
        .get_event_idx          = gen_get_event_idx,
 };
 
-static int p6_pmu_init(void)
+static __init int p6_pmu_init(void)
 {
        switch (boot_cpu_data.x86_model) {
        case 1:
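The same init-memory treatment covers the three x86_pmu templates: each is
consumed once, by an assignment like `x86_pmu = p6_pmu;` inside its __init
probe routine, and only the live x86_pmu copy is referenced afterwards, so
templates and probe functions alike can be dropped after boot. Reduced to
essentials (illustrative names):

    #include <linux/init.h>

    struct pmu_ops {
            const char      *name;
            int             num_events;
    };

    static struct pmu_ops x86_live;         /* the one runtime instance */

    static __initconst const struct pmu_ops p6_template = {
            .name           = "p6",
            .num_events     = 2,
    };

    static __init int probe(void)
    {
            x86_live = p6_template; /* copy out before initmem is freed */
            return 0;
    }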
@@ -2047,12 +2051,12 @@ static int p6_pmu_init(void)
        case 7:
        case 8:
        case 11: /* Pentium III */
-               event_constraint = intel_p6_event_constraints;
+               event_constraints = intel_p6_event_constraints;
                break;
        case 9:
        case 13:
                /* Pentium M */
-               event_constraint = intel_p6_event_constraints;
+               event_constraints = intel_p6_event_constraints;
                break;
        default:
                pr_cont("unsupported p6 CPU model %d ",
@@ -2062,16 +2066,10 @@ static int p6_pmu_init(void)
 
        x86_pmu = p6_pmu;
 
-       if (!cpu_has_apic) {
-               pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
-               pr_info("no hardware sampling interrupt available.\n");
-               x86_pmu.apic = 0;
-       }
-
        return 0;
 }
 
-static int intel_pmu_init(void)
+static __init int intel_pmu_init(void)
 {
        union cpuid10_edx edx;
        union cpuid10_eax eax;
@@ -2124,14 +2122,14 @@ static int intel_pmu_init(void)
                       sizeof(hw_cache_event_ids));
 
                pr_cont("Core2 events, ");
-               event_constraint = intel_core_event_constraints;
+               event_constraints = intel_core_event_constraints;
                break;
        default:
        case 26:
                memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
 
-               event_constraint = intel_nehalem_event_constraints;
+               event_constraints = intel_nehalem_event_constraints;
                pr_cont("Nehalem/Corei7 events, ");
                break;
        case 28:
@@ -2144,7 +2142,7 @@ static int intel_pmu_init(void)
        return 0;
 }
 
-static int amd_pmu_init(void)
+static __init int amd_pmu_init(void)
 {
        /* Performance-monitoring supported from K7 and later: */
        if (boot_cpu_data.x86 < 6)
@@ -2159,6 +2157,16 @@ static int amd_pmu_init(void)
        return 0;
 }
 
+static void __init pmu_check_apic(void)
+{
+       if (cpu_has_apic)
+               return;
+
+       x86_pmu.apic = 0;
+       pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
+       pr_info("no hardware sampling interrupt available.\n");
+}
+
 void __init init_hw_perf_events(void)
 {
        int err;
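This is the change the commit title describes. The APIC check used to run
only in p6_pmu_init(), so Core2/Nehalem and AMD machines booted with
"nolapic" still claimed a hardware sampling interrupt they could not
deliver. pmu_check_apic() hoists the test into one helper that
init_hw_perf_events() invokes right after the vendor probe has installed
x86_pmu (next hunk), so every PMU is covered. Elsewhere in this file the
flag gates the PMI wiring, roughly like this (recalled from this era's
code, not shown in this diff):

    static void perf_events_lapic_init(void)
    {
            if (!x86_pmu.apic || !x86_pmu_initialized())
                    return;

            /* no local APIC means no LVTPC entry to aim at a handler;
             * with one, deliver the PMU interrupt as an NMI */
            apic_write(APIC_LVTPC, APIC_DM_NMI);
    }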
@@ -2180,6 +2188,8 @@ void __init init_hw_perf_events(void)
                return;
        }
 
+       pmu_check_apic();
+
        pr_cont("%s PMU driver.\n", x86_pmu.name);
 
        if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
@@ -2229,10 +2239,10 @@ validate_event(struct cpu_hw_events *cpuc, struct perf_event *event)
 {
        struct hw_perf_event fake_event = event->hw;
 
-       if (event->pmu != &pmu)
+       if (event->pmu && event->pmu != &pmu)
                return 0;
 
-       return x86_schedule_event(cpuc, &fake_event);
+       return x86_schedule_event(cpuc, &fake_event) >= 0;
 }
 
 static int validate_group(struct perf_event *event)
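Two distinct fixes in validate_event(). x86_schedule_event() returns the
chosen counter index on success and a negative errno on failure, yet
validate_group() tests validate_event()'s result as a boolean, so being
scheduled onto counter 0 was misread as "does not fit"; comparing `>= 0`
produces the intended truth value. Separately, the event being validated is
still under construction and its `pmu` pointer has not been assigned yet,
so a NULL pmu must not disqualify it. The index-as-boolean trap by itself:

    /* returns a counter index (>= 0) or a negative errno */
    static int schedule_event(void)
    {
            return 0;                       /* counter 0: success */
    }

    static int validate_buggy(void)
    {
            return schedule_event();        /* 0 is falsy, so success on
                                             * counter 0 looks like failure */
    }

    static int validate_fixed(void)
    {
            return schedule_event() >= 0;   /* only negatives fail */
    }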
@@ -2287,7 +2297,7 @@ void callchain_store(struct perf_callchain_entry *entry, u64 ip)
 
 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
-static DEFINE_PER_CPU(int, in_nmi_frame);
+static DEFINE_PER_CPU(int, in_ignored_frame);
 
 
 static void
@@ -2303,8 +2313,9 @@ static void backtrace_warning(void *data, char *msg)
 
 static int backtrace_stack(void *data, char *name)
 {
-       per_cpu(in_nmi_frame, smp_processor_id()) =
-                       x86_is_stack_id(NMI_STACK, name);
+       per_cpu(in_ignored_frame, smp_processor_id()) =
+                       x86_is_stack_id(NMI_STACK, name) ||
+                       x86_is_stack_id(DEBUG_STACK, name);
 
        return 0;
 }
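backtrace_stack() and backtrace_address() are the .stack/.address callbacks
handed to the dump_trace() unwinder: the first fires whenever the walker
steps onto a named exception stack and latches the per-cpu flag, the second
then discards every address gathered while the flag is set. Since a PMI can
also land while the CPU is on the debug stack (think hardware breakpoints),
DEBUG_STACK frames are now skipped alongside NMI_STACK ones, and the flag
is renamed to say so. How the callbacks plug in, per this era's API (a
sketch, not part of this diff):

    static const struct stacktrace_ops backtrace_ops = {
            .warning        = backtrace_warning,
            .warning_symbol = backtrace_warning_symbol,
            .stack          = backtrace_stack,      /* per stack switch   */
            .address        = backtrace_address,    /* per return address */
    };

    /* the kernel-side callchain capture then runs:
     *   dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
     */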
@@ -2313,7 +2324,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable)
 {
        struct perf_callchain_entry *entry = data;
 
-       if (per_cpu(in_nmi_frame, smp_processor_id()))
+       if (per_cpu(in_ignored_frame, smp_processor_id()))
                return;
 
        if (reliable)