x86, perf: Use apic_write unconditionally
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 0c03d5c..14eca80 100644
 #include <asm/stacktrace.h>
 #include <asm/nmi.h>
 
-static u64 perf_event_mask __read_mostly;
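+/*
+ * Debug aid, normally compiled out: flip the #if 0 below to have every
+ * PMU MSR write logged via trace_printk() before it is issued.
+ */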
+#if 0
+#undef wrmsrl
+#define wrmsrl(msr, val)                                       \
+do {                                                           \
+       trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
+                       (unsigned long)(val));                  \
+       native_write_msr((msr), (u32)((u64)(val)),              \
+                       (u32)((u64)(val) >> 32));               \
+} while (0)
+#endif
+
+/*
+ * Best-effort, GUP-based copy_from_user() that assumes IRQ or NMI context.
+ */
+static unsigned long
+copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
+{
+       unsigned long offset, addr = (unsigned long)from;
+       int type = in_nmi() ? KM_NMI : KM_IRQ0;
+       unsigned long size, len = 0;
+       struct page *page;
+       void *map;
+       int ret;
+
+       do {
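+               /*
+                * Pin the user page without sleeping or taking mmap_sem;
+                * if that fails, return the partial length copied so far.
+                */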
+               ret = __get_user_pages_fast(addr, 1, 0, &page);
+               if (!ret)
+                       break;
+
+               offset = addr & (PAGE_SIZE - 1);
+               size = min(PAGE_SIZE - offset, n - len);
+
+               map = kmap_atomic(page, type);
+               memcpy(to, map+offset, size);
+               kunmap_atomic(map, type);
+               put_page(page);
+
+               len  += size;
+               to   += size;
+               addr += size;
+
+       } while (len < n);
+
+       return len;
+}
 
 struct event_constraint {
        union {
@@ -48,13 +92,14 @@ struct amd_nb {
        struct event_constraint event_constraints[X86_PMC_IDX_MAX];
 };
 
+#define MAX_LBR_ENTRIES                16 /* deepest hardware LBR stack to date */
+
 struct cpu_hw_events {
        /*
         * Generic x86 PMC bits
         */
        struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
        unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
-       unsigned long           interrupts;
        int                     enabled;
 
        int                     n_events;
@@ -70,6 +115,14 @@ struct cpu_hw_events {
        u64                     pebs_enabled;
 
        /*
+        * Intel LBR bits
+        */
+       int                             lbr_users;      /* nr of active LBR users */
+       void                            *lbr_context;   /* context LBR was last reset for */
+       struct perf_branch_stack        lbr_stack;
+       struct perf_branch_entry        lbr_entries[MAX_LBR_ENTRIES];
+
+       /*
         * AMD specific bits
         */
        struct amd_nb           *amd_nb;
@@ -109,6 +162,17 @@ struct cpu_hw_events {
 #define for_each_event_constraint(e, c)        \
        for ((e) = (c); (e)->cmask; (e)++)
 
+/*
+ * Bit layout of the IA32_PERF_CAPABILITIES MSR; the MSR is read into the
+ * 'capabilities' member so that the feature bits can be tested directly.
+ */
+union perf_capabilities {
+       struct {
+               u64     lbr_format    : 6;
+               u64     pebs_trap     : 1;
+               u64     pebs_arch_reg : 1;
+               u64     pebs_format   : 4;
+               u64     smm_freeze    : 1;
+       };
+       u64     capabilities;
+};
+
 /*
  * struct x86_pmu - generic x86 pmu
  */
@@ -123,6 +187,8 @@ struct x86_pmu {
        void            (*enable_all)(void);
        void            (*enable)(struct perf_event *);
        void            (*disable)(struct perf_event *);
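+       /* Model hooks: translate attr into hwc->config, assign counters. */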
+       int             (*hw_config)(struct perf_event_attr *attr, struct hw_perf_event *hwc);
+       int             (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
        unsigned        eventsel;
        unsigned        perfctr;
        u64             (*event_map)(int);
@@ -141,6 +207,7 @@ struct x86_pmu {
        void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
                                                 struct perf_event *event);
        struct event_constraint *event_constraints;
+       void            (*quirks)(void);
 
        void            (*cpu_prepare)(int cpu);
        void            (*cpu_starting)(int cpu);
@@ -150,7 +217,8 @@ struct x86_pmu {
        /*
         * Intel Arch Perfmon v2+
         */
-       u64             intel_ctrl;
+       u64                     intel_ctrl;
+       union perf_capabilities intel_cap;
 
        /*
         * Intel DebugStore bits
@@ -159,6 +227,12 @@ struct x86_pmu {
        int             pebs_record_size;
        void            (*drain_pebs)(struct pt_regs *regs);
        struct event_constraint *pebs_constraints;
+
+       /*
+        * Intel LBR
+        */
+       unsigned long   lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
+       int             lbr_nr;                    /* hardware stack size */
 };
 
 static struct x86_pmu x86_pmu __read_mostly;
@@ -236,9 +310,10 @@ again:
 static atomic_t active_events;
 static DEFINE_MUTEX(pmc_reserve_mutex);
 
+#ifdef CONFIG_X86_LOCAL_APIC
+
 static bool reserve_pmc_hardware(void)
 {
-#ifdef CONFIG_X86_LOCAL_APIC
        int i;
 
        if (nmi_watchdog == NMI_LOCAL_APIC)
@@ -253,11 +328,9 @@ static bool reserve_pmc_hardware(void)
                if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
                        goto eventsel_fail;
        }
-#endif
 
        return true;
 
-#ifdef CONFIG_X86_LOCAL_APIC
 eventsel_fail:
        for (i--; i >= 0; i--)
                release_evntsel_nmi(x86_pmu.eventsel + i);
@@ -272,12 +345,10 @@ perfctr_fail:
                enable_lapic_nmi_watchdog();
 
        return false;
-#endif
 }
 
 static void release_pmc_hardware(void)
 {
-#ifdef CONFIG_X86_LOCAL_APIC
        int i;
 
        for (i = 0; i < x86_pmu.num_events; i++) {
@@ -287,9 +358,15 @@ static void release_pmc_hardware(void)
 
        if (nmi_watchdog == NMI_LOCAL_APIC)
                enable_lapic_nmi_watchdog();
-#endif
 }
 
+#else
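+/* Without a local APIC there is no NMI watchdog to coordinate with. */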
+
+static bool reserve_pmc_hardware(void) { return true; }
+static void release_pmc_hardware(void) {}
+
+#endif
+
 static int reserve_ds_buffers(void);
 static void release_ds_buffers(void);
 
@@ -340,6 +417,25 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
        return 0;
 }
 
+/*
+ * Default hw_config() implementation, shared by the models that use the
+ * architectural perfctr event format.
+ */
+static int x86_hw_config(struct perf_event_attr *attr, struct hw_perf_event *hwc)
+{
+       /*
+        * Generate PMC IRQs:
+        * (keep 'enabled' bit clear for now)
+        */
+       hwc->config = ARCH_PERFMON_EVENTSEL_INT;
+
+       /*
+        * Count user and OS events unless requested not to
+        */
+       if (!attr->exclude_user)
+               hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
+       if (!attr->exclude_kernel)
+               hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
+
+       return 0;
+}
+
 /*
  * Setup the hardware configuration for a given attr_type
  */
@@ -371,23 +467,14 @@ static int __hw_perf_event_init(struct perf_event *event)
 
        event->destroy = hw_perf_event_destroy;
 
-       /*
-        * Generate PMC IRQs:
-        * (keep 'enabled' bit clear for now)
-        */
-       hwc->config = ARCH_PERFMON_EVENTSEL_INT;
-
        hwc->idx = -1;
        hwc->last_cpu = -1;
        hwc->last_tag = ~0ULL;
 
-       /*
-        * Count user and OS events unless requested not to.
-        */
-       if (!attr->exclude_user)
-               hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
-       if (!attr->exclude_kernel)
-               hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
+       /* Processor specifics */
+       err = x86_pmu.hw_config(attr, hwc);
+       if (err)
+               return err;
 
        if (!hwc->sample_period) {
                hwc->sample_period = x86_pmu.max_period;
@@ -442,7 +529,7 @@ static int __hw_perf_event_init(struct perf_event *event)
                        return -EOPNOTSUPP;
 
                /* BTS is currently only allowed for user-mode. */
-               if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
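+               /*
+                * hwc->config is now filled in by the model-specific
+                * hw_config() and may use a different bit layout, so
+                * test the attr bit directly.
+                */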
+               if (!attr->exclude_kernel)
                        return -EOPNOTSUPP;
        }
 
@@ -712,7 +799,6 @@ void hw_perf_enable(void)
                 * step2: reprogram moved events into new counters
                 */
                for (i = 0; i < n_running; i++) {
-
                        event = cpuc->event_list[i];
                        hwc = &event->hw;
 
@@ -727,21 +813,16 @@ void hw_perf_enable(void)
                                continue;
 
                        x86_pmu_stop(event);
-
-                       hwc->idx = -1;
                }
 
                for (i = 0; i < cpuc->n_events; i++) {
-
                        event = cpuc->event_list[i];
                        hwc = &event->hw;
 
-                       if (i < n_running &&
-                           match_prev_assignment(hwc, cpuc, i))
-                               continue;
-
-                       if (hwc->idx == -1)
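+                       /*
+                        * Reprogram the event only if its counter
+                        * assignment changed; a matching, already
+                        * running event is left untouched.
+                        */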
+                       if (!match_prev_assignment(hwc, cpuc, i))
                                x86_assign_hw_event(event, cpuc, i);
+                       else if (i < n_running)
+                               continue;
 
                        x86_pmu_start(event);
                }
@@ -757,14 +838,15 @@ void hw_perf_enable(void)
 
 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc)
 {
-       (void)checking_wrmsrl(hwc->config_base + hwc->idx,
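+       /*
+        * Once the PMU has been detected its counter MSRs are known to
+        * exist, so the fault-checking wrmsr variant is unnecessary.
+        */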
+       wrmsrl(hwc->config_base + hwc->idx,
                              hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
 }
 
 static inline void x86_pmu_disable_event(struct perf_event *event)
 {
        struct hw_perf_event *hwc = &event->hw;
-       (void)checking_wrmsrl(hwc->config_base + hwc->idx, hwc->config);
+
+       wrmsrl(hwc->config_base + hwc->idx, hwc->config);
 }
 
 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
@@ -779,7 +861,7 @@ x86_perf_event_set_period(struct perf_event *event)
        struct hw_perf_event *hwc = &event->hw;
        s64 left = atomic64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
-       int err, ret = 0, idx = hwc->idx;
+       int ret = 0, idx = hwc->idx;
 
        if (idx == X86_PMC_IDX_FIXED_BTS)
                return 0;
@@ -817,8 +899,8 @@ x86_perf_event_set_period(struct perf_event *event)
         */
        atomic64_set(&hwc->prev_count, (u64)-left);
 
-       err = checking_wrmsrl(hwc->event_base + idx,
-                            (u64)(-left) & x86_pmu.event_mask);
+       wrmsrl(hwc->event_base + idx,
+              (u64)(-left) & x86_pmu.event_mask);
 
        perf_event_update_userpage(event);
 
@@ -855,7 +937,7 @@ static int x86_pmu_enable(struct perf_event *event)
        if (n < 0)
                return n;
 
-       ret = x86_schedule_events(cpuc, n, assign);
+       ret = x86_pmu.schedule_events(cpuc, n, assign);
        if (ret)
                return ret;
        /*
@@ -923,7 +1005,7 @@ void perf_event_print_debug(void)
                pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
                pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
        }
-       pr_info("CPU#%d: active:       %016llx\n", cpu, *(u64 *)cpuc->active_mask);
+       pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);
 
        for (idx = 0; idx < x86_pmu.num_events; idx++) {
                rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
@@ -1054,7 +1136,6 @@ void set_perf_event_pending(void)
 
 void perf_events_lapic_init(void)
 {
-#ifdef CONFIG_X86_LOCAL_APIC
        if (!x86_pmu.apic || !x86_pmu_initialized())
                return;
 
@@ -1062,7 +1143,6 @@ void perf_events_lapic_init(void)
         * Always use NMI for PMU
         */
        apic_write(APIC_LVTPC, APIC_DM_NMI);
-#endif
 }
 
 static int __kprobes
@@ -1086,9 +1166,7 @@ perf_event_nmi_handler(struct notifier_block *self,
 
        regs = args->regs;
 
-#ifdef CONFIG_X86_LOCAL_APIC
        apic_write(APIC_LVTPC, APIC_DM_NMI);
-#endif
        /*
         * Can't rely on the handled return value to say it was our NMI, two
         * events could trigger 'simultaneously' raising two back-to-back NMIs.
@@ -1182,12 +1260,15 @@ int hw_perf_group_sched_in(struct perf_event *leader,
        int assign[X86_PMC_IDX_MAX];
        int n0, n1, ret;
 
+       /* Not handled: returning 0 makes the core fall back to generic scheduling. */
+       if (!x86_pmu_initialized())
+               return 0;
+
        /* n0 = total number of events */
        n0 = collect_events(cpuc, leader, true);
        if (n0 < 0)
                return n0;
 
-       ret = x86_schedule_events(cpuc, n0, assign);
+       ret = x86_pmu.schedule_events(cpuc, n0, assign);
        if (ret)
                return ret;
 
@@ -1237,6 +1318,8 @@ undo:
 
 #include "perf_event_amd.c"
 #include "perf_event_p6.c"
+#include "perf_event_p4.c"
+#include "perf_event_intel_lbr.c"
 #include "perf_event_intel_ds.c"
 #include "perf_event_intel.c"
 
@@ -1309,12 +1392,15 @@ void __init init_hw_perf_events(void)
 
        pr_cont("%s PMU driver.\n", x86_pmu.name);
 
+       /* Apply model-specific setup quirks before the PMU is put to use. */
+       if (x86_pmu.quirks)
+               x86_pmu.quirks();
+
        if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
                WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
                     x86_pmu.num_events, X86_PMC_MAX_GENERIC);
                x86_pmu.num_events = X86_PMC_MAX_GENERIC;
        }
-       perf_event_mask = (1 << x86_pmu.num_events) - 1;
+       x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_events) - 1;
        perf_max_events = x86_pmu.num_events;
 
        if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
@@ -1323,9 +1409,8 @@ void __init init_hw_perf_events(void)
                x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
        }
 
-       perf_event_mask |=
+       x86_pmu.intel_ctrl |=
                ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
-       x86_pmu.intel_ctrl = perf_event_mask;
 
        perf_events_lapic_init();
        register_die_notifier(&perf_event_nmi_notifier);
@@ -1350,7 +1435,7 @@ void __init init_hw_perf_events(void)
        pr_info("... value mask:             %016Lx\n", x86_pmu.event_mask);
        pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
        pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_events_fixed);
-       pr_info("... event mask:             %016Lx\n", perf_event_mask);
+       pr_info("... event mask:             %016Lx\n", x86_pmu.intel_ctrl);
 
        perf_cpu_notifier(x86_pmu_notifier);
 }
@@ -1435,7 +1520,7 @@ static int validate_group(struct perf_event *event)
 
        fake_cpuc->n_events = n;
 
-       ret = x86_schedule_events(fake_cpuc, n, NULL);
+       ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
 
 out_free:
        kfree(fake_cpuc);
@@ -1532,41 +1617,6 @@ perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
        dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
 }
 
-/*
- * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
- */
-static unsigned long
-copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
-{
-       unsigned long offset, addr = (unsigned long)from;
-       int type = in_nmi() ? KM_NMI : KM_IRQ0;
-       unsigned long size, len = 0;
-       struct page *page;
-       void *map;
-       int ret;
-
-       do {
-               ret = __get_user_pages_fast(addr, 1, 0, &page);
-               if (!ret)
-                       break;
-
-               offset = addr & (PAGE_SIZE - 1);
-               size = min(PAGE_SIZE - offset, n - len);
-
-               map = kmap_atomic(page, type);
-               memcpy(to, map+offset, size);
-               kunmap_atomic(map, type);
-               put_page(page);
-
-               len  += size;
-               to   += size;
-               addr += size;
-
-       } while (len < n);
-
-       return len;
-}
-
 static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
 {
        unsigned long bytes;
@@ -1640,3 +1690,17 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 
        return entry;
 }
+
+#ifdef CONFIG_EVENT_TRACING
+void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
+{
+       regs->ip = ip;
+       /*
+        * perf_arch_fetch_caller_regs() itself adds another call frame, so
+        * increment the skip level to compensate.
+        */
+       regs->bp = rewind_frame_pointer(skip + 1);
+       regs->cs = __KERNEL_CS;
+       local_save_flags(regs->flags);
+}
+#endif