2 * Performance events x86 architecture code
4 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2009 Jaswinder Singh Rajput
7 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
9 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10 * Copyright (C) 2009 Google, Inc., Stephane Eranian
12 * For licencing details see kernel-base/COPYING
15 #include <linux/perf_event.h>
16 #include <linux/capability.h>
17 #include <linux/notifier.h>
18 #include <linux/hardirq.h>
19 #include <linux/kprobes.h>
20 #include <linux/module.h>
21 #include <linux/kdebug.h>
22 #include <linux/sched.h>
23 #include <linux/uaccess.h>
24 #include <linux/highmem.h>
25 #include <linux/cpu.h>
28 #include <asm/stacktrace.h>
31 static u64 perf_event_mask __read_mostly;
33 /* The maximal number of PEBS events: */
34 #define MAX_PEBS_EVENTS 4
36 /* The size of a BTS record in bytes: */
37 #define BTS_RECORD_SIZE 24
39 /* The size of a per-cpu BTS buffer in bytes: */
40 #define BTS_BUFFER_SIZE (BTS_RECORD_SIZE * 2048)
42 /* The BTS overflow threshold in bytes from the end of the buffer: */
43 #define BTS_OVFL_TH (BTS_RECORD_SIZE * 128)
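/*
 * Buffer geometry: BTS_BUFFER_SIZE / BTS_RECORD_SIZE = 2048 records per
 * cpu, with the interrupt threshold placed 128 records short of
 * bts_absolute_maximum so a PMI is raised before the buffer overflows.
 * Each 24-byte record holds three 64-bit fields: branch-from address,
 * branch-to address and flags.
 */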
47 * Bits in the debugctlmsr controlling branch tracing.
49 #define X86_DEBUGCTL_TR (1 << 6)
50 #define X86_DEBUGCTL_BTS (1 << 7)
51 #define X86_DEBUGCTL_BTINT (1 << 8)
52 #define X86_DEBUGCTL_BTS_OFF_OS (1 << 9)
53 #define X86_DEBUGCTL_BTS_OFF_USR (1 << 10)
56 * A debug store configuration.
58 * We only support architectures that use 64bit fields.
63 u64 bts_absolute_maximum;
64 u64 bts_interrupt_threshold;
67 u64 pebs_absolute_maximum;
68 u64 pebs_interrupt_threshold;
69 u64 pebs_event_reset[MAX_PEBS_EVENTS];
72 struct event_constraint {
74 unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
81 struct cpu_hw_events {
82 struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
83 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
84 unsigned long interrupts;
86 struct debug_store *ds;
90 int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
91 struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
94 #define EVENT_CONSTRAINT(c, n, m) { \
95 { .idxmsk64[0] = (n) }, \
100 #define INTEL_EVENT_CONSTRAINT(c, n) \
101 EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
103 #define FIXED_EVENT_CONSTRAINT(c, n) \
104 EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
106 #define EVENT_CONSTRAINT_END \
107 EVENT_CONSTRAINT(0, 0, 0)
109 #define for_each_event_constraint(e, c) \
110 for ((e) = (c); (e)->cmask; (e)++)
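/*
 * Reading the macros above: 'c' is the event code matched against
 * (event->hw.config & cmask), 'n' is the bitmask of counter indices the
 * event is allowed to use, and 'm' selects which config bits take part
 * in the match. E.g. INTEL_EVENT_CONSTRAINT(0x12, 0x2) in the tables
 * below allows MUL (event 0x12) on generic counter 1 only.
 */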
113 * struct x86_pmu - generic x86 pmu
118 int (*handle_irq)(struct pt_regs *);
119 void (*disable_all)(void);
120 void (*enable_all)(void);
121 void (*enable)(struct hw_perf_event *, int);
122 void (*disable)(struct hw_perf_event *, int);
125 u64 (*event_map)(int);
126 u64 (*raw_event)(u64);
129 int num_events_fixed;
135 void (*enable_bts)(u64 config);
136 void (*disable_bts)(void);
137 void (*get_event_constraints)(struct cpu_hw_events *cpuc,
138 struct perf_event *event,
139 unsigned long *idxmsk);
140 void (*put_event_constraints)(struct cpu_hw_events *cpuc,
141 struct perf_event *event);
142 const struct event_constraint *event_constraints;
145 static struct x86_pmu x86_pmu __read_mostly;
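/*
 * x86_pmu is filled in at boot by copying one of the __initconst model
 * templates below (p6_pmu, intel_pmu or amd_pmu) and then patching in
 * the CPUID-derived counter geometry; see intel_pmu_init() and
 * amd_pmu_init().
 */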
147 static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
151 static int x86_perf_event_set_period(struct perf_event *event,
152 struct hw_perf_event *hwc, int idx);
155 * Not sure about some of these
157 static const u64 p6_perfmon_event_map[] =
159 [PERF_COUNT_HW_CPU_CYCLES] = 0x0079,
160 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
161 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e,
162 [PERF_COUNT_HW_CACHE_MISSES] = 0x012e,
163 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
164 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
165 [PERF_COUNT_HW_BUS_CYCLES] = 0x0062,
168 static u64 p6_pmu_event_map(int hw_event)
170 return p6_perfmon_event_map[hw_event];
174 * Event setting that is specified not to count anything.
175 * We use this to effectively disable a counter.
177 * L2_RQSTS with 0 MESI unit mask.
179 #define P6_NOP_EVENT 0x0000002EULL
181 static u64 p6_pmu_raw_event(u64 hw_event)
183 #define P6_EVNTSEL_EVENT_MASK 0x000000FFULL
184 #define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL
185 #define P6_EVNTSEL_EDGE_MASK 0x00040000ULL
186 #define P6_EVNTSEL_INV_MASK 0x00800000ULL
187 #define P6_EVNTSEL_REG_MASK 0xFF000000ULL
189 #define P6_EVNTSEL_MASK \
190 (P6_EVNTSEL_EVENT_MASK | \
191 P6_EVNTSEL_UNIT_MASK | \
192 P6_EVNTSEL_EDGE_MASK | \
193 P6_EVNTSEL_INV_MASK | \
196 return hw_event & P6_EVNTSEL_MASK;
199 static struct event_constraint intel_p6_event_constraints[] =
201 INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */
202 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
203 INTEL_EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */
204 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
205 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
206 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
211 * Intel PerfMon v3. Used on Core2 and later.
213 static const u64 intel_perfmon_event_map[] =
215 [PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
216 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
217 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
218 [PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
219 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
220 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
221 [PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
224 static struct event_constraint intel_core_event_constraints[] =
226 FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
227 FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
228 INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
229 INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
230 INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
231 INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
232 INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
233 INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
234 INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
235 INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
236 INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
240 static struct event_constraint intel_nehalem_event_constraints[] =
242 FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
243 FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
244 INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
245 INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
246 INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
247 INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
248 INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
249 INTEL_EVENT_CONSTRAINT(0x4c, 0x3), /* LOAD_HIT_PRE */
250 INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
251 INTEL_EVENT_CONSTRAINT(0x52, 0x3), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
252 INTEL_EVENT_CONSTRAINT(0x53, 0x3), /* L1D_CACHE_LOCK_FB_HIT */
253 INTEL_EVENT_CONSTRAINT(0xc5, 0x3), /* CACHE_LOCK_CYCLES */
257 static struct event_constraint intel_gen_event_constraints[] =
259 FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
260 FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
264 static u64 intel_pmu_event_map(int hw_event)
266 return intel_perfmon_event_map[hw_event];
270 * Generalized hw caching related hw_event table, filled
271 * in on a per model basis. A value of 0 means
272 * 'not supported', -1 means 'hw_event makes no sense on
273 * this CPU', any other value means the raw hw_event
277 #define C(x) PERF_COUNT_HW_CACHE_##x
279 static u64 __read_mostly hw_cache_event_ids
280 [PERF_COUNT_HW_CACHE_MAX]
281 [PERF_COUNT_HW_CACHE_OP_MAX]
282 [PERF_COUNT_HW_CACHE_RESULT_MAX];
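/*
 * A PERF_TYPE_HW_CACHE config packs the three table indices into one
 * value, decoded byte-wise by set_ext_hw_attr() below:
 *
 *	config = cache_type | (cache_op << 8) | (cache_result << 16)
 *
 * e.g. (illustrative) L1D read misses would be
 *	C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16)
 */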
284 static __initconst u64 nehalem_hw_cache_event_ids
285 [PERF_COUNT_HW_CACHE_MAX]
286 [PERF_COUNT_HW_CACHE_OP_MAX]
287 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
291 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
292 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
295 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
296 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
298 [ C(OP_PREFETCH) ] = {
299 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
300 [ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
305 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
306 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
309 [ C(RESULT_ACCESS) ] = -1,
310 [ C(RESULT_MISS) ] = -1,
312 [ C(OP_PREFETCH) ] = {
313 [ C(RESULT_ACCESS) ] = 0x0,
314 [ C(RESULT_MISS) ] = 0x0,
319 [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
320 [ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
323 [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
324 [ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
326 [ C(OP_PREFETCH) ] = {
327 [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
328 [ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
333 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
334 [ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
337 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
338 [ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
340 [ C(OP_PREFETCH) ] = {
341 [ C(RESULT_ACCESS) ] = 0x0,
342 [ C(RESULT_MISS) ] = 0x0,
347 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
348 [ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
351 [ C(RESULT_ACCESS) ] = -1,
352 [ C(RESULT_MISS) ] = -1,
354 [ C(OP_PREFETCH) ] = {
355 [ C(RESULT_ACCESS) ] = -1,
356 [ C(RESULT_MISS) ] = -1,
361 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
362 [ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
365 [ C(RESULT_ACCESS) ] = -1,
366 [ C(RESULT_MISS) ] = -1,
368 [ C(OP_PREFETCH) ] = {
369 [ C(RESULT_ACCESS) ] = -1,
370 [ C(RESULT_MISS) ] = -1,
375 static __initconst u64 core2_hw_cache_event_ids
376 [PERF_COUNT_HW_CACHE_MAX]
377 [PERF_COUNT_HW_CACHE_OP_MAX]
378 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
382 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
383 [ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
386 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
387 [ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
389 [ C(OP_PREFETCH) ] = {
390 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
391 [ C(RESULT_MISS) ] = 0,
396 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
397 [ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
400 [ C(RESULT_ACCESS) ] = -1,
401 [ C(RESULT_MISS) ] = -1,
403 [ C(OP_PREFETCH) ] = {
404 [ C(RESULT_ACCESS) ] = 0,
405 [ C(RESULT_MISS) ] = 0,
410 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
411 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
414 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
415 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
417 [ C(OP_PREFETCH) ] = {
418 [ C(RESULT_ACCESS) ] = 0,
419 [ C(RESULT_MISS) ] = 0,
424 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
425 [ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
428 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
429 [ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
431 [ C(OP_PREFETCH) ] = {
432 [ C(RESULT_ACCESS) ] = 0,
433 [ C(RESULT_MISS) ] = 0,
438 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
439 [ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
442 [ C(RESULT_ACCESS) ] = -1,
443 [ C(RESULT_MISS) ] = -1,
445 [ C(OP_PREFETCH) ] = {
446 [ C(RESULT_ACCESS) ] = -1,
447 [ C(RESULT_MISS) ] = -1,
452 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
453 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
456 [ C(RESULT_ACCESS) ] = -1,
457 [ C(RESULT_MISS) ] = -1,
459 [ C(OP_PREFETCH) ] = {
460 [ C(RESULT_ACCESS) ] = -1,
461 [ C(RESULT_MISS) ] = -1,
466 static __initconst u64 atom_hw_cache_event_ids
467 [PERF_COUNT_HW_CACHE_MAX]
468 [PERF_COUNT_HW_CACHE_OP_MAX]
469 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
473 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
474 [ C(RESULT_MISS) ] = 0,
477 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
478 [ C(RESULT_MISS) ] = 0,
480 [ C(OP_PREFETCH) ] = {
481 [ C(RESULT_ACCESS) ] = 0x0,
482 [ C(RESULT_MISS) ] = 0,
487 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
488 [ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
491 [ C(RESULT_ACCESS) ] = -1,
492 [ C(RESULT_MISS) ] = -1,
494 [ C(OP_PREFETCH) ] = {
495 [ C(RESULT_ACCESS) ] = 0,
496 [ C(RESULT_MISS) ] = 0,
501 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
502 [ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
505 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
506 [ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
508 [ C(OP_PREFETCH) ] = {
509 [ C(RESULT_ACCESS) ] = 0,
510 [ C(RESULT_MISS) ] = 0,
515 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
516 [ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
519 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
520 [ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
522 [ C(OP_PREFETCH) ] = {
523 [ C(RESULT_ACCESS) ] = 0,
524 [ C(RESULT_MISS) ] = 0,
529 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
530 [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
533 [ C(RESULT_ACCESS) ] = -1,
534 [ C(RESULT_MISS) ] = -1,
536 [ C(OP_PREFETCH) ] = {
537 [ C(RESULT_ACCESS) ] = -1,
538 [ C(RESULT_MISS) ] = -1,
543 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
544 [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
547 [ C(RESULT_ACCESS) ] = -1,
548 [ C(RESULT_MISS) ] = -1,
550 [ C(OP_PREFETCH) ] = {
551 [ C(RESULT_ACCESS) ] = -1,
552 [ C(RESULT_MISS) ] = -1,
557 static u64 intel_pmu_raw_event(u64 hw_event)
559 #define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL
560 #define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
561 #define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
562 #define CORE_EVNTSEL_INV_MASK 0x00800000ULL
563 #define CORE_EVNTSEL_REG_MASK 0xFF000000ULL
565 #define CORE_EVNTSEL_MASK \
566 (INTEL_ARCH_EVTSEL_MASK | \
567 INTEL_ARCH_UNIT_MASK | \
568 INTEL_ARCH_EDGE_MASK | \
569 INTEL_ARCH_INV_MASK | \
572 return hw_event & CORE_EVNTSEL_MASK;
575 static __initconst u64 amd_hw_cache_event_ids
576 [PERF_COUNT_HW_CACHE_MAX]
577 [PERF_COUNT_HW_CACHE_OP_MAX]
578 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
582 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
583 [ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */
586 [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
587 [ C(RESULT_MISS) ] = 0,
589 [ C(OP_PREFETCH) ] = {
590 [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
591 [ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */
596 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
597 [ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */
600 [ C(RESULT_ACCESS) ] = -1,
601 [ C(RESULT_MISS) ] = -1,
603 [ C(OP_PREFETCH) ] = {
604 [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
605 [ C(RESULT_MISS) ] = 0,
610 [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
611 [ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */
614 [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
615 [ C(RESULT_MISS) ] = 0,
617 [ C(OP_PREFETCH) ] = {
618 [ C(RESULT_ACCESS) ] = 0,
619 [ C(RESULT_MISS) ] = 0,
624 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
625 [ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DTLB Miss */
628 [ C(RESULT_ACCESS) ] = 0,
629 [ C(RESULT_MISS) ] = 0,
631 [ C(OP_PREFETCH) ] = {
632 [ C(RESULT_ACCESS) ] = 0,
633 [ C(RESULT_MISS) ] = 0,
638 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
639 [ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */
642 [ C(RESULT_ACCESS) ] = -1,
643 [ C(RESULT_MISS) ] = -1,
645 [ C(OP_PREFETCH) ] = {
646 [ C(RESULT_ACCESS) ] = -1,
647 [ C(RESULT_MISS) ] = -1,
652 [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
653 [ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */
656 [ C(RESULT_ACCESS) ] = -1,
657 [ C(RESULT_MISS) ] = -1,
659 [ C(OP_PREFETCH) ] = {
660 [ C(RESULT_ACCESS) ] = -1,
661 [ C(RESULT_MISS) ] = -1,
667 * AMD Performance Monitor K7 and later.
669 static const u64 amd_perfmon_event_map[] =
671 [PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
672 [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
673 [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080,
674 [PERF_COUNT_HW_CACHE_MISSES] = 0x0081,
675 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
676 [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
679 static u64 amd_pmu_event_map(int hw_event)
681 return amd_perfmon_event_map[hw_event];
684 static u64 amd_pmu_raw_event(u64 hw_event)
686 #define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL
687 #define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL
688 #define K7_EVNTSEL_EDGE_MASK 0x000040000ULL
689 #define K7_EVNTSEL_INV_MASK 0x000800000ULL
690 #define K7_EVNTSEL_REG_MASK 0x0FF000000ULL
692 #define K7_EVNTSEL_MASK \
693 (K7_EVNTSEL_EVENT_MASK | \
694 K7_EVNTSEL_UNIT_MASK | \
695 K7_EVNTSEL_EDGE_MASK | \
696 K7_EVNTSEL_INV_MASK | \
699 return hw_event & K7_EVNTSEL_MASK;
703 * Propagate event elapsed time into the generic event.
704 * Can only be executed on the CPU where the event is active.
705 * Returns the delta events processed.
708 x86_perf_event_update(struct perf_event *event,
709 struct hw_perf_event *hwc, int idx)
711 int shift = 64 - x86_pmu.event_bits;
712 u64 prev_raw_count, new_raw_count;
715 if (idx == X86_PMC_IDX_FIXED_BTS)
719 * Careful: an NMI might modify the previous event value.
721 * Our tactic to handle this is to first atomically read and
722 * exchange a new raw count - then add that new-prev delta
723 * count to the generic event atomically:
726 prev_raw_count = atomic64_read(&hwc->prev_count);
727 rdmsrl(hwc->event_base + idx, new_raw_count);
729 if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
730 new_raw_count) != prev_raw_count)
734 * Now we have the new raw value and have updated the prev
735 * timestamp already. We can now calculate the elapsed delta
736 * (event-)time and add that to the generic event.
738 * Careful, not all hw sign-extends above the physical width
741 delta = (new_raw_count << shift) - (prev_raw_count << shift);
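/*
 * Shifting both counts up by (64 - event_bits) aligns the counter's
 * top bit with bit 63, so the subtraction wraps correctly at the
 * hardware counter width even though, per the note above, the hardware
 * does not sign-extend beyond its physical width.
 */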
744 atomic64_add(delta, &event->count);
745 atomic64_sub(delta, &hwc->period_left);
747 return new_raw_count;
750 static atomic_t active_events;
751 static DEFINE_MUTEX(pmc_reserve_mutex);
753 static bool reserve_pmc_hardware(void)
755 #ifdef CONFIG_X86_LOCAL_APIC
758 if (nmi_watchdog == NMI_LOCAL_APIC)
759 disable_lapic_nmi_watchdog();
761 for (i = 0; i < x86_pmu.num_events; i++) {
762 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
766 for (i = 0; i < x86_pmu.num_events; i++) {
767 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
774 #ifdef CONFIG_X86_LOCAL_APIC
776 for (i--; i >= 0; i--)
777 release_evntsel_nmi(x86_pmu.eventsel + i);
779 i = x86_pmu.num_events;
782 for (i--; i >= 0; i--)
783 release_perfctr_nmi(x86_pmu.perfctr + i);
785 if (nmi_watchdog == NMI_LOCAL_APIC)
786 enable_lapic_nmi_watchdog();
792 static void release_pmc_hardware(void)
794 #ifdef CONFIG_X86_LOCAL_APIC
797 for (i = 0; i < x86_pmu.num_events; i++) {
798 release_perfctr_nmi(x86_pmu.perfctr + i);
799 release_evntsel_nmi(x86_pmu.eventsel + i);
802 if (nmi_watchdog == NMI_LOCAL_APIC)
803 enable_lapic_nmi_watchdog();
807 static inline bool bts_available(void)
809 return x86_pmu.enable_bts != NULL;
812 static inline void init_debug_store_on_cpu(int cpu)
814 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
819 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
820 (u32)((u64)(unsigned long)ds),
821 (u32)((u64)(unsigned long)ds >> 32));
824 static inline void fini_debug_store_on_cpu(int cpu)
826 if (!per_cpu(cpu_hw_events, cpu).ds)
829 wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
832 static void release_bts_hardware(void)
836 if (!bts_available())
841 for_each_online_cpu(cpu)
842 fini_debug_store_on_cpu(cpu);
844 for_each_possible_cpu(cpu) {
845 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
850 per_cpu(cpu_hw_events, cpu).ds = NULL;
852 kfree((void *)(unsigned long)ds->bts_buffer_base);
859 static int reserve_bts_hardware(void)
863 if (!bts_available())
868 for_each_possible_cpu(cpu) {
869 struct debug_store *ds;
873 buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
874 if (unlikely(!buffer))
877 ds = kzalloc(sizeof(*ds), GFP_KERNEL);
883 ds->bts_buffer_base = (u64)(unsigned long)buffer;
884 ds->bts_index = ds->bts_buffer_base;
885 ds->bts_absolute_maximum =
886 ds->bts_buffer_base + BTS_BUFFER_SIZE;
887 ds->bts_interrupt_threshold =
888 ds->bts_absolute_maximum - BTS_OVFL_TH;
890 per_cpu(cpu_hw_events, cpu).ds = ds;
895 release_bts_hardware();
897 for_each_online_cpu(cpu)
898 init_debug_store_on_cpu(cpu);
906 static void hw_perf_event_destroy(struct perf_event *event)
908 if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
909 release_pmc_hardware();
910 release_bts_hardware();
911 mutex_unlock(&pmc_reserve_mutex);
915 static inline int x86_pmu_initialized(void)
917 return x86_pmu.handle_irq != NULL;
921 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
923 unsigned int cache_type, cache_op, cache_result;
926 config = attr->config;
928 cache_type = (config >> 0) & 0xff;
929 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
932 cache_op = (config >> 8) & 0xff;
933 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
936 cache_result = (config >> 16) & 0xff;
937 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
940 val = hw_cache_event_ids[cache_type][cache_op][cache_result];
953 static void intel_pmu_enable_bts(u64 config)
955 unsigned long debugctlmsr;
957 debugctlmsr = get_debugctlmsr();
959 debugctlmsr |= X86_DEBUGCTL_TR;
960 debugctlmsr |= X86_DEBUGCTL_BTS;
961 debugctlmsr |= X86_DEBUGCTL_BTINT;
963 if (!(config & ARCH_PERFMON_EVENTSEL_OS))
964 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
966 if (!(config & ARCH_PERFMON_EVENTSEL_USR))
967 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
969 update_debugctlmsr(debugctlmsr);
972 static void intel_pmu_disable_bts(void)
974 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
975 unsigned long debugctlmsr;
980 debugctlmsr = get_debugctlmsr();
983 ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
984 X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
986 update_debugctlmsr(debugctlmsr);
990 * Setup the hardware configuration for a given attr_type
992 static int __hw_perf_event_init(struct perf_event *event)
994 struct perf_event_attr *attr = &event->attr;
995 struct hw_perf_event *hwc = &event->hw;
999 if (!x86_pmu_initialized())
1003 if (!atomic_inc_not_zero(&active_events)) {
1004 mutex_lock(&pmc_reserve_mutex);
1005 if (atomic_read(&active_events) == 0) {
1006 if (!reserve_pmc_hardware())
1009 err = reserve_bts_hardware();
1012 atomic_inc(&active_events);
1013 mutex_unlock(&pmc_reserve_mutex);
1018 event->destroy = hw_perf_event_destroy;
1021 * Generate PMC IRQs:
1022 * (keep 'enabled' bit clear for now)
1024 hwc->config = ARCH_PERFMON_EVENTSEL_INT;
1029 * Count user and OS events unless requested not to.
1031 if (!attr->exclude_user)
1032 hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
1033 if (!attr->exclude_kernel)
1034 hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
1036 if (!hwc->sample_period) {
1037 hwc->sample_period = x86_pmu.max_period;
1038 hwc->last_period = hwc->sample_period;
1039 atomic64_set(&hwc->period_left, hwc->sample_period);
1042 * If we have a PMU initialized but no APIC
1043 * interrupts, we cannot sample hardware
1044 * events (user-space has to fall back and
1045 * sample via a hrtimer based software event):
1052 * Raw hw_event type provide the config in the hw_event structure
1054 if (attr->type == PERF_TYPE_RAW) {
1055 hwc->config |= x86_pmu.raw_event(attr->config);
1059 if (attr->type == PERF_TYPE_HW_CACHE)
1060 return set_ext_hw_attr(hwc, attr);
1062 if (attr->config >= x86_pmu.max_events)
1068 config = x86_pmu.event_map(attr->config);
1079 if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
1080 (hwc->sample_period == 1)) {
1081 /* BTS is not supported by this architecture. */
1082 if (!bts_available())
1085 /* BTS is currently only allowed for user-mode. */
1086 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1090 hwc->config |= config;
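/*
 * Illustrative attr that ends up on the BTS path above (assuming BTS
 * hardware is present): type = PERF_TYPE_HARDWARE,
 * config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS, sample_period = 1,
 * exclude_kernel = 1. Any larger sample_period uses a normal counter.
 */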
1095 static void p6_pmu_disable_all(void)
1097 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1106 /* p6 only has one enable register */
1107 rdmsrl(MSR_P6_EVNTSEL0, val);
1108 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1109 wrmsrl(MSR_P6_EVNTSEL0, val);
1112 static void intel_pmu_disable_all(void)
1114 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1122 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1124 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1125 intel_pmu_disable_bts();
1128 static void amd_pmu_disable_all(void)
1130 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1138 * ensure we write the disable before we start disabling the
1139 * events proper, so that amd_pmu_enable_event() does the
1144 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1147 if (!test_bit(idx, cpuc->active_mask))
1149 rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
1150 if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
1152 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1153 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1157 void hw_perf_disable(void)
1159 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1161 if (!x86_pmu_initialized())
1167 x86_pmu.disable_all();
1170 static void p6_pmu_enable_all(void)
1172 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1181 /* p6 only has one enable register */
1182 rdmsrl(MSR_P6_EVNTSEL0, val);
1183 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1184 wrmsrl(MSR_P6_EVNTSEL0, val);
1187 static void intel_pmu_enable_all(void)
1189 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1197 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
1199 if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1200 struct perf_event *event =
1201 cpuc->events[X86_PMC_IDX_FIXED_BTS];
1203 if (WARN_ON_ONCE(!event))
1206 intel_pmu_enable_bts(event->hw.config);
1210 static void amd_pmu_enable_all(void)
1212 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1221 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1222 struct perf_event *event = cpuc->events[idx];
1225 if (!test_bit(idx, cpuc->active_mask))
1228 val = event->hw.config;
1229 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1230 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1234 static const struct pmu pmu;
1236 static inline int is_x86_event(struct perf_event *event)
1238 return event->pmu == &pmu;
1241 static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
1246 unsigned long constraints[X86_PMC_IDX_MAX][BITS_TO_LONGS(X86_PMC_IDX_MAX)];
1247 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
1248 struct hw_perf_event *hwc;
1250 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1252 for (i = 0; i < n; i++) {
1253 x86_pmu.get_event_constraints(cpuc,
1254 cpuc->event_list[i],
1259 * fastpath, try to reuse previous register
1261 for (i = 0, num = n; i < n; i++, num--) {
1262 hwc = &cpuc->event_list[i]->hw;
1265 /* never assigned */
1269 /* constraint still honored */
1270 if (!test_bit(hwc->idx, c))
1273 /* not already used */
1274 if (test_bit(hwc->idx, used_mask))
1278 pr_debug("CPU%d fast config=0x%llx idx=%d assign=%c\n",
1282 assign ? 'y' : 'n');
1285 set_bit(hwc->idx, used_mask);
1287 assign[i] = hwc->idx;
1296 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1299 * weight = number of possible counters
1301 * 1 = most constrained, only works on one counter
1302 * wmax = least constrained, works on any counter
1304 * assign events to counters starting with most
1305 * constrained events.
1307 wmax = x86_pmu.num_events;
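/*
 * E.g. an event whose constraint mask has a single bit set (weight 1)
 * is placed in the first pass; a fully unconstrained event
 * (weight == num_events) is only placed once all more constrained
 * events have been given a counter.
 */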
1310 * when fixed event counters are present,
1311 * wmax is incremented by 1 to account
1312 * for one more choice
1314 if (x86_pmu.num_events_fixed)
1317 for (w = 1, num = n; num && w <= wmax; w++) {
1318 /* for each event */
1319 for (i = 0; num && i < n; i++) {
1321 hwc = &cpuc->event_list[i]->hw;
1323 weight = bitmap_weight(c, X86_PMC_IDX_MAX);
1327 for_each_bit(j, c, X86_PMC_IDX_MAX) {
1328 if (!test_bit(j, used_mask))
1332 if (j == X86_PMC_IDX_MAX)
1336 pr_debug("CPU%d slow config=0x%llx idx=%d assign=%c\n",
1340 assign ? 'y' : 'n');
1343 set_bit(j, used_mask);
1352 * scheduling failed or is just a simulation,
1353 * free resources if necessary
1355 if (!assign || num) {
1356 for (i = 0; i < n; i++) {
1357 if (x86_pmu.put_event_constraints)
1358 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
1361 return num ? -ENOSPC : 0;
1365 * dogrp: true if we must collect sibling events (group)
1366 * returns total number of events and error code
1368 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
1370 struct perf_event *event;
1373 max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
1375 /* current number of events already accepted */
1378 if (is_x86_event(leader)) {
1381 cpuc->event_list[n] = leader;
1387 list_for_each_entry(event, &leader->sibling_list, group_entry) {
1388 if (!is_x86_event(event) ||
1389 event->state <= PERF_EVENT_STATE_OFF)
1395 cpuc->event_list[n] = event;
1402 static inline void x86_assign_hw_event(struct perf_event *event,
1403 struct hw_perf_event *hwc, int idx)
1407 if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
1408 hwc->config_base = 0;
1409 hwc->event_base = 0;
1410 } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
1411 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1413 * We set it so that event_base + idx in wrmsr/rdmsr maps to
1414 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
1417 MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
1419 hwc->config_base = x86_pmu.eventsel;
1420 hwc->event_base = x86_pmu.perfctr;
1424 void hw_perf_enable(void)
1426 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1427 struct perf_event *event;
1428 struct hw_perf_event *hwc;
1431 if (!x86_pmu_initialized())
1433 if (cpuc->n_added) {
1435 * apply assignment obtained either from
1436 * hw_perf_group_sched_in() or x86_pmu_enable()
1438 * step1: save events moving to new counters
1439 * step2: reprogram moved events into new counters
1441 for (i = 0; i < cpuc->n_events; i++) {
1443 event = cpuc->event_list[i];
1446 if (hwc->idx == -1 || hwc->idx == cpuc->assign[i])
1449 x86_pmu.disable(hwc, hwc->idx);
1451 clear_bit(hwc->idx, cpuc->active_mask);
1453 cpuc->events[hwc->idx] = NULL;
1455 x86_perf_event_update(event, hwc, hwc->idx);
1460 for (i = 0; i < cpuc->n_events; i++) {
1462 event = cpuc->event_list[i];
1465 if (hwc->idx == -1) {
1466 x86_assign_hw_event(event, hwc, cpuc->assign[i]);
1467 x86_perf_event_set_period(event, hwc, hwc->idx);
1470 * need to mark as active because x86_pmu_disable()
1471 * clears active_mask and events[] yet it preserves
1474 set_bit(hwc->idx, cpuc->active_mask);
1475 cpuc->events[hwc->idx] = event;
1477 x86_pmu.enable(hwc, hwc->idx);
1478 perf_event_update_userpage(event);
1481 perf_events_lapic_init();
1483 x86_pmu.enable_all();
1486 static inline u64 intel_pmu_get_status(void)
1490 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1495 static inline void intel_pmu_ack_status(u64 ack)
1497 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1500 static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1502 (void)checking_wrmsrl(hwc->config_base + idx,
1503 hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
1506 static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1508 (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
1512 intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
1514 int idx = __idx - X86_PMC_IDX_FIXED;
1517 mask = 0xfULL << (idx * 4);
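/*
 * Each fixed counter owns a 4-bit field in MSR_ARCH_PERFMON_FIXED_CTR_CTRL
 * (0x1 ring-0 enable, 0x2 ring-3 enable, 0x8 PMI; see
 * intel_pmu_enable_fixed() below); clearing the field stops the counter.
 */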
1519 rdmsrl(hwc->config_base, ctrl_val);
1521 (void)checking_wrmsrl(hwc->config_base, ctrl_val);
1525 p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1527 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1528 u64 val = P6_NOP_EVENT;
1531 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1533 (void)checking_wrmsrl(hwc->config_base + idx, val);
1537 intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1539 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1540 intel_pmu_disable_bts();
1544 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1545 intel_pmu_disable_fixed(hwc, idx);
1549 x86_pmu_disable_event(hwc, idx);
1553 amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1555 x86_pmu_disable_event(hwc, idx);
1558 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1561 * Set the next IRQ period, based on the hwc->period_left value.
1562 * To be called with the event disabled in hw:
1565 x86_perf_event_set_period(struct perf_event *event,
1566 struct hw_perf_event *hwc, int idx)
1568 s64 left = atomic64_read(&hwc->period_left);
1569 s64 period = hwc->sample_period;
1572 if (idx == X86_PMC_IDX_FIXED_BTS)
1576 * If we are way outside a reasonable range then just skip forward:
1578 if (unlikely(left <= -period)) {
1580 atomic64_set(&hwc->period_left, left);
1581 hwc->last_period = period;
1585 if (unlikely(left <= 0)) {
1587 atomic64_set(&hwc->period_left, left);
1588 hwc->last_period = period;
1592 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
1594 if (unlikely(left < 2))
1597 if (left > x86_pmu.max_period)
1598 left = x86_pmu.max_period;
1600 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
1603 * The hw event starts counting from this event offset,
1604 * mark it to be able to extract future deltas:
1606 atomic64_set(&hwc->prev_count, (u64)-left);
1608 err = checking_wrmsrl(hwc->event_base + idx,
1609 (u64)(-left) & x86_pmu.event_mask);
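/*
 * The counter is programmed with the negated period, truncated to the
 * counter width: it counts up from -left and overflows after exactly
 * 'left' increments, which is what raises the interrupt.
 */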
1611 perf_event_update_userpage(event);
1617 intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
1619 int idx = __idx - X86_PMC_IDX_FIXED;
1620 u64 ctrl_val, bits, mask;
1624 * Enable IRQ generation (0x8),
1625 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1629 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1631 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1634 mask = 0xfULL << (idx * 4);
1636 rdmsrl(hwc->config_base, ctrl_val);
1639 err = checking_wrmsrl(hwc->config_base, ctrl_val);
1642 static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1644 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1649 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1651 (void)checking_wrmsrl(hwc->config_base + idx, val);
1655 static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1657 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1658 if (!__get_cpu_var(cpu_hw_events).enabled)
1661 intel_pmu_enable_bts(hwc->config);
1665 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1666 intel_pmu_enable_fixed(hwc, idx);
1670 x86_pmu_enable_event(hwc, idx);
1673 static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1675 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1678 x86_pmu_enable_event(hwc, idx);
1682 * activate a single event
1684 * The event is added to the group of enabled events
1685 * but only if it can be scheduled with existing events.
1687 * Called with PMU disabled. If successful and return value 1,
1688 * then guaranteed to call perf_enable() and hw_perf_enable()
1690 static int x86_pmu_enable(struct perf_event *event)
1692 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1693 struct hw_perf_event *hwc;
1694 int assign[X86_PMC_IDX_MAX];
1699 n0 = cpuc->n_events;
1700 n = collect_events(cpuc, event, false);
1704 ret = x86_schedule_events(cpuc, n, assign);
1708 * copy the new assignment now that we know it is possible;
1709 * it will be used by hw_perf_enable()
1711 memcpy(cpuc->assign, assign, n*sizeof(int));
1714 cpuc->n_added = n - n0;
1717 x86_perf_event_set_period(event, hwc, hwc->idx);
1722 static void x86_pmu_unthrottle(struct perf_event *event)
1724 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1725 struct hw_perf_event *hwc = &event->hw;
1727 if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
1728 cpuc->events[hwc->idx] != event))
1731 x86_pmu.enable(hwc, hwc->idx);
1734 void perf_event_print_debug(void)
1736 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
1737 struct cpu_hw_events *cpuc;
1738 unsigned long flags;
1741 if (!x86_pmu.num_events)
1744 local_irq_save(flags);
1746 cpu = smp_processor_id();
1747 cpuc = &per_cpu(cpu_hw_events, cpu);
1749 if (x86_pmu.version >= 2) {
1750 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1751 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1752 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1753 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
1756 pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
1757 pr_info("CPU#%d: status: %016llx\n", cpu, status);
1758 pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
1759 pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
1761 pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
1763 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1764 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1765 rdmsrl(x86_pmu.perfctr + idx, pmc_count);
1767 prev_left = per_cpu(pmc_prev_left[idx], cpu);
1769 pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
1770 cpu, idx, pmc_ctrl);
1771 pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
1772 cpu, idx, pmc_count);
1773 pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
1774 cpu, idx, prev_left);
1776 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1777 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1779 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
1780 cpu, idx, pmc_count);
1782 local_irq_restore(flags);
1785 static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
1787 struct debug_store *ds = cpuc->ds;
1793 struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
1794 struct bts_record *at, *top;
1795 struct perf_output_handle handle;
1796 struct perf_event_header header;
1797 struct perf_sample_data data;
1798 struct pt_regs regs;
1806 at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
1807 top = (struct bts_record *)(unsigned long)ds->bts_index;
1812 ds->bts_index = ds->bts_buffer_base;
1815 data.period = event->hw.last_period;
1821 * Prepare a generic sample, i.e. fill in the invariant fields.
1822 * We will overwrite the from and to address before we output
1825 perf_prepare_sample(&header, &data, event, ®s);
1827 if (perf_output_begin(&handle, event,
1828 header.size * (top - at), 1, 1))
1831 for (; at < top; at++) {
1835 perf_output_sample(&handle, &header, &data, event);
1838 perf_output_end(&handle);
1840 /* There's new data available. */
1841 event->hw.interrupts++;
1842 event->pending_kill = POLL_IN;
1845 static void x86_pmu_disable(struct perf_event *event)
1847 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1848 struct hw_perf_event *hwc = &event->hw;
1849 int i, idx = hwc->idx;
1852 * Must be done before we disable, otherwise the nmi handler
1853 * could reenable again:
1855 clear_bit(idx, cpuc->active_mask);
1856 x86_pmu.disable(hwc, idx);
1859 * Make sure the cleared pointer becomes visible before we
1860 * (potentially) free the event:
1865 * Drain the remaining delta count out of an event
1866 * that we are disabling:
1868 x86_perf_event_update(event, hwc, idx);
1870 /* Drain the remaining BTS records. */
1871 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
1872 intel_pmu_drain_bts_buffer(cpuc);
1874 cpuc->events[idx] = NULL;
1876 for (i = 0; i < cpuc->n_events; i++) {
1877 if (event == cpuc->event_list[i]) {
1879 if (x86_pmu.put_event_constraints)
1880 x86_pmu.put_event_constraints(cpuc, event);
1882 while (++i < cpuc->n_events)
1883 cpuc->event_list[i-1] = cpuc->event_list[i];
1888 perf_event_update_userpage(event);
1892 * Save and restart an expired event. Called by NMI contexts,
1893 * so it has to be careful about preempting normal event ops:
1895 static int intel_pmu_save_and_restart(struct perf_event *event)
1897 struct hw_perf_event *hwc = &event->hw;
1901 x86_perf_event_update(event, hwc, idx);
1902 ret = x86_perf_event_set_period(event, hwc, idx);
1904 if (event->state == PERF_EVENT_STATE_ACTIVE)
1905 intel_pmu_enable_event(hwc, idx);
1910 static void intel_pmu_reset(void)
1912 struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
1913 unsigned long flags;
1916 if (!x86_pmu.num_events)
1919 local_irq_save(flags);
1921 printk("clearing PMU state on CPU#%d\n", smp_processor_id());
1923 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1924 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
1925 checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
1927 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1928 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1931 ds->bts_index = ds->bts_buffer_base;
1933 local_irq_restore(flags);
1936 static int p6_pmu_handle_irq(struct pt_regs *regs)
1938 struct perf_sample_data data;
1939 struct cpu_hw_events *cpuc;
1940 struct perf_event *event;
1941 struct hw_perf_event *hwc;
1942 int idx, handled = 0;
1948 cpuc = &__get_cpu_var(cpu_hw_events);
1950 for (idx = 0; idx < x86_pmu.num_events; idx++) {
1951 if (!test_bit(idx, cpuc->active_mask))
1954 event = cpuc->events[idx];
1957 val = x86_perf_event_update(event, hwc, idx);
1958 if (val & (1ULL << (x86_pmu.event_bits - 1)))
1965 data.period = event->hw.last_period;
1967 if (!x86_perf_event_set_period(event, hwc, idx))
1970 if (perf_event_overflow(event, 1, &data, regs))
1971 p6_pmu_disable_event(hwc, idx);
1975 inc_irq_stat(apic_perf_irqs);
1981 * This handler is triggered by the local APIC, so the APIC IRQ handling
1984 static int intel_pmu_handle_irq(struct pt_regs *regs)
1986 struct perf_sample_data data;
1987 struct cpu_hw_events *cpuc;
1994 cpuc = &__get_cpu_var(cpu_hw_events);
1997 intel_pmu_drain_bts_buffer(cpuc);
1998 status = intel_pmu_get_status();
2006 if (++loops > 100) {
2007 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
2008 perf_event_print_debug();
2014 inc_irq_stat(apic_perf_irqs);
2016 for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
2017 struct perf_event *event = cpuc->events[bit];
2019 clear_bit(bit, (unsigned long *) &status);
2020 if (!test_bit(bit, cpuc->active_mask))
2023 if (!intel_pmu_save_and_restart(event))
2026 data.period = event->hw.last_period;
2028 if (perf_event_overflow(event, 1, &data, regs))
2029 intel_pmu_disable_event(&event->hw, bit);
2032 intel_pmu_ack_status(ack);
2035 * Repeat if there is more work to be done:
2037 status = intel_pmu_get_status();
2046 static int amd_pmu_handle_irq(struct pt_regs *regs)
2048 struct perf_sample_data data;
2049 struct cpu_hw_events *cpuc;
2050 struct perf_event *event;
2051 struct hw_perf_event *hwc;
2052 int idx, handled = 0;
2058 cpuc = &__get_cpu_var(cpu_hw_events);
2060 for (idx = 0; idx < x86_pmu.num_events; idx++) {
2061 if (!test_bit(idx, cpuc->active_mask))
2064 event = cpuc->events[idx];
2067 val = x86_perf_event_update(event, hwc, idx);
2068 if (val & (1ULL << (x86_pmu.event_bits - 1)))
2075 data.period = event->hw.last_period;
2077 if (!x86_perf_event_set_period(event, hwc, idx))
2080 if (perf_event_overflow(event, 1, &data, regs))
2081 amd_pmu_disable_event(hwc, idx);
2085 inc_irq_stat(apic_perf_irqs);
2090 void smp_perf_pending_interrupt(struct pt_regs *regs)
2094 inc_irq_stat(apic_pending_irqs);
2095 perf_event_do_pending();
2099 void set_perf_event_pending(void)
2101 #ifdef CONFIG_X86_LOCAL_APIC
2102 if (!x86_pmu.apic || !x86_pmu_initialized())
2105 apic->send_IPI_self(LOCAL_PENDING_VECTOR);
2109 void perf_events_lapic_init(void)
2111 #ifdef CONFIG_X86_LOCAL_APIC
2112 if (!x86_pmu.apic || !x86_pmu_initialized())
2116 * Always use NMI for PMU
2118 apic_write(APIC_LVTPC, APIC_DM_NMI);
2122 static int __kprobes
2123 perf_event_nmi_handler(struct notifier_block *self,
2124 unsigned long cmd, void *__args)
2126 struct die_args *args = __args;
2127 struct pt_regs *regs;
2129 if (!atomic_read(&active_events))
2143 #ifdef CONFIG_X86_LOCAL_APIC
2144 apic_write(APIC_LVTPC, APIC_DM_NMI);
2147 * Can't rely on the handled return value to say it was our NMI, two
2148 * events could trigger 'simultaneously' raising two back-to-back NMIs.
2150 * If the first NMI handles both, the latter will be empty and daze
2153 x86_pmu.handle_irq(regs);
2158 static struct event_constraint bts_constraint =
2159 EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
2161 static int intel_special_constraints(struct perf_event *event,
2162 unsigned long *idxmsk)
2164 unsigned int hw_event;
2166 hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
2168 if (unlikely((hw_event ==
2169 x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
2170 (event->hw.sample_period == 1))) {
2172 bitmap_copy((unsigned long *)idxmsk,
2173 (unsigned long *)bts_constraint.idxmsk,
2180 static void intel_get_event_constraints(struct cpu_hw_events *cpuc,
2181 struct perf_event *event,
2182 unsigned long *idxmsk)
2184 const struct event_constraint *c;
2189 bitmap_zero(idxmsk, X86_PMC_IDX_MAX);
2191 if (intel_special_constraints(event, idxmsk))
2194 if (x86_pmu.event_constraints) {
2195 for_each_event_constraint(c, x86_pmu.event_constraints) {
2196 if ((event->hw.config & c->cmask) == c->code) {
2197 bitmap_copy(idxmsk, c->idxmsk, X86_PMC_IDX_MAX);
2202 /* no constraints, means supports all generic counters */
2203 bitmap_fill((unsigned long *)idxmsk, x86_pmu.num_events);
2206 static void amd_get_event_constraints(struct cpu_hw_events *cpuc,
2207 struct perf_event *event,
2208 unsigned long *idxmsk)
2210 /* no constraints, means supports all generic counters */
2211 bitmap_fill(idxmsk, x86_pmu.num_events);
2214 static int x86_event_sched_in(struct perf_event *event,
2215 struct perf_cpu_context *cpuctx, int cpu)
2219 event->state = PERF_EVENT_STATE_ACTIVE;
2221 event->tstamp_running += event->ctx->time - event->tstamp_stopped;
2223 if (!is_x86_event(event))
2224 ret = event->pmu->enable(event);
2226 if (!ret && !is_software_event(event))
2227 cpuctx->active_oncpu++;
2229 if (!ret && event->attr.exclusive)
2230 cpuctx->exclusive = 1;
2235 static void x86_event_sched_out(struct perf_event *event,
2236 struct perf_cpu_context *cpuctx, int cpu)
2238 event->state = PERF_EVENT_STATE_INACTIVE;
2241 if (!is_x86_event(event))
2242 event->pmu->disable(event);
2244 event->tstamp_running -= event->ctx->time - event->tstamp_stopped;
2246 if (!is_software_event(event))
2247 cpuctx->active_oncpu--;
2249 if (event->attr.exclusive || !cpuctx->active_oncpu)
2250 cpuctx->exclusive = 0;
2254 * Called to enable a whole group of events.
2255 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
2256 * Assumes the caller has disabled interrupts and has
2257 * frozen the PMU with hw_perf_save_disable.
2259 * called with PMU disabled. If successful and return value 1,
2260 * then guaranteed to call perf_enable() and hw_perf_enable()
2262 int hw_perf_group_sched_in(struct perf_event *leader,
2263 struct perf_cpu_context *cpuctx,
2264 struct perf_event_context *ctx, int cpu)
2266 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2267 struct perf_event *sub;
2268 int assign[X86_PMC_IDX_MAX];
2271 /* n0 = total number of events */
2272 n0 = collect_events(cpuc, leader, true);
2276 ret = x86_schedule_events(cpuc, n0, assign);
2280 ret = x86_event_sched_in(leader, cpuctx, cpu);
2285 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2286 if (sub->state > PERF_EVENT_STATE_OFF) {
2287 ret = x86_event_sched_in(sub, cpuctx, cpu);
2294 * copy the new assignment now that we know it is possible;
2295 * it will be used by hw_perf_enable()
2297 memcpy(cpuc->assign, assign, n0*sizeof(int));
2299 cpuc->n_events = n0;
2301 ctx->nr_active += n1;
2304 * 1 means successful and events are active
2305 * This is not quite true because we defer
2306 * actual activation until hw_perf_enable() but
2307 * this way we ensure the caller won't try to enable
2312 x86_event_sched_out(leader, cpuctx, cpu);
2314 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2315 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
2316 x86_event_sched_out(sub, cpuctx, cpu);
2324 static __read_mostly struct notifier_block perf_event_nmi_notifier = {
2325 .notifier_call = perf_event_nmi_handler,
2330 static __initconst struct x86_pmu p6_pmu = {
2332 .handle_irq = p6_pmu_handle_irq,
2333 .disable_all = p6_pmu_disable_all,
2334 .enable_all = p6_pmu_enable_all,
2335 .enable = p6_pmu_enable_event,
2336 .disable = p6_pmu_disable_event,
2337 .eventsel = MSR_P6_EVNTSEL0,
2338 .perfctr = MSR_P6_PERFCTR0,
2339 .event_map = p6_pmu_event_map,
2340 .raw_event = p6_pmu_raw_event,
2341 .max_events = ARRAY_SIZE(p6_perfmon_event_map),
2343 .max_period = (1ULL << 31) - 1,
2347 * Events have 40 bits implemented. However they are designed such
2348 * that bits [32-39] are sign extensions of bit 31. As such the
2349 * effective width of an event for a P6-like PMU is 32 bits only.
2351 * See IA-32 Intel Architecture Software developer manual Vol 3B
2354 .event_mask = (1ULL << 32) - 1,
2355 .get_event_constraints = intel_get_event_constraints,
2356 .event_constraints = intel_p6_event_constraints
2359 static __initconst struct x86_pmu intel_pmu = {
2361 .handle_irq = intel_pmu_handle_irq,
2362 .disable_all = intel_pmu_disable_all,
2363 .enable_all = intel_pmu_enable_all,
2364 .enable = intel_pmu_enable_event,
2365 .disable = intel_pmu_disable_event,
2366 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
2367 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
2368 .event_map = intel_pmu_event_map,
2369 .raw_event = intel_pmu_raw_event,
2370 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
2373 * Intel PMCs cannot be accessed sanely above 32 bit width,
2374 * so we install an artificial 1<<31 period regardless of
2375 * the generic event period:
2377 .max_period = (1ULL << 31) - 1,
2378 .enable_bts = intel_pmu_enable_bts,
2379 .disable_bts = intel_pmu_disable_bts,
2380 .get_event_constraints = intel_get_event_constraints
2383 static __initconst struct x86_pmu amd_pmu = {
2385 .handle_irq = amd_pmu_handle_irq,
2386 .disable_all = amd_pmu_disable_all,
2387 .enable_all = amd_pmu_enable_all,
2388 .enable = amd_pmu_enable_event,
2389 .disable = amd_pmu_disable_event,
2390 .eventsel = MSR_K7_EVNTSEL0,
2391 .perfctr = MSR_K7_PERFCTR0,
2392 .event_map = amd_pmu_event_map,
2393 .raw_event = amd_pmu_raw_event,
2394 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
2397 .event_mask = (1ULL << 48) - 1,
2399 /* use highest bit to detect overflow */
2400 .max_period = (1ULL << 47) - 1,
2401 .get_event_constraints = amd_get_event_constraints
2404 static __init int p6_pmu_init(void)
2406 switch (boot_cpu_data.x86_model) {
2408 case 3: /* Pentium Pro */
2410 case 6: /* Pentium II */
2413 case 11: /* Pentium III */
2419 pr_cont("unsupported p6 CPU model %d ",
2420 boot_cpu_data.x86_model);
2429 static __init int intel_pmu_init(void)
2431 union cpuid10_edx edx;
2432 union cpuid10_eax eax;
2433 unsigned int unused;
2437 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2438 /* check for P6 processor family */
2439 if (boot_cpu_data.x86 == 6) {
2440 return p6_pmu_init();
2447 * Check whether the Architectural PerfMon supports
2448 * Branch Misses Retired hw_event or not.
2450 cpuid(10, &eax.full, &ebx, &unused, &edx.full);
2451 if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
2454 version = eax.split.version_id;
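/*
 * CPUID leaf 0xA describes the architectural PMU: EAX packs the version,
 * the number of generic counters, their bit width and the event-mask
 * length checked above; EDX carries the number of fixed-function
 * counters used below.
 */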
2458 x86_pmu = intel_pmu;
2459 x86_pmu.version = version;
2460 x86_pmu.num_events = eax.split.num_events;
2461 x86_pmu.event_bits = eax.split.bit_width;
2462 x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;
2465 * Quirk: v2 perfmon does not report fixed-purpose events, so
2466 * assume at least 3 events:
2468 x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
2471 * Install the hw-cache-events table:
2473 switch (boot_cpu_data.x86_model) {
2474 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2475 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2476 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2477 case 29: /* six-core 45 nm xeon "Dunnington" */
2478 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
2479 sizeof(hw_cache_event_ids));
2481 x86_pmu.event_constraints = intel_core_event_constraints;
2482 pr_cont("Core2 events, ");
2485 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
2486 sizeof(hw_cache_event_ids));
2488 x86_pmu.event_constraints = intel_nehalem_event_constraints;
2489 pr_cont("Nehalem/Corei7 events, ");
2492 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2493 sizeof(hw_cache_event_ids));
2495 x86_pmu.event_constraints = intel_gen_event_constraints;
2496 pr_cont("Atom events, ");
2500 * default constraints for v2 and up
2502 x86_pmu.event_constraints = intel_gen_event_constraints;
2503 pr_cont("generic architected perfmon, ");
2508 static __init int amd_pmu_init(void)
2510 /* Performance-monitoring supported from K7 and later: */
2511 if (boot_cpu_data.x86 < 6)
2516 /* Events are common for all AMDs */
2517 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
2518 sizeof(hw_cache_event_ids));
2523 static void __init pmu_check_apic(void)
2529 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
2530 pr_info("no hardware sampling interrupt available.\n");
2533 void __init init_hw_perf_events(void)
2537 pr_info("Performance Events: ");
2539 switch (boot_cpu_data.x86_vendor) {
2540 case X86_VENDOR_INTEL:
2541 err = intel_pmu_init();
2543 case X86_VENDOR_AMD:
2544 err = amd_pmu_init();
2550 pr_cont("no PMU driver, software events only.\n");
2556 pr_cont("%s PMU driver.\n", x86_pmu.name);
2558 if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
2559 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2560 x86_pmu.num_events, X86_PMC_MAX_GENERIC);
2561 x86_pmu.num_events = X86_PMC_MAX_GENERIC;
2563 perf_event_mask = (1 << x86_pmu.num_events) - 1;
2564 perf_max_events = x86_pmu.num_events;
2566 if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
2567 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2568 x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
2569 x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
2573 ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
2574 x86_pmu.intel_ctrl = perf_event_mask;
2576 perf_events_lapic_init();
2577 register_die_notifier(&perf_event_nmi_notifier);
2579 pr_info("... version: %d\n", x86_pmu.version);
2580 pr_info("... bit width: %d\n", x86_pmu.event_bits);
2581 pr_info("... generic registers: %d\n", x86_pmu.num_events);
2582 pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
2583 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
2584 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
2585 pr_info("... event mask: %016Lx\n", perf_event_mask);
2588 static inline void x86_pmu_read(struct perf_event *event)
2590 x86_perf_event_update(event, &event->hw, event->hw.idx);
2593 static const struct pmu pmu = {
2594 .enable = x86_pmu_enable,
2595 .disable = x86_pmu_disable,
2596 .read = x86_pmu_read,
2597 .unthrottle = x86_pmu_unthrottle,
2601 * validate a single event group
2603 * validation includes:
2604 * - check events are compatible with each other
2605 * - events do not compete for the same counter
2606 * - number of events <= number of counters
2608 * validation ensures the group can be loaded onto the
2609 * PMU if it was the only group available.
2611 static int validate_group(struct perf_event *event)
2613 struct perf_event *leader = event->group_leader;
2614 struct cpu_hw_events *fake_cpuc;
2618 fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
2623 * the event is not yet connected with its
2624 * siblings therefore we must first collect
2625 * existing siblings, then add the new event
2626 * before we can simulate the scheduling
2629 n = collect_events(fake_cpuc, leader, true);
2633 fake_cpuc->n_events = n;
2634 n = collect_events(fake_cpuc, event, false);
2638 fake_cpuc->n_events = n;
2640 ret = x86_schedule_events(fake_cpuc, n, NULL);
2648 const struct pmu *hw_perf_event_init(struct perf_event *event)
2650 const struct pmu *tmp;
2653 err = __hw_perf_event_init(event);
2656 * we temporarily connect event to its pmu
2657 * such that validate_group() can classify
2658 * it as an x86 event using is_x86_event()
2663 if (event->group_leader != event)
2664 err = validate_group(event);
2670 event->destroy(event);
2671 return ERR_PTR(err);
2682 void callchain_store(struct perf_callchain_entry *entry, u64 ip)
2684 if (entry->nr < PERF_MAX_STACK_DEPTH)
2685 entry->ip[entry->nr++] = ip;
2688 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
2689 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
2693 backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
2695 /* Ignore warnings */
2698 static void backtrace_warning(void *data, char *msg)
2700 /* Ignore warnings */
2703 static int backtrace_stack(void *data, char *name)
2708 static void backtrace_address(void *data, unsigned long addr, int reliable)
2710 struct perf_callchain_entry *entry = data;
2713 callchain_store(entry, addr);
2716 static const struct stacktrace_ops backtrace_ops = {
2717 .warning = backtrace_warning,
2718 .warning_symbol = backtrace_warning_symbol,
2719 .stack = backtrace_stack,
2720 .address = backtrace_address,
2721 .walk_stack = print_context_stack_bp,
2724 #include "../dumpstack.h"
2727 perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
2729 callchain_store(entry, PERF_CONTEXT_KERNEL);
2730 callchain_store(entry, regs->ip);
2732 dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
2736 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
2738 static unsigned long
2739 copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
2741 unsigned long offset, addr = (unsigned long)from;
2742 int type = in_nmi() ? KM_NMI : KM_IRQ0;
2743 unsigned long size, len = 0;
2749 ret = __get_user_pages_fast(addr, 1, 0, &page);
2753 offset = addr & (PAGE_SIZE - 1);
2754 size = min(PAGE_SIZE - offset, n - len);
2756 map = kmap_atomic(page, type);
2757 memcpy(to, map+offset, size);
2758 kunmap_atomic(map, type);
2770 static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
2772 unsigned long bytes;
2774 bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
2776 return bytes == sizeof(*frame);
2780 perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
2782 struct stack_frame frame;
2783 const void __user *fp;
2785 if (!user_mode(regs))
2786 regs = task_pt_regs(current);
2788 fp = (void __user *)regs->bp;
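/*
 * The user callchain is walked via saved frame pointers: each
 * struct stack_frame is pulled in with copy_from_user_nmi(), so this
 * only yields useful chains for code built with frame pointers, and the
 * walk stops once a copy fails or fp drops below the user stack pointer.
 */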
2790 callchain_store(entry, PERF_CONTEXT_USER);
2791 callchain_store(entry, regs->ip);
2793 while (entry->nr < PERF_MAX_STACK_DEPTH) {
2794 frame.next_frame = NULL;
2795 frame.return_address = 0;
2797 if (!copy_stack_frame(fp, &frame))
2800 if ((unsigned long)fp < regs->sp)
2803 callchain_store(entry, frame.return_address);
2804 fp = frame.next_frame;
2809 perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
2816 is_user = user_mode(regs);
2818 if (is_user && current->state != TASK_RUNNING)
2822 perf_callchain_kernel(regs, entry);
2825 perf_callchain_user(regs, entry);
2828 struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2830 struct perf_callchain_entry *entry;
2833 entry = &__get_cpu_var(pmc_nmi_entry);
2835 entry = &__get_cpu_var(pmc_irq_entry);
2839 perf_do_callchain(regs, entry);
2844 void hw_perf_event_setup_online(int cpu)
2846 init_debug_store_on_cpu(cpu);