perf_events, x86: AMD event scheduling
[safe/jmp/linux-2.6] / arch / x86 / kernel / cpu / perf_event.c
1 /*
2  * Performance events x86 architecture code
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2009 Jaswinder Singh Rajput
7  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
9  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
11  *
12  *  For licencing details see kernel-base/COPYING
13  */
14
15 #include <linux/perf_event.h>
16 #include <linux/capability.h>
17 #include <linux/notifier.h>
18 #include <linux/hardirq.h>
19 #include <linux/kprobes.h>
20 #include <linux/module.h>
21 #include <linux/kdebug.h>
22 #include <linux/sched.h>
23 #include <linux/uaccess.h>
24 #include <linux/highmem.h>
25 #include <linux/cpu.h>
26 #include <linux/bitops.h>
27
28 #include <asm/apic.h>
29 #include <asm/stacktrace.h>
30 #include <asm/nmi.h>
31
32 static u64 perf_event_mask __read_mostly;
33
34 /* The maximal number of PEBS events: */
35 #define MAX_PEBS_EVENTS 4
36
37 /* The size of a BTS record in bytes: */
38 #define BTS_RECORD_SIZE         24
39
40 /* The size of a per-cpu BTS buffer in bytes: */
41 #define BTS_BUFFER_SIZE         (BTS_RECORD_SIZE * 2048)
42
43 /* The BTS overflow threshold in bytes from the end of the buffer: */
44 #define BTS_OVFL_TH             (BTS_RECORD_SIZE * 128)
45
46
47 /*
48  * Bits in the debugctlmsr controlling branch tracing.
49  */
50 #define X86_DEBUGCTL_TR                 (1 << 6)
51 #define X86_DEBUGCTL_BTS                (1 << 7)
52 #define X86_DEBUGCTL_BTINT              (1 << 8)
53 #define X86_DEBUGCTL_BTS_OFF_OS         (1 << 9)
54 #define X86_DEBUGCTL_BTS_OFF_USR        (1 << 10)
55
56 /*
57  * A debug store configuration.
58  *
59  * We only support architectures that use 64bit fields.
60  */
61 struct debug_store {
62         u64     bts_buffer_base;
63         u64     bts_index;
64         u64     bts_absolute_maximum;
65         u64     bts_interrupt_threshold;
66         u64     pebs_buffer_base;
67         u64     pebs_index;
68         u64     pebs_absolute_maximum;
69         u64     pebs_interrupt_threshold;
70         u64     pebs_event_reset[MAX_PEBS_EVENTS];
71 };
72
73 struct event_constraint {
74         union {
75                 unsigned long   idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
76                 u64             idxmsk64[1];
77         };
78         int     code;
79         int     cmask;
80         int     weight;
81 };
82
83 struct amd_nb {
84         int nb_id;  /* NorthBridge id */
85         int refcnt; /* reference count */
86         struct perf_event *owners[X86_PMC_IDX_MAX];
87         struct event_constraint event_constraints[X86_PMC_IDX_MAX];
88 };
89
90 struct cpu_hw_events {
91         struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
92         unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
93         unsigned long           interrupts;
94         int                     enabled;
95         struct debug_store      *ds;
96
97         int                     n_events;
98         int                     n_added;
99         int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
100         u64                     tags[X86_PMC_IDX_MAX];
101         struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
102         struct amd_nb           *amd_nb;
103 };
104
105 #define __EVENT_CONSTRAINT(c, n, m, w) {\
106         { .idxmsk64[0] = (n) },         \
107         .code = (c),                    \
108         .cmask = (m),                   \
109         .weight = (w),                  \
110 }
111
112 #define EVENT_CONSTRAINT(c, n, m)       \
113         __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
114
115 #define INTEL_EVENT_CONSTRAINT(c, n)    \
116         EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
117
118 #define FIXED_EVENT_CONSTRAINT(c, n)    \
119         EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
120
121 #define EVENT_CONSTRAINT_END            \
122         EVENT_CONSTRAINT(0, 0, 0)
123
124 #define for_each_event_constraint(e, c) \
125         for ((e) = (c); (e)->cmask; (e)++)
126
127 /*
128  * struct x86_pmu - generic x86 pmu
129  */
130 struct x86_pmu {
131         const char      *name;
132         int             version;
133         int             (*handle_irq)(struct pt_regs *);
134         void            (*disable_all)(void);
135         void            (*enable_all)(void);
136         void            (*enable)(struct hw_perf_event *, int);
137         void            (*disable)(struct hw_perf_event *, int);
138         unsigned        eventsel;
139         unsigned        perfctr;
140         u64             (*event_map)(int);
141         u64             (*raw_event)(u64);
142         int             max_events;
143         int             num_events;
144         int             num_events_fixed;
145         int             event_bits;
146         u64             event_mask;
147         int             apic;
148         u64             max_period;
149         u64             intel_ctrl;
150         void            (*enable_bts)(u64 config);
151         void            (*disable_bts)(void);
152
153         struct event_constraint *
154                         (*get_event_constraints)(struct cpu_hw_events *cpuc,
155                                                  struct perf_event *event);
156
157         void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
158                                                  struct perf_event *event);
159         struct event_constraint *event_constraints;
160 };
161
162 static struct x86_pmu x86_pmu __read_mostly;
163
164 static raw_spinlock_t amd_nb_lock;
165
166 static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
167         .enabled = 1,
168 };
169
170 static int x86_perf_event_set_period(struct perf_event *event,
171                              struct hw_perf_event *hwc, int idx);
172
173 /*
174  * Not sure about some of these
175  */
176 static const u64 p6_perfmon_event_map[] =
177 {
178   [PERF_COUNT_HW_CPU_CYCLES]            = 0x0079,
179   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
180   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x0f2e,
181   [PERF_COUNT_HW_CACHE_MISSES]          = 0x012e,
182   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
183   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
184   [PERF_COUNT_HW_BUS_CYCLES]            = 0x0062,
185 };
186
187 static u64 p6_pmu_event_map(int hw_event)
188 {
189         return p6_perfmon_event_map[hw_event];
190 }
191
192 /*
193  * Event setting that is specified not to count anything.
194  * We use this to effectively disable a counter.
195  *
196  * L2_RQSTS with 0 MESI unit mask.
197  */
198 #define P6_NOP_EVENT                    0x0000002EULL
199
200 static u64 p6_pmu_raw_event(u64 hw_event)
201 {
202 #define P6_EVNTSEL_EVENT_MASK           0x000000FFULL
203 #define P6_EVNTSEL_UNIT_MASK            0x0000FF00ULL
204 #define P6_EVNTSEL_EDGE_MASK            0x00040000ULL
205 #define P6_EVNTSEL_INV_MASK             0x00800000ULL
206 #define P6_EVNTSEL_REG_MASK             0xFF000000ULL
207
208 #define P6_EVNTSEL_MASK                 \
209         (P6_EVNTSEL_EVENT_MASK |        \
210          P6_EVNTSEL_UNIT_MASK  |        \
211          P6_EVNTSEL_EDGE_MASK  |        \
212          P6_EVNTSEL_INV_MASK   |        \
213          P6_EVNTSEL_REG_MASK)
214
215         return hw_event & P6_EVNTSEL_MASK;
216 }
217
218 static struct event_constraint intel_p6_event_constraints[] =
219 {
220         INTEL_EVENT_CONSTRAINT(0xc1, 0x1),      /* FLOPS */
221         INTEL_EVENT_CONSTRAINT(0x10, 0x1),      /* FP_COMP_OPS_EXE */
222         INTEL_EVENT_CONSTRAINT(0x11, 0x1),      /* FP_ASSIST */
223         INTEL_EVENT_CONSTRAINT(0x12, 0x2),      /* MUL */
224         INTEL_EVENT_CONSTRAINT(0x13, 0x2),      /* DIV */
225         INTEL_EVENT_CONSTRAINT(0x14, 0x1),      /* CYCLES_DIV_BUSY */
226         EVENT_CONSTRAINT_END
227 };
228
229 /*
230  * Intel PerfMon v3. Used on Core2 and later.
231  */
232 static const u64 intel_perfmon_event_map[] =
233 {
234   [PERF_COUNT_HW_CPU_CYCLES]            = 0x003c,
235   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
236   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x4f2e,
237   [PERF_COUNT_HW_CACHE_MISSES]          = 0x412e,
238   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
239   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
240   [PERF_COUNT_HW_BUS_CYCLES]            = 0x013c,
241 };
242
243 static struct event_constraint intel_core_event_constraints[] =
244 {
245         INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
246         INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
247         INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
248         INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
249         INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
250         INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
251         EVENT_CONSTRAINT_END
252 };
253
254 static struct event_constraint intel_core2_event_constraints[] =
255 {
256         FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
257         FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
258         INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
259         INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
260         INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
261         INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
262         INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
263         INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
264         INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
265         INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
266         INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
267         EVENT_CONSTRAINT_END
268 };
269
270 static struct event_constraint intel_nehalem_event_constraints[] =
271 {
272         FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
273         FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
274         INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
275         INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
276         INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
277         INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
278         INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
279         INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
280         INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
281         INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
282         EVENT_CONSTRAINT_END
283 };
284
285 static struct event_constraint intel_westmere_event_constraints[] =
286 {
287         FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
288         FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
289         INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
290         INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
291         INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
292         EVENT_CONSTRAINT_END
293 };
294
295 static struct event_constraint intel_gen_event_constraints[] =
296 {
297         FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
298         FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
299         EVENT_CONSTRAINT_END
300 };
301
302 static u64 intel_pmu_event_map(int hw_event)
303 {
304         return intel_perfmon_event_map[hw_event];
305 }
306
307 /*
308  * Generalized hw caching related hw_event table, filled
309  * in on a per model basis. A value of 0 means
310  * 'not supported', -1 means 'hw_event makes no sense on
311  * this CPU', any other value means the raw hw_event
312  * ID.
313  */
314
315 #define C(x) PERF_COUNT_HW_CACHE_##x
316
317 static u64 __read_mostly hw_cache_event_ids
318                                 [PERF_COUNT_HW_CACHE_MAX]
319                                 [PERF_COUNT_HW_CACHE_OP_MAX]
320                                 [PERF_COUNT_HW_CACHE_RESULT_MAX];
321
322 static __initconst u64 westmere_hw_cache_event_ids
323                                 [PERF_COUNT_HW_CACHE_MAX]
324                                 [PERF_COUNT_HW_CACHE_OP_MAX]
325                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
326 {
327  [ C(L1D) ] = {
328         [ C(OP_READ) ] = {
329                 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
330                 [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
331         },
332         [ C(OP_WRITE) ] = {
333                 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES      */
334                 [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
335         },
336         [ C(OP_PREFETCH) ] = {
337                 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
338                 [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
339         },
340  },
341  [ C(L1I ) ] = {
342         [ C(OP_READ) ] = {
343                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
344                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
345         },
346         [ C(OP_WRITE) ] = {
347                 [ C(RESULT_ACCESS) ] = -1,
348                 [ C(RESULT_MISS)   ] = -1,
349         },
350         [ C(OP_PREFETCH) ] = {
351                 [ C(RESULT_ACCESS) ] = 0x0,
352                 [ C(RESULT_MISS)   ] = 0x0,
353         },
354  },
355  [ C(LL  ) ] = {
356         [ C(OP_READ) ] = {
357                 [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
358                 [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
359         },
360         [ C(OP_WRITE) ] = {
361                 [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
362                 [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
363         },
364         [ C(OP_PREFETCH) ] = {
365                 [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
366                 [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
367         },
368  },
369  [ C(DTLB) ] = {
370         [ C(OP_READ) ] = {
371                 [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
372                 [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
373         },
374         [ C(OP_WRITE) ] = {
375                 [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETURED.STORES      */
376                 [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
377         },
378         [ C(OP_PREFETCH) ] = {
379                 [ C(RESULT_ACCESS) ] = 0x0,
380                 [ C(RESULT_MISS)   ] = 0x0,
381         },
382  },
383  [ C(ITLB) ] = {
384         [ C(OP_READ) ] = {
385                 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
386                 [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY              */
387         },
388         [ C(OP_WRITE) ] = {
389                 [ C(RESULT_ACCESS) ] = -1,
390                 [ C(RESULT_MISS)   ] = -1,
391         },
392         [ C(OP_PREFETCH) ] = {
393                 [ C(RESULT_ACCESS) ] = -1,
394                 [ C(RESULT_MISS)   ] = -1,
395         },
396  },
397  [ C(BPU ) ] = {
398         [ C(OP_READ) ] = {
399                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
400                 [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
401         },
402         [ C(OP_WRITE) ] = {
403                 [ C(RESULT_ACCESS) ] = -1,
404                 [ C(RESULT_MISS)   ] = -1,
405         },
406         [ C(OP_PREFETCH) ] = {
407                 [ C(RESULT_ACCESS) ] = -1,
408                 [ C(RESULT_MISS)   ] = -1,
409         },
410  },
411 };
412
413 static __initconst u64 nehalem_hw_cache_event_ids
414                                 [PERF_COUNT_HW_CACHE_MAX]
415                                 [PERF_COUNT_HW_CACHE_OP_MAX]
416                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
417 {
418  [ C(L1D) ] = {
419         [ C(OP_READ) ] = {
420                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI            */
421                 [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE         */
422         },
423         [ C(OP_WRITE) ] = {
424                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI            */
425                 [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE         */
426         },
427         [ C(OP_PREFETCH) ] = {
428                 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
429                 [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
430         },
431  },
432  [ C(L1I ) ] = {
433         [ C(OP_READ) ] = {
434                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
435                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
436         },
437         [ C(OP_WRITE) ] = {
438                 [ C(RESULT_ACCESS) ] = -1,
439                 [ C(RESULT_MISS)   ] = -1,
440         },
441         [ C(OP_PREFETCH) ] = {
442                 [ C(RESULT_ACCESS) ] = 0x0,
443                 [ C(RESULT_MISS)   ] = 0x0,
444         },
445  },
446  [ C(LL  ) ] = {
447         [ C(OP_READ) ] = {
448                 [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
449                 [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
450         },
451         [ C(OP_WRITE) ] = {
452                 [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
453                 [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
454         },
455         [ C(OP_PREFETCH) ] = {
456                 [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
457                 [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
458         },
459  },
460  [ C(DTLB) ] = {
461         [ C(OP_READ) ] = {
462                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
463                 [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
464         },
465         [ C(OP_WRITE) ] = {
466                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
467                 [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
468         },
469         [ C(OP_PREFETCH) ] = {
470                 [ C(RESULT_ACCESS) ] = 0x0,
471                 [ C(RESULT_MISS)   ] = 0x0,
472         },
473  },
474  [ C(ITLB) ] = {
475         [ C(OP_READ) ] = {
476                 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
477                 [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
478         },
479         [ C(OP_WRITE) ] = {
480                 [ C(RESULT_ACCESS) ] = -1,
481                 [ C(RESULT_MISS)   ] = -1,
482         },
483         [ C(OP_PREFETCH) ] = {
484                 [ C(RESULT_ACCESS) ] = -1,
485                 [ C(RESULT_MISS)   ] = -1,
486         },
487  },
488  [ C(BPU ) ] = {
489         [ C(OP_READ) ] = {
490                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
491                 [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
492         },
493         [ C(OP_WRITE) ] = {
494                 [ C(RESULT_ACCESS) ] = -1,
495                 [ C(RESULT_MISS)   ] = -1,
496         },
497         [ C(OP_PREFETCH) ] = {
498                 [ C(RESULT_ACCESS) ] = -1,
499                 [ C(RESULT_MISS)   ] = -1,
500         },
501  },
502 };
503
504 static __initconst u64 core2_hw_cache_event_ids
505                                 [PERF_COUNT_HW_CACHE_MAX]
506                                 [PERF_COUNT_HW_CACHE_OP_MAX]
507                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
508 {
509  [ C(L1D) ] = {
510         [ C(OP_READ) ] = {
511                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
512                 [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
513         },
514         [ C(OP_WRITE) ] = {
515                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
516                 [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
517         },
518         [ C(OP_PREFETCH) ] = {
519                 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
520                 [ C(RESULT_MISS)   ] = 0,
521         },
522  },
523  [ C(L1I ) ] = {
524         [ C(OP_READ) ] = {
525                 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
526                 [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
527         },
528         [ C(OP_WRITE) ] = {
529                 [ C(RESULT_ACCESS) ] = -1,
530                 [ C(RESULT_MISS)   ] = -1,
531         },
532         [ C(OP_PREFETCH) ] = {
533                 [ C(RESULT_ACCESS) ] = 0,
534                 [ C(RESULT_MISS)   ] = 0,
535         },
536  },
537  [ C(LL  ) ] = {
538         [ C(OP_READ) ] = {
539                 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
540                 [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
541         },
542         [ C(OP_WRITE) ] = {
543                 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
544                 [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
545         },
546         [ C(OP_PREFETCH) ] = {
547                 [ C(RESULT_ACCESS) ] = 0,
548                 [ C(RESULT_MISS)   ] = 0,
549         },
550  },
551  [ C(DTLB) ] = {
552         [ C(OP_READ) ] = {
553                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
554                 [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
555         },
556         [ C(OP_WRITE) ] = {
557                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
558                 [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
559         },
560         [ C(OP_PREFETCH) ] = {
561                 [ C(RESULT_ACCESS) ] = 0,
562                 [ C(RESULT_MISS)   ] = 0,
563         },
564  },
565  [ C(ITLB) ] = {
566         [ C(OP_READ) ] = {
567                 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
568                 [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
569         },
570         [ C(OP_WRITE) ] = {
571                 [ C(RESULT_ACCESS) ] = -1,
572                 [ C(RESULT_MISS)   ] = -1,
573         },
574         [ C(OP_PREFETCH) ] = {
575                 [ C(RESULT_ACCESS) ] = -1,
576                 [ C(RESULT_MISS)   ] = -1,
577         },
578  },
579  [ C(BPU ) ] = {
580         [ C(OP_READ) ] = {
581                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
582                 [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
583         },
584         [ C(OP_WRITE) ] = {
585                 [ C(RESULT_ACCESS) ] = -1,
586                 [ C(RESULT_MISS)   ] = -1,
587         },
588         [ C(OP_PREFETCH) ] = {
589                 [ C(RESULT_ACCESS) ] = -1,
590                 [ C(RESULT_MISS)   ] = -1,
591         },
592  },
593 };
594
595 static __initconst u64 atom_hw_cache_event_ids
596                                 [PERF_COUNT_HW_CACHE_MAX]
597                                 [PERF_COUNT_HW_CACHE_OP_MAX]
598                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
599 {
600  [ C(L1D) ] = {
601         [ C(OP_READ) ] = {
602                 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
603                 [ C(RESULT_MISS)   ] = 0,
604         },
605         [ C(OP_WRITE) ] = {
606                 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
607                 [ C(RESULT_MISS)   ] = 0,
608         },
609         [ C(OP_PREFETCH) ] = {
610                 [ C(RESULT_ACCESS) ] = 0x0,
611                 [ C(RESULT_MISS)   ] = 0,
612         },
613  },
614  [ C(L1I ) ] = {
615         [ C(OP_READ) ] = {
616                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
617                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
618         },
619         [ C(OP_WRITE) ] = {
620                 [ C(RESULT_ACCESS) ] = -1,
621                 [ C(RESULT_MISS)   ] = -1,
622         },
623         [ C(OP_PREFETCH) ] = {
624                 [ C(RESULT_ACCESS) ] = 0,
625                 [ C(RESULT_MISS)   ] = 0,
626         },
627  },
628  [ C(LL  ) ] = {
629         [ C(OP_READ) ] = {
630                 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
631                 [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
632         },
633         [ C(OP_WRITE) ] = {
634                 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
635                 [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
636         },
637         [ C(OP_PREFETCH) ] = {
638                 [ C(RESULT_ACCESS) ] = 0,
639                 [ C(RESULT_MISS)   ] = 0,
640         },
641  },
642  [ C(DTLB) ] = {
643         [ C(OP_READ) ] = {
644                 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
645                 [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
646         },
647         [ C(OP_WRITE) ] = {
648                 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
649                 [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
650         },
651         [ C(OP_PREFETCH) ] = {
652                 [ C(RESULT_ACCESS) ] = 0,
653                 [ C(RESULT_MISS)   ] = 0,
654         },
655  },
656  [ C(ITLB) ] = {
657         [ C(OP_READ) ] = {
658                 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
659                 [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
660         },
661         [ C(OP_WRITE) ] = {
662                 [ C(RESULT_ACCESS) ] = -1,
663                 [ C(RESULT_MISS)   ] = -1,
664         },
665         [ C(OP_PREFETCH) ] = {
666                 [ C(RESULT_ACCESS) ] = -1,
667                 [ C(RESULT_MISS)   ] = -1,
668         },
669  },
670  [ C(BPU ) ] = {
671         [ C(OP_READ) ] = {
672                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
673                 [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
674         },
675         [ C(OP_WRITE) ] = {
676                 [ C(RESULT_ACCESS) ] = -1,
677                 [ C(RESULT_MISS)   ] = -1,
678         },
679         [ C(OP_PREFETCH) ] = {
680                 [ C(RESULT_ACCESS) ] = -1,
681                 [ C(RESULT_MISS)   ] = -1,
682         },
683  },
684 };
685
686 static u64 intel_pmu_raw_event(u64 hw_event)
687 {
688 #define CORE_EVNTSEL_EVENT_MASK         0x000000FFULL
689 #define CORE_EVNTSEL_UNIT_MASK          0x0000FF00ULL
690 #define CORE_EVNTSEL_EDGE_MASK          0x00040000ULL
691 #define CORE_EVNTSEL_INV_MASK           0x00800000ULL
692 #define CORE_EVNTSEL_REG_MASK           0xFF000000ULL
693
694 #define CORE_EVNTSEL_MASK               \
695         (INTEL_ARCH_EVTSEL_MASK |       \
696          INTEL_ARCH_UNIT_MASK   |       \
697          INTEL_ARCH_EDGE_MASK   |       \
698          INTEL_ARCH_INV_MASK    |       \
699          INTEL_ARCH_CNT_MASK)
700
701         return hw_event & CORE_EVNTSEL_MASK;
702 }
703
704 static __initconst u64 amd_hw_cache_event_ids
705                                 [PERF_COUNT_HW_CACHE_MAX]
706                                 [PERF_COUNT_HW_CACHE_OP_MAX]
707                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
708 {
709  [ C(L1D) ] = {
710         [ C(OP_READ) ] = {
711                 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
712                 [ C(RESULT_MISS)   ] = 0x0041, /* Data Cache Misses          */
713         },
714         [ C(OP_WRITE) ] = {
715                 [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
716                 [ C(RESULT_MISS)   ] = 0,
717         },
718         [ C(OP_PREFETCH) ] = {
719                 [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
720                 [ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
721         },
722  },
723  [ C(L1I ) ] = {
724         [ C(OP_READ) ] = {
725                 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
726                 [ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
727         },
728         [ C(OP_WRITE) ] = {
729                 [ C(RESULT_ACCESS) ] = -1,
730                 [ C(RESULT_MISS)   ] = -1,
731         },
732         [ C(OP_PREFETCH) ] = {
733                 [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
734                 [ C(RESULT_MISS)   ] = 0,
735         },
736  },
737  [ C(LL  ) ] = {
738         [ C(OP_READ) ] = {
739                 [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
740                 [ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
741         },
742         [ C(OP_WRITE) ] = {
743                 [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
744                 [ C(RESULT_MISS)   ] = 0,
745         },
746         [ C(OP_PREFETCH) ] = {
747                 [ C(RESULT_ACCESS) ] = 0,
748                 [ C(RESULT_MISS)   ] = 0,
749         },
750  },
751  [ C(DTLB) ] = {
752         [ C(OP_READ) ] = {
753                 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
754                 [ C(RESULT_MISS)   ] = 0x0046, /* L1 DTLB and L2 DLTB Miss   */
755         },
756         [ C(OP_WRITE) ] = {
757                 [ C(RESULT_ACCESS) ] = 0,
758                 [ C(RESULT_MISS)   ] = 0,
759         },
760         [ C(OP_PREFETCH) ] = {
761                 [ C(RESULT_ACCESS) ] = 0,
762                 [ C(RESULT_MISS)   ] = 0,
763         },
764  },
765  [ C(ITLB) ] = {
766         [ C(OP_READ) ] = {
767                 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fecthes        */
768                 [ C(RESULT_MISS)   ] = 0x0085, /* Instr. fetch ITLB misses   */
769         },
770         [ C(OP_WRITE) ] = {
771                 [ C(RESULT_ACCESS) ] = -1,
772                 [ C(RESULT_MISS)   ] = -1,
773         },
774         [ C(OP_PREFETCH) ] = {
775                 [ C(RESULT_ACCESS) ] = -1,
776                 [ C(RESULT_MISS)   ] = -1,
777         },
778  },
779  [ C(BPU ) ] = {
780         [ C(OP_READ) ] = {
781                 [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
782                 [ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
783         },
784         [ C(OP_WRITE) ] = {
785                 [ C(RESULT_ACCESS) ] = -1,
786                 [ C(RESULT_MISS)   ] = -1,
787         },
788         [ C(OP_PREFETCH) ] = {
789                 [ C(RESULT_ACCESS) ] = -1,
790                 [ C(RESULT_MISS)   ] = -1,
791         },
792  },
793 };
794
795 /*
796  * AMD Performance Monitor K7 and later.
797  */
798 static const u64 amd_perfmon_event_map[] =
799 {
800   [PERF_COUNT_HW_CPU_CYCLES]            = 0x0076,
801   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
802   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x0080,
803   [PERF_COUNT_HW_CACHE_MISSES]          = 0x0081,
804   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
805   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
806 };
807
808 static u64 amd_pmu_event_map(int hw_event)
809 {
810         return amd_perfmon_event_map[hw_event];
811 }
812
813 static u64 amd_pmu_raw_event(u64 hw_event)
814 {
815 #define K7_EVNTSEL_EVENT_MASK   0xF000000FFULL
816 #define K7_EVNTSEL_UNIT_MASK    0x00000FF00ULL
817 #define K7_EVNTSEL_EDGE_MASK    0x000040000ULL
818 #define K7_EVNTSEL_INV_MASK     0x000800000ULL
819 #define K7_EVNTSEL_REG_MASK     0x0FF000000ULL
820
821 #define K7_EVNTSEL_MASK                 \
822         (K7_EVNTSEL_EVENT_MASK |        \
823          K7_EVNTSEL_UNIT_MASK  |        \
824          K7_EVNTSEL_EDGE_MASK  |        \
825          K7_EVNTSEL_INV_MASK   |        \
826          K7_EVNTSEL_REG_MASK)
827
828         return hw_event & K7_EVNTSEL_MASK;
829 }
830
831 /*
832  * Propagate event elapsed time into the generic event.
833  * Can only be executed on the CPU where the event is active.
834  * Returns the delta events processed.
835  */
836 static u64
837 x86_perf_event_update(struct perf_event *event,
838                         struct hw_perf_event *hwc, int idx)
839 {
840         int shift = 64 - x86_pmu.event_bits;
841         u64 prev_raw_count, new_raw_count;
842         s64 delta;
843
844         if (idx == X86_PMC_IDX_FIXED_BTS)
845                 return 0;
846
847         /*
848          * Careful: an NMI might modify the previous event value.
849          *
850          * Our tactic to handle this is to first atomically read and
851          * exchange a new raw count - then add that new-prev delta
852          * count to the generic event atomically:
853          */
854 again:
855         prev_raw_count = atomic64_read(&hwc->prev_count);
856         rdmsrl(hwc->event_base + idx, new_raw_count);
857
858         if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
859                                         new_raw_count) != prev_raw_count)
860                 goto again;
861
862         /*
863          * Now we have the new raw value and have updated the prev
864          * timestamp already. We can now calculate the elapsed delta
865          * (event-)time and add that to the generic event.
866          *
867          * Careful, not all hw sign-extends above the physical width
868          * of the count.
869          */
870         delta = (new_raw_count << shift) - (prev_raw_count << shift);
871         delta >>= shift;
872
873         atomic64_add(delta, &event->count);
874         atomic64_sub(delta, &hwc->period_left);
875
876         return new_raw_count;
877 }
878
879 static atomic_t active_events;
880 static DEFINE_MUTEX(pmc_reserve_mutex);
881
882 static bool reserve_pmc_hardware(void)
883 {
884 #ifdef CONFIG_X86_LOCAL_APIC
885         int i;
886
887         if (nmi_watchdog == NMI_LOCAL_APIC)
888                 disable_lapic_nmi_watchdog();
889
890         for (i = 0; i < x86_pmu.num_events; i++) {
891                 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
892                         goto perfctr_fail;
893         }
894
895         for (i = 0; i < x86_pmu.num_events; i++) {
896                 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
897                         goto eventsel_fail;
898         }
899 #endif
900
901         return true;
902
903 #ifdef CONFIG_X86_LOCAL_APIC
904 eventsel_fail:
905         for (i--; i >= 0; i--)
906                 release_evntsel_nmi(x86_pmu.eventsel + i);
907
908         i = x86_pmu.num_events;
909
910 perfctr_fail:
911         for (i--; i >= 0; i--)
912                 release_perfctr_nmi(x86_pmu.perfctr + i);
913
914         if (nmi_watchdog == NMI_LOCAL_APIC)
915                 enable_lapic_nmi_watchdog();
916
917         return false;
918 #endif
919 }
920
921 static void release_pmc_hardware(void)
922 {
923 #ifdef CONFIG_X86_LOCAL_APIC
924         int i;
925
926         for (i = 0; i < x86_pmu.num_events; i++) {
927                 release_perfctr_nmi(x86_pmu.perfctr + i);
928                 release_evntsel_nmi(x86_pmu.eventsel + i);
929         }
930
931         if (nmi_watchdog == NMI_LOCAL_APIC)
932                 enable_lapic_nmi_watchdog();
933 #endif
934 }
935
936 static inline bool bts_available(void)
937 {
938         return x86_pmu.enable_bts != NULL;
939 }
940
941 static inline void init_debug_store_on_cpu(int cpu)
942 {
943         struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
944
945         if (!ds)
946                 return;
947
948         wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
949                      (u32)((u64)(unsigned long)ds),
950                      (u32)((u64)(unsigned long)ds >> 32));
951 }
952
953 static inline void fini_debug_store_on_cpu(int cpu)
954 {
955         if (!per_cpu(cpu_hw_events, cpu).ds)
956                 return;
957
958         wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
959 }
960
961 static void release_bts_hardware(void)
962 {
963         int cpu;
964
965         if (!bts_available())
966                 return;
967
968         get_online_cpus();
969
970         for_each_online_cpu(cpu)
971                 fini_debug_store_on_cpu(cpu);
972
973         for_each_possible_cpu(cpu) {
974                 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
975
976                 if (!ds)
977                         continue;
978
979                 per_cpu(cpu_hw_events, cpu).ds = NULL;
980
981                 kfree((void *)(unsigned long)ds->bts_buffer_base);
982                 kfree(ds);
983         }
984
985         put_online_cpus();
986 }
987
988 static int reserve_bts_hardware(void)
989 {
990         int cpu, err = 0;
991
992         if (!bts_available())
993                 return 0;
994
995         get_online_cpus();
996
997         for_each_possible_cpu(cpu) {
998                 struct debug_store *ds;
999                 void *buffer;
1000
1001                 err = -ENOMEM;
1002                 buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
1003                 if (unlikely(!buffer))
1004                         break;
1005
1006                 ds = kzalloc(sizeof(*ds), GFP_KERNEL);
1007                 if (unlikely(!ds)) {
1008                         kfree(buffer);
1009                         break;
1010                 }
1011
1012                 ds->bts_buffer_base = (u64)(unsigned long)buffer;
1013                 ds->bts_index = ds->bts_buffer_base;
1014                 ds->bts_absolute_maximum =
1015                         ds->bts_buffer_base + BTS_BUFFER_SIZE;
1016                 ds->bts_interrupt_threshold =
1017                         ds->bts_absolute_maximum - BTS_OVFL_TH;
1018
1019                 per_cpu(cpu_hw_events, cpu).ds = ds;
1020                 err = 0;
1021         }
1022
1023         if (err)
1024                 release_bts_hardware();
1025         else {
1026                 for_each_online_cpu(cpu)
1027                         init_debug_store_on_cpu(cpu);
1028         }
1029
1030         put_online_cpus();
1031
1032         return err;
1033 }
1034
1035 static void hw_perf_event_destroy(struct perf_event *event)
1036 {
1037         if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
1038                 release_pmc_hardware();
1039                 release_bts_hardware();
1040                 mutex_unlock(&pmc_reserve_mutex);
1041         }
1042 }
1043
1044 static inline int x86_pmu_initialized(void)
1045 {
1046         return x86_pmu.handle_irq != NULL;
1047 }
1048
1049 static inline int
1050 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
1051 {
1052         unsigned int cache_type, cache_op, cache_result;
1053         u64 config, val;
1054
1055         config = attr->config;
1056
1057         cache_type = (config >>  0) & 0xff;
1058         if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
1059                 return -EINVAL;
1060
1061         cache_op = (config >>  8) & 0xff;
1062         if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
1063                 return -EINVAL;
1064
1065         cache_result = (config >> 16) & 0xff;
1066         if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
1067                 return -EINVAL;
1068
1069         val = hw_cache_event_ids[cache_type][cache_op][cache_result];
1070
1071         if (val == 0)
1072                 return -ENOENT;
1073
1074         if (val == -1)
1075                 return -EINVAL;
1076
1077         hwc->config |= val;
1078
1079         return 0;
1080 }
1081
1082 static void intel_pmu_enable_bts(u64 config)
1083 {
1084         unsigned long debugctlmsr;
1085
1086         debugctlmsr = get_debugctlmsr();
1087
1088         debugctlmsr |= X86_DEBUGCTL_TR;
1089         debugctlmsr |= X86_DEBUGCTL_BTS;
1090         debugctlmsr |= X86_DEBUGCTL_BTINT;
1091
1092         if (!(config & ARCH_PERFMON_EVENTSEL_OS))
1093                 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
1094
1095         if (!(config & ARCH_PERFMON_EVENTSEL_USR))
1096                 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
1097
1098         update_debugctlmsr(debugctlmsr);
1099 }
1100
1101 static void intel_pmu_disable_bts(void)
1102 {
1103         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1104         unsigned long debugctlmsr;
1105
1106         if (!cpuc->ds)
1107                 return;
1108
1109         debugctlmsr = get_debugctlmsr();
1110
1111         debugctlmsr &=
1112                 ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
1113                   X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
1114
1115         update_debugctlmsr(debugctlmsr);
1116 }
1117
1118 /*
1119  * Setup the hardware configuration for a given attr_type
1120  */
1121 static int __hw_perf_event_init(struct perf_event *event)
1122 {
1123         struct perf_event_attr *attr = &event->attr;
1124         struct hw_perf_event *hwc = &event->hw;
1125         u64 config;
1126         int err;
1127
1128         if (!x86_pmu_initialized())
1129                 return -ENODEV;
1130
1131         err = 0;
1132         if (!atomic_inc_not_zero(&active_events)) {
1133                 mutex_lock(&pmc_reserve_mutex);
1134                 if (atomic_read(&active_events) == 0) {
1135                         if (!reserve_pmc_hardware())
1136                                 err = -EBUSY;
1137                         else
1138                                 err = reserve_bts_hardware();
1139                 }
1140                 if (!err)
1141                         atomic_inc(&active_events);
1142                 mutex_unlock(&pmc_reserve_mutex);
1143         }
1144         if (err)
1145                 return err;
1146
1147         event->destroy = hw_perf_event_destroy;
1148
1149         /*
1150          * Generate PMC IRQs:
1151          * (keep 'enabled' bit clear for now)
1152          */
1153         hwc->config = ARCH_PERFMON_EVENTSEL_INT;
1154
1155         hwc->idx = -1;
1156         hwc->last_cpu = -1;
1157         hwc->last_tag = ~0ULL;
1158
1159         /*
1160          * Count user and OS events unless requested not to.
1161          */
1162         if (!attr->exclude_user)
1163                 hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
1164         if (!attr->exclude_kernel)
1165                 hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
1166
1167         if (!hwc->sample_period) {
1168                 hwc->sample_period = x86_pmu.max_period;
1169                 hwc->last_period = hwc->sample_period;
1170                 atomic64_set(&hwc->period_left, hwc->sample_period);
1171         } else {
1172                 /*
1173                  * If we have a PMU initialized but no APIC
1174                  * interrupts, we cannot sample hardware
1175                  * events (user-space has to fall back and
1176                  * sample via a hrtimer based software event):
1177                  */
1178                 if (!x86_pmu.apic)
1179                         return -EOPNOTSUPP;
1180         }
1181
1182         /*
1183          * Raw hw_event type provide the config in the hw_event structure
1184          */
1185         if (attr->type == PERF_TYPE_RAW) {
1186                 hwc->config |= x86_pmu.raw_event(attr->config);
1187                 return 0;
1188         }
1189
1190         if (attr->type == PERF_TYPE_HW_CACHE)
1191                 return set_ext_hw_attr(hwc, attr);
1192
1193         if (attr->config >= x86_pmu.max_events)
1194                 return -EINVAL;
1195
1196         /*
1197          * The generic map:
1198          */
1199         config = x86_pmu.event_map(attr->config);
1200
1201         if (config == 0)
1202                 return -ENOENT;
1203
1204         if (config == -1LL)
1205                 return -EINVAL;
1206
1207         /*
1208          * Branch tracing:
1209          */
1210         if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
1211             (hwc->sample_period == 1)) {
1212                 /* BTS is not supported by this architecture. */
1213                 if (!bts_available())
1214                         return -EOPNOTSUPP;
1215
1216                 /* BTS is currently only allowed for user-mode. */
1217                 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1218                         return -EOPNOTSUPP;
1219         }
1220
1221         hwc->config |= config;
1222
1223         return 0;
1224 }
1225
1226 static void p6_pmu_disable_all(void)
1227 {
1228         u64 val;
1229
1230         /* p6 only has one enable register */
1231         rdmsrl(MSR_P6_EVNTSEL0, val);
1232         val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1233         wrmsrl(MSR_P6_EVNTSEL0, val);
1234 }
1235
1236 static void intel_pmu_disable_all(void)
1237 {
1238         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1239
1240         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1241
1242         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1243                 intel_pmu_disable_bts();
1244 }
1245
1246 static void x86_pmu_disable_all(void)
1247 {
1248         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1249         int idx;
1250
1251         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1252                 u64 val;
1253
1254                 if (!test_bit(idx, cpuc->active_mask))
1255                         continue;
1256                 rdmsrl(x86_pmu.eventsel + idx, val);
1257                 if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
1258                         continue;
1259                 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1260                 wrmsrl(x86_pmu.eventsel + idx, val);
1261         }
1262 }
1263
1264 void hw_perf_disable(void)
1265 {
1266         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1267
1268         if (!x86_pmu_initialized())
1269                 return;
1270
1271         if (!cpuc->enabled)
1272                 return;
1273
1274         cpuc->n_added = 0;
1275         cpuc->enabled = 0;
1276         barrier();
1277
1278         x86_pmu.disable_all();
1279 }
1280
1281 static void p6_pmu_enable_all(void)
1282 {
1283         unsigned long val;
1284
1285         /* p6 only has one enable register */
1286         rdmsrl(MSR_P6_EVNTSEL0, val);
1287         val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1288         wrmsrl(MSR_P6_EVNTSEL0, val);
1289 }
1290
1291 static void intel_pmu_enable_all(void)
1292 {
1293         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1294
1295         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
1296
1297         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1298                 struct perf_event *event =
1299                         cpuc->events[X86_PMC_IDX_FIXED_BTS];
1300
1301                 if (WARN_ON_ONCE(!event))
1302                         return;
1303
1304                 intel_pmu_enable_bts(event->hw.config);
1305         }
1306 }
1307
1308 static void x86_pmu_enable_all(void)
1309 {
1310         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1311         int idx;
1312
1313         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1314                 struct perf_event *event = cpuc->events[idx];
1315                 u64 val;
1316
1317                 if (!test_bit(idx, cpuc->active_mask))
1318                         continue;
1319
1320                 val = event->hw.config;
1321                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1322                 wrmsrl(x86_pmu.eventsel + idx, val);
1323         }
1324 }
1325
1326 static const struct pmu pmu;
1327
1328 static inline int is_x86_event(struct perf_event *event)
1329 {
1330         return event->pmu == &pmu;
1331 }
1332
1333 static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
1334 {
1335         struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
1336         unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
1337         int i, j, w, wmax, num = 0;
1338         struct hw_perf_event *hwc;
1339
1340         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1341
1342         for (i = 0; i < n; i++) {
1343                 constraints[i] =
1344                   x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
1345         }
1346
1347         /*
1348          * fastpath, try to reuse previous register
1349          */
1350         for (i = 0; i < n; i++) {
1351                 hwc = &cpuc->event_list[i]->hw;
1352                 c = constraints[i];
1353
1354                 /* never assigned */
1355                 if (hwc->idx == -1)
1356                         break;
1357
1358                 /* constraint still honored */
1359                 if (!test_bit(hwc->idx, c->idxmsk))
1360                         break;
1361
1362                 /* not already used */
1363                 if (test_bit(hwc->idx, used_mask))
1364                         break;
1365
1366                 set_bit(hwc->idx, used_mask);
1367                 if (assign)
1368                         assign[i] = hwc->idx;
1369         }
1370         if (i == n)
1371                 goto done;
1372
1373         /*
1374          * begin slow path
1375          */
1376
1377         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1378
1379         /*
1380          * weight = number of possible counters
1381          *
1382          * 1    = most constrained, only works on one counter
1383          * wmax = least constrained, works on any counter
1384          *
1385          * assign events to counters starting with most
1386          * constrained events.
1387          */
1388         wmax = x86_pmu.num_events;
1389
1390         /*
1391          * when fixed event counters are present,
1392          * wmax is incremented by 1 to account
1393          * for one more choice
1394          */
1395         if (x86_pmu.num_events_fixed)
1396                 wmax++;
1397
1398         for (w = 1, num = n; num && w <= wmax; w++) {
1399                 /* for each event */
1400                 for (i = 0; num && i < n; i++) {
1401                         c = constraints[i];
1402                         hwc = &cpuc->event_list[i]->hw;
1403
1404                         if (c->weight != w)
1405                                 continue;
1406
1407                         for_each_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
1408                                 if (!test_bit(j, used_mask))
1409                                         break;
1410                         }
1411
1412                         if (j == X86_PMC_IDX_MAX)
1413                                 break;
1414
1415                         set_bit(j, used_mask);
1416
1417                         if (assign)
1418                                 assign[i] = j;
1419                         num--;
1420                 }
1421         }
1422 done:
1423         /*
1424          * scheduling failed or is just a simulation,
1425          * free resources if necessary
1426          */
1427         if (!assign || num) {
1428                 for (i = 0; i < n; i++) {
1429                         if (x86_pmu.put_event_constraints)
1430                                 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
1431                 }
1432         }
1433         return num ? -ENOSPC : 0;
1434 }
1435
1436 /*
1437  * dogrp: true if must collect siblings events (group)
1438  * returns total number of events and error code
1439  */
1440 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
1441 {
1442         struct perf_event *event;
1443         int n, max_count;
1444
1445         max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
1446
1447         /* current number of events already accepted */
1448         n = cpuc->n_events;
1449
1450         if (is_x86_event(leader)) {
1451                 if (n >= max_count)
1452                         return -ENOSPC;
1453                 cpuc->event_list[n] = leader;
1454                 n++;
1455         }
1456         if (!dogrp)
1457                 return n;
1458
1459         list_for_each_entry(event, &leader->sibling_list, group_entry) {
1460                 if (!is_x86_event(event) ||
1461                     event->state <= PERF_EVENT_STATE_OFF)
1462                         continue;
1463
1464                 if (n >= max_count)
1465                         return -ENOSPC;
1466
1467                 cpuc->event_list[n] = event;
1468                 n++;
1469         }
1470         return n;
1471 }
1472
1473 static inline void x86_assign_hw_event(struct perf_event *event,
1474                                 struct cpu_hw_events *cpuc, int i)
1475 {
1476         struct hw_perf_event *hwc = &event->hw;
1477
1478         hwc->idx = cpuc->assign[i];
1479         hwc->last_cpu = smp_processor_id();
1480         hwc->last_tag = ++cpuc->tags[i];
1481
1482         if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
1483                 hwc->config_base = 0;
1484                 hwc->event_base = 0;
1485         } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
1486                 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1487                 /*
1488                  * We set it so that event_base + idx in wrmsr/rdmsr maps to
1489                  * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
1490                  */
1491                 hwc->event_base =
1492                         MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
1493         } else {
1494                 hwc->config_base = x86_pmu.eventsel;
1495                 hwc->event_base  = x86_pmu.perfctr;
1496         }
1497 }
1498
1499 static inline int match_prev_assignment(struct hw_perf_event *hwc,
1500                                         struct cpu_hw_events *cpuc,
1501                                         int i)
1502 {
1503         return hwc->idx == cpuc->assign[i] &&
1504                 hwc->last_cpu == smp_processor_id() &&
1505                 hwc->last_tag == cpuc->tags[i];
1506 }
1507
1508 static void x86_pmu_stop(struct perf_event *event);
1509
1510 void hw_perf_enable(void)
1511 {
1512         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1513         struct perf_event *event;
1514         struct hw_perf_event *hwc;
1515         int i;
1516
1517         if (!x86_pmu_initialized())
1518                 return;
1519
1520         if (cpuc->enabled)
1521                 return;
1522
1523         if (cpuc->n_added) {
1524                 /*
1525                  * apply assignment obtained either from
1526                  * hw_perf_group_sched_in() or x86_pmu_enable()
1527                  *
1528                  * step1: save events moving to new counters
1529                  * step2: reprogram moved events into new counters
1530                  */
1531                 for (i = 0; i < cpuc->n_events; i++) {
1532
1533                         event = cpuc->event_list[i];
1534                         hwc = &event->hw;
1535
1536                         /*
1537                          * we can avoid reprogramming counter if:
1538                          * - assigned same counter as last time
1539                          * - running on same CPU as last time
1540                          * - no other event has used the counter since
1541                          */
1542                         if (hwc->idx == -1 ||
1543                             match_prev_assignment(hwc, cpuc, i))
1544                                 continue;
1545
1546                         x86_pmu_stop(event);
1547
1548                         hwc->idx = -1;
1549                 }
1550
1551                 for (i = 0; i < cpuc->n_events; i++) {
1552
1553                         event = cpuc->event_list[i];
1554                         hwc = &event->hw;
1555
1556                         if (hwc->idx == -1) {
1557                                 x86_assign_hw_event(event, cpuc, i);
1558                                 x86_perf_event_set_period(event, hwc, hwc->idx);
1559                         }
1560                         /*
1561                          * need to mark as active because x86_pmu_disable()
1562                          * clear active_mask and events[] yet it preserves
1563                          * idx
1564                          */
1565                         set_bit(hwc->idx, cpuc->active_mask);
1566                         cpuc->events[hwc->idx] = event;
1567
1568                         x86_pmu.enable(hwc, hwc->idx);
1569                         perf_event_update_userpage(event);
1570                 }
1571                 cpuc->n_added = 0;
1572                 perf_events_lapic_init();
1573         }
1574
1575         cpuc->enabled = 1;
1576         barrier();
1577
1578         x86_pmu.enable_all();
1579 }
1580
1581 static inline u64 intel_pmu_get_status(void)
1582 {
1583         u64 status;
1584
1585         rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1586
1587         return status;
1588 }
1589
1590 static inline void intel_pmu_ack_status(u64 ack)
1591 {
1592         wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1593 }
1594
1595 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1596 {
1597         (void)checking_wrmsrl(hwc->config_base + idx,
1598                               hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
1599 }
1600
1601 static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1602 {
1603         (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
1604 }
1605
1606 static inline void
1607 intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
1608 {
1609         int idx = __idx - X86_PMC_IDX_FIXED;
1610         u64 ctrl_val, mask;
1611
1612         mask = 0xfULL << (idx * 4);
1613
1614         rdmsrl(hwc->config_base, ctrl_val);
1615         ctrl_val &= ~mask;
1616         (void)checking_wrmsrl(hwc->config_base, ctrl_val);
1617 }
1618
1619 static inline void
1620 p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1621 {
1622         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1623         u64 val = P6_NOP_EVENT;
1624
1625         if (cpuc->enabled)
1626                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1627
1628         (void)checking_wrmsrl(hwc->config_base + idx, val);
1629 }
1630
1631 static inline void
1632 intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1633 {
1634         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1635                 intel_pmu_disable_bts();
1636                 return;
1637         }
1638
1639         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1640                 intel_pmu_disable_fixed(hwc, idx);
1641                 return;
1642         }
1643
1644         x86_pmu_disable_event(hwc, idx);
1645 }
1646
1647 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1648
1649 /*
1650  * Set the next IRQ period, based on the hwc->period_left value.
1651  * To be called with the event disabled in hw:
1652  */
1653 static int
1654 x86_perf_event_set_period(struct perf_event *event,
1655                              struct hw_perf_event *hwc, int idx)
1656 {
1657         s64 left = atomic64_read(&hwc->period_left);
1658         s64 period = hwc->sample_period;
1659         int err, ret = 0;
1660
1661         if (idx == X86_PMC_IDX_FIXED_BTS)
1662                 return 0;
1663
1664         /*
1665          * If we are way outside a reasonable range then just skip forward:
1666          */
1667         if (unlikely(left <= -period)) {
1668                 left = period;
1669                 atomic64_set(&hwc->period_left, left);
1670                 hwc->last_period = period;
1671                 ret = 1;
1672         }
1673
1674         if (unlikely(left <= 0)) {
1675                 left += period;
1676                 atomic64_set(&hwc->period_left, left);
1677                 hwc->last_period = period;
1678                 ret = 1;
1679         }
1680         /*
1681          * Quirk: certain CPUs dont like it if just 1 hw_event is left:
1682          */
1683         if (unlikely(left < 2))
1684                 left = 2;
1685
1686         if (left > x86_pmu.max_period)
1687                 left = x86_pmu.max_period;
1688
1689         per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
1690
1691         /*
1692          * The hw event starts counting from this event offset,
1693          * mark it to be able to extra future deltas:
1694          */
1695         atomic64_set(&hwc->prev_count, (u64)-left);
1696
1697         err = checking_wrmsrl(hwc->event_base + idx,
1698                              (u64)(-left) & x86_pmu.event_mask);
1699
1700         perf_event_update_userpage(event);
1701
1702         return ret;
1703 }
1704
1705 static inline void
1706 intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
1707 {
1708         int idx = __idx - X86_PMC_IDX_FIXED;
1709         u64 ctrl_val, bits, mask;
1710         int err;
1711
1712         /*
1713          * Enable IRQ generation (0x8),
1714          * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1715          * if requested:
1716          */
1717         bits = 0x8ULL;
1718         if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1719                 bits |= 0x2;
1720         if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1721                 bits |= 0x1;
1722
1723         /*
1724          * ANY bit is supported in v3 and up
1725          */
1726         if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
1727                 bits |= 0x4;
1728
1729         bits <<= (idx * 4);
1730         mask = 0xfULL << (idx * 4);
1731
1732         rdmsrl(hwc->config_base, ctrl_val);
1733         ctrl_val &= ~mask;
1734         ctrl_val |= bits;
1735         err = checking_wrmsrl(hwc->config_base, ctrl_val);
1736 }
1737
1738 static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1739 {
1740         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1741         u64 val;
1742
1743         val = hwc->config;
1744         if (cpuc->enabled)
1745                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1746
1747         (void)checking_wrmsrl(hwc->config_base + idx, val);
1748 }
1749
1750
1751 static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1752 {
1753         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1754                 if (!__get_cpu_var(cpu_hw_events).enabled)
1755                         return;
1756
1757                 intel_pmu_enable_bts(hwc->config);
1758                 return;
1759         }
1760
1761         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1762                 intel_pmu_enable_fixed(hwc, idx);
1763                 return;
1764         }
1765
1766         __x86_pmu_enable_event(hwc, idx);
1767 }
1768
1769 static void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1770 {
1771         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1772         if (cpuc->enabled)
1773                 __x86_pmu_enable_event(hwc, idx);
1774 }
1775
1776 /*
1777  * activate a single event
1778  *
1779  * The event is added to the group of enabled events
1780  * but only if it can be scehduled with existing events.
1781  *
1782  * Called with PMU disabled. If successful and return value 1,
1783  * then guaranteed to call perf_enable() and hw_perf_enable()
1784  */
1785 static int x86_pmu_enable(struct perf_event *event)
1786 {
1787         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1788         struct hw_perf_event *hwc;
1789         int assign[X86_PMC_IDX_MAX];
1790         int n, n0, ret;
1791
1792         hwc = &event->hw;
1793
1794         n0 = cpuc->n_events;
1795         n = collect_events(cpuc, event, false);
1796         if (n < 0)
1797                 return n;
1798
1799         ret = x86_schedule_events(cpuc, n, assign);
1800         if (ret)
1801                 return ret;
1802         /*
1803          * copy new assignment, now we know it is possible
1804          * will be used by hw_perf_enable()
1805          */
1806         memcpy(cpuc->assign, assign, n*sizeof(int));
1807
1808         cpuc->n_events = n;
1809         cpuc->n_added  = n - n0;
1810
1811         return 0;
1812 }
1813
1814 static int x86_pmu_start(struct perf_event *event)
1815 {
1816         struct hw_perf_event *hwc = &event->hw;
1817
1818         if (hwc->idx == -1)
1819                 return -EAGAIN;
1820
1821         x86_perf_event_set_period(event, hwc, hwc->idx);
1822         x86_pmu.enable(hwc, hwc->idx);
1823
1824         return 0;
1825 }
1826
1827 static void x86_pmu_unthrottle(struct perf_event *event)
1828 {
1829         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1830         struct hw_perf_event *hwc = &event->hw;
1831
1832         if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
1833                                 cpuc->events[hwc->idx] != event))
1834                 return;
1835
1836         x86_pmu.enable(hwc, hwc->idx);
1837 }
1838
1839 void perf_event_print_debug(void)
1840 {
1841         u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
1842         struct cpu_hw_events *cpuc;
1843         unsigned long flags;
1844         int cpu, idx;
1845
1846         if (!x86_pmu.num_events)
1847                 return;
1848
1849         local_irq_save(flags);
1850
1851         cpu = smp_processor_id();
1852         cpuc = &per_cpu(cpu_hw_events, cpu);
1853
1854         if (x86_pmu.version >= 2) {
1855                 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1856                 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1857                 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1858                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
1859
1860                 pr_info("\n");
1861                 pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
1862                 pr_info("CPU#%d: status:     %016llx\n", cpu, status);
1863                 pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
1864                 pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
1865         }
1866         pr_info("CPU#%d: active:       %016llx\n", cpu, *(u64 *)cpuc->active_mask);
1867
1868         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1869                 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1870                 rdmsrl(x86_pmu.perfctr  + idx, pmc_count);
1871
1872                 prev_left = per_cpu(pmc_prev_left[idx], cpu);
1873
1874                 pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
1875                         cpu, idx, pmc_ctrl);
1876                 pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
1877                         cpu, idx, pmc_count);
1878                 pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
1879                         cpu, idx, prev_left);
1880         }
1881         for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1882                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1883
1884                 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
1885                         cpu, idx, pmc_count);
1886         }
1887         local_irq_restore(flags);
1888 }
1889
1890 static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
1891 {
1892         struct debug_store *ds = cpuc->ds;
1893         struct bts_record {
1894                 u64     from;
1895                 u64     to;
1896                 u64     flags;
1897         };
1898         struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
1899         struct bts_record *at, *top;
1900         struct perf_output_handle handle;
1901         struct perf_event_header header;
1902         struct perf_sample_data data;
1903         struct pt_regs regs;
1904
1905         if (!event)
1906                 return;
1907
1908         if (!ds)
1909                 return;
1910
1911         at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
1912         top = (struct bts_record *)(unsigned long)ds->bts_index;
1913
1914         if (top <= at)
1915                 return;
1916
1917         ds->bts_index = ds->bts_buffer_base;
1918
1919
1920         data.period     = event->hw.last_period;
1921         data.addr       = 0;
1922         data.raw        = NULL;
1923         regs.ip         = 0;
1924
1925         /*
1926          * Prepare a generic sample, i.e. fill in the invariant fields.
1927          * We will overwrite the from and to address before we output
1928          * the sample.
1929          */
1930         perf_prepare_sample(&header, &data, event, &regs);
1931
1932         if (perf_output_begin(&handle, event,
1933                               header.size * (top - at), 1, 1))
1934                 return;
1935
1936         for (; at < top; at++) {
1937                 data.ip         = at->from;
1938                 data.addr       = at->to;
1939
1940                 perf_output_sample(&handle, &header, &data, event);
1941         }
1942
1943         perf_output_end(&handle);
1944
1945         /* There's new data available. */
1946         event->hw.interrupts++;
1947         event->pending_kill = POLL_IN;
1948 }
1949
1950 static void x86_pmu_stop(struct perf_event *event)
1951 {
1952         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1953         struct hw_perf_event *hwc = &event->hw;
1954         int idx = hwc->idx;
1955
1956         /*
1957          * Must be done before we disable, otherwise the nmi handler
1958          * could reenable again:
1959          */
1960         clear_bit(idx, cpuc->active_mask);
1961         x86_pmu.disable(hwc, idx);
1962
1963         /*
1964          * Drain the remaining delta count out of a event
1965          * that we are disabling:
1966          */
1967         x86_perf_event_update(event, hwc, idx);
1968
1969         /* Drain the remaining BTS records. */
1970         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
1971                 intel_pmu_drain_bts_buffer(cpuc);
1972
1973         cpuc->events[idx] = NULL;
1974 }
1975
1976 static void x86_pmu_disable(struct perf_event *event)
1977 {
1978         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1979         int i;
1980
1981         x86_pmu_stop(event);
1982
1983         for (i = 0; i < cpuc->n_events; i++) {
1984                 if (event == cpuc->event_list[i]) {
1985
1986                         if (x86_pmu.put_event_constraints)
1987                                 x86_pmu.put_event_constraints(cpuc, event);
1988
1989                         while (++i < cpuc->n_events)
1990                                 cpuc->event_list[i-1] = cpuc->event_list[i];
1991
1992                         --cpuc->n_events;
1993                         break;
1994                 }
1995         }
1996         perf_event_update_userpage(event);
1997 }
1998
1999 /*
2000  * Save and restart an expired event. Called by NMI contexts,
2001  * so it has to be careful about preempting normal event ops:
2002  */
2003 static int intel_pmu_save_and_restart(struct perf_event *event)
2004 {
2005         struct hw_perf_event *hwc = &event->hw;
2006         int idx = hwc->idx;
2007         int ret;
2008
2009         x86_perf_event_update(event, hwc, idx);
2010         ret = x86_perf_event_set_period(event, hwc, idx);
2011
2012         if (event->state == PERF_EVENT_STATE_ACTIVE)
2013                 intel_pmu_enable_event(hwc, idx);
2014
2015         return ret;
2016 }
2017
2018 static void intel_pmu_reset(void)
2019 {
2020         struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
2021         unsigned long flags;
2022         int idx;
2023
2024         if (!x86_pmu.num_events)
2025                 return;
2026
2027         local_irq_save(flags);
2028
2029         printk("clearing PMU state on CPU#%d\n", smp_processor_id());
2030
2031         for (idx = 0; idx < x86_pmu.num_events; idx++) {
2032                 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
2033                 checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
2034         }
2035         for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
2036                 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
2037         }
2038         if (ds)
2039                 ds->bts_index = ds->bts_buffer_base;
2040
2041         local_irq_restore(flags);
2042 }
2043
2044 /*
2045  * This handler is triggered by the local APIC, so the APIC IRQ handling
2046  * rules apply:
2047  */
2048 static int intel_pmu_handle_irq(struct pt_regs *regs)
2049 {
2050         struct perf_sample_data data;
2051         struct cpu_hw_events *cpuc;
2052         int bit, loops;
2053         u64 ack, status;
2054
2055         data.addr = 0;
2056         data.raw = NULL;
2057
2058         cpuc = &__get_cpu_var(cpu_hw_events);
2059
2060         perf_disable();
2061         intel_pmu_drain_bts_buffer(cpuc);
2062         status = intel_pmu_get_status();
2063         if (!status) {
2064                 perf_enable();
2065                 return 0;
2066         }
2067
2068         loops = 0;
2069 again:
2070         if (++loops > 100) {
2071                 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
2072                 perf_event_print_debug();
2073                 intel_pmu_reset();
2074                 perf_enable();
2075                 return 1;
2076         }
2077
2078         inc_irq_stat(apic_perf_irqs);
2079         ack = status;
2080         for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
2081                 struct perf_event *event = cpuc->events[bit];
2082
2083                 clear_bit(bit, (unsigned long *) &status);
2084                 if (!test_bit(bit, cpuc->active_mask))
2085                         continue;
2086
2087                 if (!intel_pmu_save_and_restart(event))
2088                         continue;
2089
2090                 data.period = event->hw.last_period;
2091
2092                 if (perf_event_overflow(event, 1, &data, regs))
2093                         intel_pmu_disable_event(&event->hw, bit);
2094         }
2095
2096         intel_pmu_ack_status(ack);
2097
2098         /*
2099          * Repeat if there is more work to be done:
2100          */
2101         status = intel_pmu_get_status();
2102         if (status)
2103                 goto again;
2104
2105         perf_enable();
2106
2107         return 1;
2108 }
2109
2110 static int x86_pmu_handle_irq(struct pt_regs *regs)
2111 {
2112         struct perf_sample_data data;
2113         struct cpu_hw_events *cpuc;
2114         struct perf_event *event;
2115         struct hw_perf_event *hwc;
2116         int idx, handled = 0;
2117         u64 val;
2118
2119         data.addr = 0;
2120         data.raw = NULL;
2121
2122         cpuc = &__get_cpu_var(cpu_hw_events);
2123
2124         for (idx = 0; idx < x86_pmu.num_events; idx++) {
2125                 if (!test_bit(idx, cpuc->active_mask))
2126                         continue;
2127
2128                 event = cpuc->events[idx];
2129                 hwc = &event->hw;
2130
2131                 val = x86_perf_event_update(event, hwc, idx);
2132                 if (val & (1ULL << (x86_pmu.event_bits - 1)))
2133                         continue;
2134
2135                 /*
2136                  * event overflow
2137                  */
2138                 handled         = 1;
2139                 data.period     = event->hw.last_period;
2140
2141                 if (!x86_perf_event_set_period(event, hwc, idx))
2142                         continue;
2143
2144                 if (perf_event_overflow(event, 1, &data, regs))
2145                         x86_pmu.disable(hwc, idx);
2146         }
2147
2148         if (handled)
2149                 inc_irq_stat(apic_perf_irqs);
2150
2151         return handled;
2152 }
2153
2154 void smp_perf_pending_interrupt(struct pt_regs *regs)
2155 {
2156         irq_enter();
2157         ack_APIC_irq();
2158         inc_irq_stat(apic_pending_irqs);
2159         perf_event_do_pending();
2160         irq_exit();
2161 }
2162
2163 void set_perf_event_pending(void)
2164 {
2165 #ifdef CONFIG_X86_LOCAL_APIC
2166         if (!x86_pmu.apic || !x86_pmu_initialized())
2167                 return;
2168
2169         apic->send_IPI_self(LOCAL_PENDING_VECTOR);
2170 #endif
2171 }
2172
2173 void perf_events_lapic_init(void)
2174 {
2175 #ifdef CONFIG_X86_LOCAL_APIC
2176         if (!x86_pmu.apic || !x86_pmu_initialized())
2177                 return;
2178
2179         /*
2180          * Always use NMI for PMU
2181          */
2182         apic_write(APIC_LVTPC, APIC_DM_NMI);
2183 #endif
2184 }
2185
2186 static int __kprobes
2187 perf_event_nmi_handler(struct notifier_block *self,
2188                          unsigned long cmd, void *__args)
2189 {
2190         struct die_args *args = __args;
2191         struct pt_regs *regs;
2192
2193         if (!atomic_read(&active_events))
2194                 return NOTIFY_DONE;
2195
2196         switch (cmd) {
2197         case DIE_NMI:
2198         case DIE_NMI_IPI:
2199                 break;
2200
2201         default:
2202                 return NOTIFY_DONE;
2203         }
2204
2205         regs = args->regs;
2206
2207 #ifdef CONFIG_X86_LOCAL_APIC
2208         apic_write(APIC_LVTPC, APIC_DM_NMI);
2209 #endif
2210         /*
2211          * Can't rely on the handled return value to say it was our NMI, two
2212          * events could trigger 'simultaneously' raising two back-to-back NMIs.
2213          *
2214          * If the first NMI handles both, the latter will be empty and daze
2215          * the CPU.
2216          */
2217         x86_pmu.handle_irq(regs);
2218
2219         return NOTIFY_STOP;
2220 }
2221
2222 static struct event_constraint unconstrained;
2223 static struct event_constraint emptyconstraint;
2224
2225 static struct event_constraint bts_constraint =
2226         EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
2227
2228 static struct event_constraint *
2229 intel_special_constraints(struct perf_event *event)
2230 {
2231         unsigned int hw_event;
2232
2233         hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
2234
2235         if (unlikely((hw_event ==
2236                       x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
2237                      (event->hw.sample_period == 1))) {
2238
2239                 return &bts_constraint;
2240         }
2241         return NULL;
2242 }
2243
2244 static struct event_constraint *
2245 intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
2246 {
2247         struct event_constraint *c;
2248
2249         c = intel_special_constraints(event);
2250         if (c)
2251                 return c;
2252
2253         if (x86_pmu.event_constraints) {
2254                 for_each_event_constraint(c, x86_pmu.event_constraints) {
2255                         if ((event->hw.config & c->cmask) == c->code)
2256                                 return c;
2257                 }
2258         }
2259
2260         return &unconstrained;
2261 }
2262
2263 /*
2264  * AMD64 events are detected based on their event codes.
2265  */
2266 static inline int amd_is_nb_event(struct hw_perf_event *hwc)
2267 {
2268         return (hwc->config & 0xe0) == 0xe0;
2269 }
2270
2271 static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
2272                                       struct perf_event *event)
2273 {
2274         struct hw_perf_event *hwc = &event->hw;
2275         struct amd_nb *nb = cpuc->amd_nb;
2276         int i;
2277
2278         /*
2279          * only care about NB events
2280          */
2281         if (!(nb && amd_is_nb_event(hwc)))
2282                 return;
2283
2284         /*
2285          * need to scan whole list because event may not have
2286          * been assigned during scheduling
2287          *
2288          * no race condition possible because event can only
2289          * be removed on one CPU at a time AND PMU is disabled
2290          * when we come here
2291          */
2292         for (i = 0; i < x86_pmu.num_events; i++) {
2293                 if (nb->owners[i] == event) {
2294                         cmpxchg(nb->owners+i, event, NULL);
2295                         break;
2296                 }
2297         }
2298 }
2299
2300  /*
2301   * AMD64 NorthBridge events need special treatment because
2302   * counter access needs to be synchronized across all cores
2303   * of a package. Refer to BKDG section 3.12
2304   *
2305   * NB events are events measuring L3 cache, Hypertransport
2306   * traffic. They are identified by an event code >= 0xe00.
2307   * They measure events on the NorthBride which is shared
2308   * by all cores on a package. NB events are counted on a
2309   * shared set of counters. When a NB event is programmed
2310   * in a counter, the data actually comes from a shared
2311   * counter. Thus, access to those counters needs to be
2312   * synchronized.
2313   *
2314   * We implement the synchronization such that no two cores
2315   * can be measuring NB events using the same counters. Thus,
2316   * we maintain a per-NB allocation table. The available slot
2317   * is propagated using the event_constraint structure.
2318   *
2319   * We provide only one choice for each NB event based on
2320   * the fact that only NB events have restrictions. Consequently,
2321   * if a counter is available, there is a guarantee the NB event
2322   * will be assigned to it. If no slot is available, an empty
2323   * constraint is returned and scheduling will eventually fail
2324   * for this event.
2325   *
2326   * Note that all cores attached the same NB compete for the same
2327   * counters to host NB events, this is why we use atomic ops. Some
2328   * multi-chip CPUs may have more than one NB.
2329   *
2330   * Given that resources are allocated (cmpxchg), they must be
2331   * eventually freed for others to use. This is accomplished by
2332   * calling amd_put_event_constraints().
2333   *
2334   * Non NB events are not impacted by this restriction.
2335   */
2336 static struct event_constraint *
2337 amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
2338 {
2339         struct hw_perf_event *hwc = &event->hw;
2340         struct amd_nb *nb = cpuc->amd_nb;
2341         struct perf_event *old = NULL;
2342         int max = x86_pmu.num_events;
2343         int i, j, k = -1;
2344
2345         /*
2346          * if not NB event or no NB, then no constraints
2347          */
2348         if (!(nb && amd_is_nb_event(hwc)))
2349                 return &unconstrained;
2350
2351         /*
2352          * detect if already present, if so reuse
2353          *
2354          * cannot merge with actual allocation
2355          * because of possible holes
2356          *
2357          * event can already be present yet not assigned (in hwc->idx)
2358          * because of successive calls to x86_schedule_events() from
2359          * hw_perf_group_sched_in() without hw_perf_enable()
2360          */
2361         for (i = 0; i < max; i++) {
2362                 /*
2363                  * keep track of first free slot
2364                  */
2365                 if (k == -1 && !nb->owners[i])
2366                         k = i;
2367
2368                 /* already present, reuse */
2369                 if (nb->owners[i] == event)
2370                         goto done;
2371         }
2372         /*
2373          * not present, so grab a new slot
2374          * starting either at:
2375          */
2376         if (hwc->idx != -1) {
2377                 /* previous assignment */
2378                 i = hwc->idx;
2379         } else if (k != -1) {
2380                 /* start from free slot found */
2381                 i = k;
2382         } else {
2383                 /*
2384                  * event not found, no slot found in
2385                  * first pass, try again from the
2386                  * beginning
2387                  */
2388                 i = 0;
2389         }
2390         j = i;
2391         do {
2392                 old = cmpxchg(nb->owners+i, NULL, event);
2393                 if (!old)
2394                         break;
2395                 if (++i == max)
2396                         i = 0;
2397         } while (i != j);
2398 done:
2399         if (!old)
2400                 return &nb->event_constraints[i];
2401
2402         return &emptyconstraint;
2403 }
2404
2405 static int x86_event_sched_in(struct perf_event *event,
2406                           struct perf_cpu_context *cpuctx, int cpu)
2407 {
2408         int ret = 0;
2409
2410         event->state = PERF_EVENT_STATE_ACTIVE;
2411         event->oncpu = cpu;
2412         event->tstamp_running += event->ctx->time - event->tstamp_stopped;
2413
2414         if (!is_x86_event(event))
2415                 ret = event->pmu->enable(event);
2416
2417         if (!ret && !is_software_event(event))
2418                 cpuctx->active_oncpu++;
2419
2420         if (!ret && event->attr.exclusive)
2421                 cpuctx->exclusive = 1;
2422
2423         return ret;
2424 }
2425
2426 static void x86_event_sched_out(struct perf_event *event,
2427                             struct perf_cpu_context *cpuctx, int cpu)
2428 {
2429         event->state = PERF_EVENT_STATE_INACTIVE;
2430         event->oncpu = -1;
2431
2432         if (!is_x86_event(event))
2433                 event->pmu->disable(event);
2434
2435         event->tstamp_running -= event->ctx->time - event->tstamp_stopped;
2436
2437         if (!is_software_event(event))
2438                 cpuctx->active_oncpu--;
2439
2440         if (event->attr.exclusive || !cpuctx->active_oncpu)
2441                 cpuctx->exclusive = 0;
2442 }
2443
2444 /*
2445  * Called to enable a whole group of events.
2446  * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
2447  * Assumes the caller has disabled interrupts and has
2448  * frozen the PMU with hw_perf_save_disable.
2449  *
2450  * called with PMU disabled. If successful and return value 1,
2451  * then guaranteed to call perf_enable() and hw_perf_enable()
2452  */
2453 int hw_perf_group_sched_in(struct perf_event *leader,
2454                struct perf_cpu_context *cpuctx,
2455                struct perf_event_context *ctx, int cpu)
2456 {
2457         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2458         struct perf_event *sub;
2459         int assign[X86_PMC_IDX_MAX];
2460         int n0, n1, ret;
2461
2462         /* n0 = total number of events */
2463         n0 = collect_events(cpuc, leader, true);
2464         if (n0 < 0)
2465                 return n0;
2466
2467         ret = x86_schedule_events(cpuc, n0, assign);
2468         if (ret)
2469                 return ret;
2470
2471         ret = x86_event_sched_in(leader, cpuctx, cpu);
2472         if (ret)
2473                 return ret;
2474
2475         n1 = 1;
2476         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2477                 if (sub->state > PERF_EVENT_STATE_OFF) {
2478                         ret = x86_event_sched_in(sub, cpuctx, cpu);
2479                         if (ret)
2480                                 goto undo;
2481                         ++n1;
2482                 }
2483         }
2484         /*
2485          * copy new assignment, now we know it is possible
2486          * will be used by hw_perf_enable()
2487          */
2488         memcpy(cpuc->assign, assign, n0*sizeof(int));
2489
2490         cpuc->n_events  = n0;
2491         cpuc->n_added   = n1;
2492         ctx->nr_active += n1;
2493
2494         /*
2495          * 1 means successful and events are active
2496          * This is not quite true because we defer
2497          * actual activation until hw_perf_enable() but
2498          * this way we* ensure caller won't try to enable
2499          * individual events
2500          */
2501         return 1;
2502 undo:
2503         x86_event_sched_out(leader, cpuctx, cpu);
2504         n0  = 1;
2505         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2506                 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
2507                         x86_event_sched_out(sub, cpuctx, cpu);
2508                         if (++n0 == n1)
2509                                 break;
2510                 }
2511         }
2512         return ret;
2513 }
2514
2515 static __read_mostly struct notifier_block perf_event_nmi_notifier = {
2516         .notifier_call          = perf_event_nmi_handler,
2517         .next                   = NULL,
2518         .priority               = 1
2519 };
2520
2521 static __initconst struct x86_pmu p6_pmu = {
2522         .name                   = "p6",
2523         .handle_irq             = x86_pmu_handle_irq,
2524         .disable_all            = p6_pmu_disable_all,
2525         .enable_all             = p6_pmu_enable_all,
2526         .enable                 = p6_pmu_enable_event,
2527         .disable                = p6_pmu_disable_event,
2528         .eventsel               = MSR_P6_EVNTSEL0,
2529         .perfctr                = MSR_P6_PERFCTR0,
2530         .event_map              = p6_pmu_event_map,
2531         .raw_event              = p6_pmu_raw_event,
2532         .max_events             = ARRAY_SIZE(p6_perfmon_event_map),
2533         .apic                   = 1,
2534         .max_period             = (1ULL << 31) - 1,
2535         .version                = 0,
2536         .num_events             = 2,
2537         /*
2538          * Events have 40 bits implemented. However they are designed such
2539          * that bits [32-39] are sign extensions of bit 31. As such the
2540          * effective width of a event for P6-like PMU is 32 bits only.
2541          *
2542          * See IA-32 Intel Architecture Software developer manual Vol 3B
2543          */
2544         .event_bits             = 32,
2545         .event_mask             = (1ULL << 32) - 1,
2546         .get_event_constraints  = intel_get_event_constraints,
2547         .event_constraints      = intel_p6_event_constraints
2548 };
2549
2550 static __initconst struct x86_pmu core_pmu = {
2551         .name                   = "core",
2552         .handle_irq             = x86_pmu_handle_irq,
2553         .disable_all            = x86_pmu_disable_all,
2554         .enable_all             = x86_pmu_enable_all,
2555         .enable                 = x86_pmu_enable_event,
2556         .disable                = x86_pmu_disable_event,
2557         .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
2558         .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
2559         .event_map              = intel_pmu_event_map,
2560         .raw_event              = intel_pmu_raw_event,
2561         .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
2562         .apic                   = 1,
2563         /*
2564          * Intel PMCs cannot be accessed sanely above 32 bit width,
2565          * so we install an artificial 1<<31 period regardless of
2566          * the generic event period:
2567          */
2568         .max_period             = (1ULL << 31) - 1,
2569         .get_event_constraints  = intel_get_event_constraints,
2570         .event_constraints      = intel_core_event_constraints,
2571 };
2572
2573 static __initconst struct x86_pmu intel_pmu = {
2574         .name                   = "Intel",
2575         .handle_irq             = intel_pmu_handle_irq,
2576         .disable_all            = intel_pmu_disable_all,
2577         .enable_all             = intel_pmu_enable_all,
2578         .enable                 = intel_pmu_enable_event,
2579         .disable                = intel_pmu_disable_event,
2580         .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
2581         .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
2582         .event_map              = intel_pmu_event_map,
2583         .raw_event              = intel_pmu_raw_event,
2584         .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
2585         .apic                   = 1,
2586         /*
2587          * Intel PMCs cannot be accessed sanely above 32 bit width,
2588          * so we install an artificial 1<<31 period regardless of
2589          * the generic event period:
2590          */
2591         .max_period             = (1ULL << 31) - 1,
2592         .enable_bts             = intel_pmu_enable_bts,
2593         .disable_bts            = intel_pmu_disable_bts,
2594         .get_event_constraints  = intel_get_event_constraints
2595 };
2596
2597 static __initconst struct x86_pmu amd_pmu = {
2598         .name                   = "AMD",
2599         .handle_irq             = x86_pmu_handle_irq,
2600         .disable_all            = x86_pmu_disable_all,
2601         .enable_all             = x86_pmu_enable_all,
2602         .enable                 = x86_pmu_enable_event,
2603         .disable                = x86_pmu_disable_event,
2604         .eventsel               = MSR_K7_EVNTSEL0,
2605         .perfctr                = MSR_K7_PERFCTR0,
2606         .event_map              = amd_pmu_event_map,
2607         .raw_event              = amd_pmu_raw_event,
2608         .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
2609         .num_events             = 4,
2610         .event_bits             = 48,
2611         .event_mask             = (1ULL << 48) - 1,
2612         .apic                   = 1,
2613         /* use highest bit to detect overflow */
2614         .max_period             = (1ULL << 47) - 1,
2615         .get_event_constraints  = amd_get_event_constraints,
2616         .put_event_constraints  = amd_put_event_constraints
2617 };
2618
2619 static __init int p6_pmu_init(void)
2620 {
2621         switch (boot_cpu_data.x86_model) {
2622         case 1:
2623         case 3:  /* Pentium Pro */
2624         case 5:
2625         case 6:  /* Pentium II */
2626         case 7:
2627         case 8:
2628         case 11: /* Pentium III */
2629         case 9:
2630         case 13:
2631                 /* Pentium M */
2632                 break;
2633         default:
2634                 pr_cont("unsupported p6 CPU model %d ",
2635                         boot_cpu_data.x86_model);
2636                 return -ENODEV;
2637         }
2638
2639         x86_pmu = p6_pmu;
2640
2641         return 0;
2642 }
2643
2644 static __init int intel_pmu_init(void)
2645 {
2646         union cpuid10_edx edx;
2647         union cpuid10_eax eax;
2648         unsigned int unused;
2649         unsigned int ebx;
2650         int version;
2651
2652         if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2653                 /* check for P6 processor family */
2654            if (boot_cpu_data.x86 == 6) {
2655                 return p6_pmu_init();
2656            } else {
2657                 return -ENODEV;
2658            }
2659         }
2660
2661         /*
2662          * Check whether the Architectural PerfMon supports
2663          * Branch Misses Retired hw_event or not.
2664          */
2665         cpuid(10, &eax.full, &ebx, &unused, &edx.full);
2666         if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
2667                 return -ENODEV;
2668
2669         version = eax.split.version_id;
2670         if (version < 2)
2671                 x86_pmu = core_pmu;
2672         else
2673                 x86_pmu = intel_pmu;
2674
2675         x86_pmu.version                 = version;
2676         x86_pmu.num_events              = eax.split.num_events;
2677         x86_pmu.event_bits              = eax.split.bit_width;
2678         x86_pmu.event_mask              = (1ULL << eax.split.bit_width) - 1;
2679
2680         /*
2681          * Quirk: v2 perfmon does not report fixed-purpose events, so
2682          * assume at least 3 events:
2683          */
2684         if (version > 1)
2685                 x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
2686
2687         /*
2688          * Install the hw-cache-events table:
2689          */
2690         switch (boot_cpu_data.x86_model) {
2691         case 14: /* 65 nm core solo/duo, "Yonah" */
2692                 pr_cont("Core events, ");
2693                 break;
2694
2695         case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2696         case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2697         case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2698         case 29: /* six-core 45 nm xeon "Dunnington" */
2699                 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
2700                        sizeof(hw_cache_event_ids));
2701
2702                 x86_pmu.event_constraints = intel_core2_event_constraints;
2703                 pr_cont("Core2 events, ");
2704                 break;
2705
2706         case 26: /* 45 nm nehalem, "Bloomfield" */
2707         case 30: /* 45 nm nehalem, "Lynnfield" */
2708                 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
2709                        sizeof(hw_cache_event_ids));
2710
2711                 x86_pmu.event_constraints = intel_nehalem_event_constraints;
2712                 pr_cont("Nehalem/Corei7 events, ");
2713                 break;
2714         case 28:
2715                 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2716                        sizeof(hw_cache_event_ids));
2717
2718                 x86_pmu.event_constraints = intel_gen_event_constraints;
2719                 pr_cont("Atom events, ");
2720                 break;
2721
2722         case 37: /* 32 nm nehalem, "Clarkdale" */
2723         case 44: /* 32 nm nehalem, "Gulftown" */
2724                 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
2725                        sizeof(hw_cache_event_ids));
2726
2727                 x86_pmu.event_constraints = intel_westmere_event_constraints;
2728                 pr_cont("Westmere events, ");
2729                 break;
2730         default:
2731                 /*
2732                  * default constraints for v2 and up
2733                  */
2734                 x86_pmu.event_constraints = intel_gen_event_constraints;
2735                 pr_cont("generic architected perfmon, ");
2736         }
2737         return 0;
2738 }
2739
2740 static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
2741 {
2742         struct amd_nb *nb;
2743         int i;
2744
2745         nb = kmalloc(sizeof(struct amd_nb), GFP_KERNEL);
2746         if (!nb)
2747                 return NULL;
2748
2749         memset(nb, 0, sizeof(*nb));
2750         nb->nb_id = nb_id;
2751
2752         /*
2753          * initialize all possible NB constraints
2754          */
2755         for (i = 0; i < x86_pmu.num_events; i++) {
2756                 set_bit(i, nb->event_constraints[i].idxmsk);
2757                 nb->event_constraints[i].weight = 1;
2758         }
2759         return nb;
2760 }
2761
2762 static void amd_pmu_cpu_online(int cpu)
2763 {
2764         struct cpu_hw_events *cpu1, *cpu2;
2765         struct amd_nb *nb = NULL;
2766         int i, nb_id;
2767
2768         if (boot_cpu_data.x86_max_cores < 2)
2769                 return;
2770
2771         /*
2772          * function may be called too early in the
2773          * boot process, in which case nb_id is bogus
2774          */
2775         nb_id = amd_get_nb_id(cpu);
2776         if (nb_id == BAD_APICID)
2777                 return;
2778
2779         cpu1 = &per_cpu(cpu_hw_events, cpu);
2780         cpu1->amd_nb = NULL;
2781
2782         raw_spin_lock(&amd_nb_lock);
2783
2784         for_each_online_cpu(i) {
2785                 cpu2 = &per_cpu(cpu_hw_events, i);
2786                 nb = cpu2->amd_nb;
2787                 if (!nb)
2788                         continue;
2789                 if (nb->nb_id == nb_id)
2790                         goto found;
2791         }
2792
2793         nb = amd_alloc_nb(cpu, nb_id);
2794         if (!nb) {
2795                 pr_err("perf_events: failed NB allocation for CPU%d\n", cpu);
2796                 raw_spin_unlock(&amd_nb_lock);
2797                 return;
2798         }
2799 found:
2800         nb->refcnt++;
2801         cpu1->amd_nb = nb;
2802
2803         raw_spin_unlock(&amd_nb_lock);
2804 }
2805
2806 static void amd_pmu_cpu_offline(int cpu)
2807 {
2808         struct cpu_hw_events *cpuhw;
2809
2810         if (boot_cpu_data.x86_max_cores < 2)
2811                 return;
2812
2813         cpuhw = &per_cpu(cpu_hw_events, cpu);
2814
2815         raw_spin_lock(&amd_nb_lock);
2816
2817         if (--cpuhw->amd_nb->refcnt == 0)
2818                 kfree(cpuhw->amd_nb);
2819
2820         cpuhw->amd_nb = NULL;
2821
2822         raw_spin_unlock(&amd_nb_lock);
2823 }
2824
2825 static __init int amd_pmu_init(void)
2826 {
2827         /* Performance-monitoring supported from K7 and later: */
2828         if (boot_cpu_data.x86 < 6)
2829                 return -ENODEV;
2830
2831         x86_pmu = amd_pmu;
2832
2833         /* Events are common for all AMDs */
2834         memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
2835                sizeof(hw_cache_event_ids));
2836
2837         /*
2838          * explicitly initialize the boot cpu, other cpus will get
2839          * the cpu hotplug callbacks from smp_init()
2840          */
2841         amd_pmu_cpu_online(smp_processor_id());
2842         return 0;
2843 }
2844
2845 static void __init pmu_check_apic(void)
2846 {
2847         if (cpu_has_apic)
2848                 return;
2849
2850         x86_pmu.apic = 0;
2851         pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
2852         pr_info("no hardware sampling interrupt available.\n");
2853 }
2854
2855 void __init init_hw_perf_events(void)
2856 {
2857         int err;
2858
2859         pr_info("Performance Events: ");
2860
2861         switch (boot_cpu_data.x86_vendor) {
2862         case X86_VENDOR_INTEL:
2863                 err = intel_pmu_init();
2864                 break;
2865         case X86_VENDOR_AMD:
2866                 err = amd_pmu_init();
2867                 break;
2868         default:
2869                 return;
2870         }
2871         if (err != 0) {
2872                 pr_cont("no PMU driver, software events only.\n");
2873                 return;
2874         }
2875
2876         pmu_check_apic();
2877
2878         pr_cont("%s PMU driver.\n", x86_pmu.name);
2879
2880         if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
2881                 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2882                      x86_pmu.num_events, X86_PMC_MAX_GENERIC);
2883                 x86_pmu.num_events = X86_PMC_MAX_GENERIC;
2884         }
2885         perf_event_mask = (1 << x86_pmu.num_events) - 1;
2886         perf_max_events = x86_pmu.num_events;
2887
2888         if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
2889                 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2890                      x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
2891                 x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
2892         }
2893
2894         perf_event_mask |=
2895                 ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
2896         x86_pmu.intel_ctrl = perf_event_mask;
2897
2898         perf_events_lapic_init();
2899         register_die_notifier(&perf_event_nmi_notifier);
2900
2901         unconstrained = (struct event_constraint)
2902                 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
2903                                    0, x86_pmu.num_events);
2904
2905         pr_info("... version:                %d\n",     x86_pmu.version);
2906         pr_info("... bit width:              %d\n",     x86_pmu.event_bits);
2907         pr_info("... generic registers:      %d\n",     x86_pmu.num_events);
2908         pr_info("... value mask:             %016Lx\n", x86_pmu.event_mask);
2909         pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
2910         pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_events_fixed);
2911         pr_info("... event mask:             %016Lx\n", perf_event_mask);
2912 }
2913
2914 static inline void x86_pmu_read(struct perf_event *event)
2915 {
2916         x86_perf_event_update(event, &event->hw, event->hw.idx);
2917 }
2918
2919 static const struct pmu pmu = {
2920         .enable         = x86_pmu_enable,
2921         .disable        = x86_pmu_disable,
2922         .start          = x86_pmu_start,
2923         .stop           = x86_pmu_stop,
2924         .read           = x86_pmu_read,
2925         .unthrottle     = x86_pmu_unthrottle,
2926 };
2927
2928 /*
2929  * validate a single event group
2930  *
2931  * validation include:
2932  *      - check events are compatible which each other
2933  *      - events do not compete for the same counter
2934  *      - number of events <= number of counters
2935  *
2936  * validation ensures the group can be loaded onto the
2937  * PMU if it was the only group available.
2938  */
2939 static int validate_group(struct perf_event *event)
2940 {
2941         struct perf_event *leader = event->group_leader;
2942         struct cpu_hw_events *fake_cpuc;
2943         int ret, n;
2944
2945         ret = -ENOMEM;
2946         fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
2947         if (!fake_cpuc)
2948                 goto out;
2949
2950         /*
2951          * the event is not yet connected with its
2952          * siblings therefore we must first collect
2953          * existing siblings, then add the new event
2954          * before we can simulate the scheduling
2955          */
2956         ret = -ENOSPC;
2957         n = collect_events(fake_cpuc, leader, true);
2958         if (n < 0)
2959                 goto out_free;
2960
2961         fake_cpuc->n_events = n;
2962         n = collect_events(fake_cpuc, event, false);
2963         if (n < 0)
2964                 goto out_free;
2965
2966         fake_cpuc->n_events = n;
2967
2968         ret = x86_schedule_events(fake_cpuc, n, NULL);
2969
2970 out_free:
2971         kfree(fake_cpuc);
2972 out:
2973         return ret;
2974 }
2975
2976 const struct pmu *hw_perf_event_init(struct perf_event *event)
2977 {
2978         const struct pmu *tmp;
2979         int err;
2980
2981         err = __hw_perf_event_init(event);
2982         if (!err) {
2983                 /*
2984                  * we temporarily connect event to its pmu
2985                  * such that validate_group() can classify
2986                  * it as an x86 event using is_x86_event()
2987                  */
2988                 tmp = event->pmu;
2989                 event->pmu = &pmu;
2990
2991                 if (event->group_leader != event)
2992                         err = validate_group(event);
2993
2994                 event->pmu = tmp;
2995         }
2996         if (err) {
2997                 if (event->destroy)
2998                         event->destroy(event);
2999                 return ERR_PTR(err);
3000         }
3001
3002         return &pmu;
3003 }
3004
3005 /*
3006  * callchain support
3007  */
3008
3009 static inline
3010 void callchain_store(struct perf_callchain_entry *entry, u64 ip)
3011 {
3012         if (entry->nr < PERF_MAX_STACK_DEPTH)
3013                 entry->ip[entry->nr++] = ip;
3014 }
3015
3016 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
3017 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
3018
3019
3020 static void
3021 backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
3022 {
3023         /* Ignore warnings */
3024 }
3025
3026 static void backtrace_warning(void *data, char *msg)
3027 {
3028         /* Ignore warnings */
3029 }
3030
3031 static int backtrace_stack(void *data, char *name)
3032 {
3033         return 0;
3034 }
3035
3036 static void backtrace_address(void *data, unsigned long addr, int reliable)
3037 {
3038         struct perf_callchain_entry *entry = data;
3039
3040         if (reliable)
3041                 callchain_store(entry, addr);
3042 }
3043
3044 static const struct stacktrace_ops backtrace_ops = {
3045         .warning                = backtrace_warning,
3046         .warning_symbol         = backtrace_warning_symbol,
3047         .stack                  = backtrace_stack,
3048         .address                = backtrace_address,
3049         .walk_stack             = print_context_stack_bp,
3050 };
3051
3052 #include "../dumpstack.h"
3053
3054 static void
3055 perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
3056 {
3057         callchain_store(entry, PERF_CONTEXT_KERNEL);
3058         callchain_store(entry, regs->ip);
3059
3060         dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
3061 }
3062
3063 /*
3064  * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
3065  */
3066 static unsigned long
3067 copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
3068 {
3069         unsigned long offset, addr = (unsigned long)from;
3070         int type = in_nmi() ? KM_NMI : KM_IRQ0;
3071         unsigned long size, len = 0;
3072         struct page *page;
3073         void *map;
3074         int ret;
3075
3076         do {
3077                 ret = __get_user_pages_fast(addr, 1, 0, &page);
3078                 if (!ret)
3079                         break;
3080
3081                 offset = addr & (PAGE_SIZE - 1);
3082                 size = min(PAGE_SIZE - offset, n - len);
3083
3084                 map = kmap_atomic(page, type);
3085                 memcpy(to, map+offset, size);
3086                 kunmap_atomic(map, type);
3087                 put_page(page);
3088
3089                 len  += size;
3090                 to   += size;
3091                 addr += size;
3092
3093         } while (len < n);
3094
3095         return len;
3096 }
3097
3098 static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
3099 {
3100         unsigned long bytes;
3101
3102         bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
3103
3104         return bytes == sizeof(*frame);
3105 }
3106
3107 static void
3108 perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
3109 {
3110         struct stack_frame frame;
3111         const void __user *fp;
3112
3113         if (!user_mode(regs))
3114                 regs = task_pt_regs(current);
3115
3116         fp = (void __user *)regs->bp;
3117
3118         callchain_store(entry, PERF_CONTEXT_USER);
3119         callchain_store(entry, regs->ip);
3120
3121         while (entry->nr < PERF_MAX_STACK_DEPTH) {
3122                 frame.next_frame             = NULL;
3123                 frame.return_address = 0;
3124
3125                 if (!copy_stack_frame(fp, &frame))
3126                         break;
3127
3128                 if ((unsigned long)fp < regs->sp)
3129                         break;
3130
3131                 callchain_store(entry, frame.return_address);
3132                 fp = frame.next_frame;
3133         }
3134 }
3135
3136 static void
3137 perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
3138 {
3139         int is_user;
3140
3141         if (!regs)
3142                 return;
3143
3144         is_user = user_mode(regs);
3145
3146         if (is_user && current->state != TASK_RUNNING)
3147                 return;
3148
3149         if (!is_user)
3150                 perf_callchain_kernel(regs, entry);
3151
3152         if (current->mm)
3153                 perf_callchain_user(regs, entry);
3154 }
3155
3156 struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
3157 {
3158         struct perf_callchain_entry *entry;
3159
3160         if (in_nmi())
3161                 entry = &__get_cpu_var(pmc_nmi_entry);
3162         else
3163                 entry = &__get_cpu_var(pmc_irq_entry);
3164
3165         entry->nr = 0;
3166
3167         perf_do_callchain(regs, entry);
3168
3169         return entry;
3170 }
3171
3172 void hw_perf_event_setup_online(int cpu)
3173 {
3174         init_debug_store_on_cpu(cpu);
3175
3176         switch (boot_cpu_data.x86_vendor) {
3177         case X86_VENDOR_AMD:
3178                 amd_pmu_cpu_online(cpu);
3179                 break;
3180         default:
3181                 return;
3182         }
3183 }
3184
3185 void hw_perf_event_setup_offline(int cpu)
3186 {
3187         init_debug_store_on_cpu(cpu);
3188
3189         switch (boot_cpu_data.x86_vendor) {
3190         case X86_VENDOR_AMD:
3191                 amd_pmu_cpu_offline(cpu);
3192                 break;
3193         default:
3194                 return;
3195         }
3196 }