perf, x86: Clean up event constraints code a bit
arch/x86/kernel/cpu/perf_event.c
1 /*
2  * Performance events x86 architecture code
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2009 Jaswinder Singh Rajput
7  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
9  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
11  *
12  *  For licensing details see kernel-base/COPYING
13  */
14
15 #include <linux/perf_event.h>
16 #include <linux/capability.h>
17 #include <linux/notifier.h>
18 #include <linux/hardirq.h>
19 #include <linux/kprobes.h>
20 #include <linux/module.h>
21 #include <linux/kdebug.h>
22 #include <linux/sched.h>
23 #include <linux/uaccess.h>
24 #include <linux/highmem.h>
25 #include <linux/cpu.h>
26 #include <linux/bitops.h>
27
28 #include <asm/apic.h>
29 #include <asm/stacktrace.h>
30 #include <asm/nmi.h>
31
32 static u64 perf_event_mask __read_mostly;
33
34 /* The maximal number of PEBS events: */
35 #define MAX_PEBS_EVENTS 4
36
37 /* The size of a BTS record in bytes: */
38 #define BTS_RECORD_SIZE         24
39
40 /* The size of a per-cpu BTS buffer in bytes: */
41 #define BTS_BUFFER_SIZE         (BTS_RECORD_SIZE * 2048)
42
43 /* The BTS overflow threshold in bytes from the end of the buffer: */
44 #define BTS_OVFL_TH             (BTS_RECORD_SIZE * 128)
45
46
47 /*
48  * Bits in the debugctlmsr controlling branch tracing.
49  */
50 #define X86_DEBUGCTL_TR                 (1 << 6)
51 #define X86_DEBUGCTL_BTS                (1 << 7)
52 #define X86_DEBUGCTL_BTINT              (1 << 8)
53 #define X86_DEBUGCTL_BTS_OFF_OS         (1 << 9)
54 #define X86_DEBUGCTL_BTS_OFF_USR        (1 << 10)
55
56 /*
57  * A debug store configuration.
58  *
59  * We only support architectures that use 64bit fields.
60  */
61 struct debug_store {
62         u64     bts_buffer_base;
63         u64     bts_index;
64         u64     bts_absolute_maximum;
65         u64     bts_interrupt_threshold;
66         u64     pebs_buffer_base;
67         u64     pebs_index;
68         u64     pebs_absolute_maximum;
69         u64     pebs_interrupt_threshold;
70         u64     pebs_event_reset[MAX_PEBS_EVENTS];
71 };
72
73 struct event_constraint {
74         union {
75                 unsigned long   idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
76                 u64             idxmsk64[1];
77         };
78         int     code;
79         int     cmask;
80         int     weight;
81 };
82
83 struct cpu_hw_events {
84         struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
85         unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
86         unsigned long           interrupts;
87         int                     enabled;
88         struct debug_store      *ds;
89
90         int                     n_events;
91         int                     n_added;
92         int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
93         struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
94 };
95
96 #define EVENT_CONSTRAINT(c, n, m) {     \
97         { .idxmsk64[0] = (n) },         \
98         .code = (c),                    \
99         .cmask = (m),                   \
100         .weight = HWEIGHT64((u64)(n)),  \
101 }
102
103 #define INTEL_EVENT_CONSTRAINT(c, n)            EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
104 #define FIXED_EVENT_CONSTRAINT(c, n)            EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
105
106 #define EVENT_CONSTRAINT_END                    EVENT_CONSTRAINT(0, 0, 0)
107
108 #define for_each_event_constraint(e, c)         for ((e) = (c); (e)->cmask; (e)++)
109
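/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * EVENT_CONSTRAINT(c, n, m) builds a constraint whose idxmsk64[0] is the
 * counter bitmask 'n' and whose weight is HWEIGHT64(n), i.e. the number
 * of counters the event may use.  INTEL_EVENT_CONSTRAINT(0x12, 0x2), for
 * example, describes an event that may only run on counter 1 (weight 1).
 * A table is terminated by EVENT_CONSTRAINT_END (cmask == 0) and walked
 * with for_each_event_constraint():
 */
#if 0	/* example only */
static struct event_constraint example_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0x12, 0x2),	/* MUL: counter 1 only */
	EVENT_CONSTRAINT_END
};

static void example_walk_constraints(void)
{
	struct event_constraint *c;

	for_each_event_constraint(c, example_constraints)
		pr_debug("event %#x -> counter mask %#llx, weight %d\n",
			 c->code, (unsigned long long)c->idxmsk64[0],
			 c->weight);
}
#endif
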
110 /*
111  * struct x86_pmu - generic x86 pmu
112  */
113 struct x86_pmu {
114         const char      *name;
115         int             version;
116         int             (*handle_irq)(struct pt_regs *);
117         void            (*disable_all)(void);
118         void            (*enable_all)(void);
119         void            (*enable)(struct hw_perf_event *, int);
120         void            (*disable)(struct hw_perf_event *, int);
121         unsigned        eventsel;
122         unsigned        perfctr;
123         u64             (*event_map)(int);
124         u64             (*raw_event)(u64);
125         int             max_events;
126         int             num_events;
127         int             num_events_fixed;
128         int             event_bits;
129         u64             event_mask;
130         int             apic;
131         u64             max_period;
132         u64             intel_ctrl;
133         void            (*enable_bts)(u64 config);
134         void            (*disable_bts)(void);
135
136         struct event_constraint *
137                         (*get_event_constraints)(struct cpu_hw_events *cpuc,
138                                                  struct perf_event *event);
139
140         void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
141                                                  struct perf_event *event);
142         struct event_constraint *event_constraints;
143 };
144
145 static struct x86_pmu x86_pmu __read_mostly;
146
147 static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
148         .enabled = 1,
149 };
150
151 static int x86_perf_event_set_period(struct perf_event *event,
152                              struct hw_perf_event *hwc, int idx);
153
154 /*
155  * Not sure about some of these
156  */
157 static const u64 p6_perfmon_event_map[] =
158 {
159   [PERF_COUNT_HW_CPU_CYCLES]            = 0x0079,
160   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
161   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x0f2e,
162   [PERF_COUNT_HW_CACHE_MISSES]          = 0x012e,
163   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
164   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
165   [PERF_COUNT_HW_BUS_CYCLES]            = 0x0062,
166 };
167
168 static u64 p6_pmu_event_map(int hw_event)
169 {
170         return p6_perfmon_event_map[hw_event];
171 }
172
173 /*
174  * Event setting that is specified not to count anything.
175  * We use this to effectively disable a counter.
176  *
177  * L2_RQSTS with 0 MESI unit mask.
178  */
179 #define P6_NOP_EVENT                    0x0000002EULL
180
181 static u64 p6_pmu_raw_event(u64 hw_event)
182 {
183 #define P6_EVNTSEL_EVENT_MASK           0x000000FFULL
184 #define P6_EVNTSEL_UNIT_MASK            0x0000FF00ULL
185 #define P6_EVNTSEL_EDGE_MASK            0x00040000ULL
186 #define P6_EVNTSEL_INV_MASK             0x00800000ULL
187 #define P6_EVNTSEL_REG_MASK             0xFF000000ULL
188
189 #define P6_EVNTSEL_MASK                 \
190         (P6_EVNTSEL_EVENT_MASK |        \
191          P6_EVNTSEL_UNIT_MASK  |        \
192          P6_EVNTSEL_EDGE_MASK  |        \
193          P6_EVNTSEL_INV_MASK   |        \
194          P6_EVNTSEL_REG_MASK)
195
196         return hw_event & P6_EVNTSEL_MASK;
197 }
198
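/*
 * Editor's note -- worked example (assuming the standard EVNTSEL bit
 * layout): a raw config of 0x00530112 (event 0x12, umask 0x01, plus the
 * USR, OS, INT and ENABLE bits) is filtered by P6_EVNTSEL_MASK down to
 * 0x00000112.  Only the event-select, unit-mask, edge, invert and
 * counter-mask fields survive; the interrupt/privilege bits are set by
 * __hw_perf_event_init() from the perf_event_attr, and the enable bit is
 * applied when the event is programmed onto a counter.
 */
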
199 static struct event_constraint intel_p6_event_constraints[] =
200 {
201         INTEL_EVENT_CONSTRAINT(0xc1, 0x1),      /* FLOPS */
202         INTEL_EVENT_CONSTRAINT(0x10, 0x1),      /* FP_COMP_OPS_EXE */
203         INTEL_EVENT_CONSTRAINT(0x11, 0x1),      /* FP_ASSIST */
204         INTEL_EVENT_CONSTRAINT(0x12, 0x2),      /* MUL */
205         INTEL_EVENT_CONSTRAINT(0x13, 0x2),      /* DIV */
206         INTEL_EVENT_CONSTRAINT(0x14, 0x1),      /* CYCLES_DIV_BUSY */
207         EVENT_CONSTRAINT_END
208 };
209
210 /*
211  * Intel PerfMon v3. Used on Core2 and later.
212  */
213 static const u64 intel_perfmon_event_map[] =
214 {
215   [PERF_COUNT_HW_CPU_CYCLES]            = 0x003c,
216   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
217   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x4f2e,
218   [PERF_COUNT_HW_CACHE_MISSES]          = 0x412e,
219   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
220   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
221   [PERF_COUNT_HW_BUS_CYCLES]            = 0x013c,
222 };
223
224 static struct event_constraint intel_core_event_constraints[] =
225 {
226         FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
227         FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
228         INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
229         INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
230         INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
231         INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
232         INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
233         INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
234         INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
235         INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
236         INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
237         EVENT_CONSTRAINT_END
238 };
239
240 static struct event_constraint intel_nehalem_event_constraints[] =
241 {
242         FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
243         FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
244         INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
245         INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
246         INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
247         INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
248         INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
249         INTEL_EVENT_CONSTRAINT(0x4c, 0x3), /* LOAD_HIT_PRE */
250         INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
251         INTEL_EVENT_CONSTRAINT(0x52, 0x3), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
252         INTEL_EVENT_CONSTRAINT(0x53, 0x3), /* L1D_CACHE_LOCK_FB_HIT */
253         INTEL_EVENT_CONSTRAINT(0xc5, 0x3), /* CACHE_LOCK_CYCLES */
254         EVENT_CONSTRAINT_END
255 };
256
257 static struct event_constraint intel_gen_event_constraints[] =
258 {
259         FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
260         FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
261         EVENT_CONSTRAINT_END
262 };
263
264 static u64 intel_pmu_event_map(int hw_event)
265 {
266         return intel_perfmon_event_map[hw_event];
267 }
268
269 /*
270  * Generalized hw caching related hw_event table, filled
271  * in on a per model basis. A value of 0 means
272  * 'not supported', -1 means 'hw_event makes no sense on
273  * this CPU', any other value means the raw hw_event
274  * ID.
275  */
276
277 #define C(x) PERF_COUNT_HW_CACHE_##x
278
279 static u64 __read_mostly hw_cache_event_ids
280                                 [PERF_COUNT_HW_CACHE_MAX]
281                                 [PERF_COUNT_HW_CACHE_OP_MAX]
282                                 [PERF_COUNT_HW_CACHE_RESULT_MAX];
283
284 static __initconst u64 nehalem_hw_cache_event_ids
285                                 [PERF_COUNT_HW_CACHE_MAX]
286                                 [PERF_COUNT_HW_CACHE_OP_MAX]
287                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
288 {
289  [ C(L1D) ] = {
290         [ C(OP_READ) ] = {
291                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI            */
292                 [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE         */
293         },
294         [ C(OP_WRITE) ] = {
295                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI            */
296                 [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE         */
297         },
298         [ C(OP_PREFETCH) ] = {
299                 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
300                 [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
301         },
302  },
303  [ C(L1I ) ] = {
304         [ C(OP_READ) ] = {
305                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
306                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
307         },
308         [ C(OP_WRITE) ] = {
309                 [ C(RESULT_ACCESS) ] = -1,
310                 [ C(RESULT_MISS)   ] = -1,
311         },
312         [ C(OP_PREFETCH) ] = {
313                 [ C(RESULT_ACCESS) ] = 0x0,
314                 [ C(RESULT_MISS)   ] = 0x0,
315         },
316  },
317  [ C(LL  ) ] = {
318         [ C(OP_READ) ] = {
319                 [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
320                 [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
321         },
322         [ C(OP_WRITE) ] = {
323                 [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
324                 [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
325         },
326         [ C(OP_PREFETCH) ] = {
327                 [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
328                 [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
329         },
330  },
331  [ C(DTLB) ] = {
332         [ C(OP_READ) ] = {
333                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
334                 [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
335         },
336         [ C(OP_WRITE) ] = {
337                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
338                 [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
339         },
340         [ C(OP_PREFETCH) ] = {
341                 [ C(RESULT_ACCESS) ] = 0x0,
342                 [ C(RESULT_MISS)   ] = 0x0,
343         },
344  },
345  [ C(ITLB) ] = {
346         [ C(OP_READ) ] = {
347                 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
348                 [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
349         },
350         [ C(OP_WRITE) ] = {
351                 [ C(RESULT_ACCESS) ] = -1,
352                 [ C(RESULT_MISS)   ] = -1,
353         },
354         [ C(OP_PREFETCH) ] = {
355                 [ C(RESULT_ACCESS) ] = -1,
356                 [ C(RESULT_MISS)   ] = -1,
357         },
358  },
359  [ C(BPU ) ] = {
360         [ C(OP_READ) ] = {
361                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
362                 [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
363         },
364         [ C(OP_WRITE) ] = {
365                 [ C(RESULT_ACCESS) ] = -1,
366                 [ C(RESULT_MISS)   ] = -1,
367         },
368         [ C(OP_PREFETCH) ] = {
369                 [ C(RESULT_ACCESS) ] = -1,
370                 [ C(RESULT_MISS)   ] = -1,
371         },
372  },
373 };
374
375 static __initconst u64 core2_hw_cache_event_ids
376                                 [PERF_COUNT_HW_CACHE_MAX]
377                                 [PERF_COUNT_HW_CACHE_OP_MAX]
378                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
379 {
380  [ C(L1D) ] = {
381         [ C(OP_READ) ] = {
382                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
383                 [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
384         },
385         [ C(OP_WRITE) ] = {
386                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
387                 [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
388         },
389         [ C(OP_PREFETCH) ] = {
390                 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
391                 [ C(RESULT_MISS)   ] = 0,
392         },
393  },
394  [ C(L1I ) ] = {
395         [ C(OP_READ) ] = {
396                 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
397                 [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
398         },
399         [ C(OP_WRITE) ] = {
400                 [ C(RESULT_ACCESS) ] = -1,
401                 [ C(RESULT_MISS)   ] = -1,
402         },
403         [ C(OP_PREFETCH) ] = {
404                 [ C(RESULT_ACCESS) ] = 0,
405                 [ C(RESULT_MISS)   ] = 0,
406         },
407  },
408  [ C(LL  ) ] = {
409         [ C(OP_READ) ] = {
410                 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
411                 [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
412         },
413         [ C(OP_WRITE) ] = {
414                 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
415                 [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
416         },
417         [ C(OP_PREFETCH) ] = {
418                 [ C(RESULT_ACCESS) ] = 0,
419                 [ C(RESULT_MISS)   ] = 0,
420         },
421  },
422  [ C(DTLB) ] = {
423         [ C(OP_READ) ] = {
424                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
425                 [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
426         },
427         [ C(OP_WRITE) ] = {
428                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
429                 [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
430         },
431         [ C(OP_PREFETCH) ] = {
432                 [ C(RESULT_ACCESS) ] = 0,
433                 [ C(RESULT_MISS)   ] = 0,
434         },
435  },
436  [ C(ITLB) ] = {
437         [ C(OP_READ) ] = {
438                 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
439                 [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
440         },
441         [ C(OP_WRITE) ] = {
442                 [ C(RESULT_ACCESS) ] = -1,
443                 [ C(RESULT_MISS)   ] = -1,
444         },
445         [ C(OP_PREFETCH) ] = {
446                 [ C(RESULT_ACCESS) ] = -1,
447                 [ C(RESULT_MISS)   ] = -1,
448         },
449  },
450  [ C(BPU ) ] = {
451         [ C(OP_READ) ] = {
452                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
453                 [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
454         },
455         [ C(OP_WRITE) ] = {
456                 [ C(RESULT_ACCESS) ] = -1,
457                 [ C(RESULT_MISS)   ] = -1,
458         },
459         [ C(OP_PREFETCH) ] = {
460                 [ C(RESULT_ACCESS) ] = -1,
461                 [ C(RESULT_MISS)   ] = -1,
462         },
463  },
464 };
465
466 static __initconst u64 atom_hw_cache_event_ids
467                                 [PERF_COUNT_HW_CACHE_MAX]
468                                 [PERF_COUNT_HW_CACHE_OP_MAX]
469                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
470 {
471  [ C(L1D) ] = {
472         [ C(OP_READ) ] = {
473                 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
474                 [ C(RESULT_MISS)   ] = 0,
475         },
476         [ C(OP_WRITE) ] = {
477                 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
478                 [ C(RESULT_MISS)   ] = 0,
479         },
480         [ C(OP_PREFETCH) ] = {
481                 [ C(RESULT_ACCESS) ] = 0x0,
482                 [ C(RESULT_MISS)   ] = 0,
483         },
484  },
485  [ C(L1I ) ] = {
486         [ C(OP_READ) ] = {
487                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
488                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
489         },
490         [ C(OP_WRITE) ] = {
491                 [ C(RESULT_ACCESS) ] = -1,
492                 [ C(RESULT_MISS)   ] = -1,
493         },
494         [ C(OP_PREFETCH) ] = {
495                 [ C(RESULT_ACCESS) ] = 0,
496                 [ C(RESULT_MISS)   ] = 0,
497         },
498  },
499  [ C(LL  ) ] = {
500         [ C(OP_READ) ] = {
501                 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
502                 [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
503         },
504         [ C(OP_WRITE) ] = {
505                 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
506                 [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
507         },
508         [ C(OP_PREFETCH) ] = {
509                 [ C(RESULT_ACCESS) ] = 0,
510                 [ C(RESULT_MISS)   ] = 0,
511         },
512  },
513  [ C(DTLB) ] = {
514         [ C(OP_READ) ] = {
515                 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
516                 [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
517         },
518         [ C(OP_WRITE) ] = {
519                 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
520                 [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
521         },
522         [ C(OP_PREFETCH) ] = {
523                 [ C(RESULT_ACCESS) ] = 0,
524                 [ C(RESULT_MISS)   ] = 0,
525         },
526  },
527  [ C(ITLB) ] = {
528         [ C(OP_READ) ] = {
529                 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
530                 [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
531         },
532         [ C(OP_WRITE) ] = {
533                 [ C(RESULT_ACCESS) ] = -1,
534                 [ C(RESULT_MISS)   ] = -1,
535         },
536         [ C(OP_PREFETCH) ] = {
537                 [ C(RESULT_ACCESS) ] = -1,
538                 [ C(RESULT_MISS)   ] = -1,
539         },
540  },
541  [ C(BPU ) ] = {
542         [ C(OP_READ) ] = {
543                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
544                 [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
545         },
546         [ C(OP_WRITE) ] = {
547                 [ C(RESULT_ACCESS) ] = -1,
548                 [ C(RESULT_MISS)   ] = -1,
549         },
550         [ C(OP_PREFETCH) ] = {
551                 [ C(RESULT_ACCESS) ] = -1,
552                 [ C(RESULT_MISS)   ] = -1,
553         },
554  },
555 };
556
557 static u64 intel_pmu_raw_event(u64 hw_event)
558 {
559 #define CORE_EVNTSEL_EVENT_MASK         0x000000FFULL
560 #define CORE_EVNTSEL_UNIT_MASK          0x0000FF00ULL
561 #define CORE_EVNTSEL_EDGE_MASK          0x00040000ULL
562 #define CORE_EVNTSEL_INV_MASK           0x00800000ULL
563 #define CORE_EVNTSEL_REG_MASK           0xFF000000ULL
564
565 #define CORE_EVNTSEL_MASK               \
566         (INTEL_ARCH_EVTSEL_MASK |       \
567          INTEL_ARCH_UNIT_MASK   |       \
568          INTEL_ARCH_EDGE_MASK   |       \
569          INTEL_ARCH_INV_MASK    |       \
570          INTEL_ARCH_CNT_MASK)
571
572         return hw_event & CORE_EVNTSEL_MASK;
573 }
574
575 static __initconst u64 amd_hw_cache_event_ids
576                                 [PERF_COUNT_HW_CACHE_MAX]
577                                 [PERF_COUNT_HW_CACHE_OP_MAX]
578                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
579 {
580  [ C(L1D) ] = {
581         [ C(OP_READ) ] = {
582                 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
583                 [ C(RESULT_MISS)   ] = 0x0041, /* Data Cache Misses          */
584         },
585         [ C(OP_WRITE) ] = {
586                 [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
587                 [ C(RESULT_MISS)   ] = 0,
588         },
589         [ C(OP_PREFETCH) ] = {
590                 [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
591                 [ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
592         },
593  },
594  [ C(L1I ) ] = {
595         [ C(OP_READ) ] = {
596                 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
597                 [ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
598         },
599         [ C(OP_WRITE) ] = {
600                 [ C(RESULT_ACCESS) ] = -1,
601                 [ C(RESULT_MISS)   ] = -1,
602         },
603         [ C(OP_PREFETCH) ] = {
604                 [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
605                 [ C(RESULT_MISS)   ] = 0,
606         },
607  },
608  [ C(LL  ) ] = {
609         [ C(OP_READ) ] = {
610                 [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
611                 [ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
612         },
613         [ C(OP_WRITE) ] = {
614                 [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
615                 [ C(RESULT_MISS)   ] = 0,
616         },
617         [ C(OP_PREFETCH) ] = {
618                 [ C(RESULT_ACCESS) ] = 0,
619                 [ C(RESULT_MISS)   ] = 0,
620         },
621  },
622  [ C(DTLB) ] = {
623         [ C(OP_READ) ] = {
624                 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
625                 [ C(RESULT_MISS)   ] = 0x0046, /* L1 DTLB and L2 DTLB Miss   */
626         },
627         [ C(OP_WRITE) ] = {
628                 [ C(RESULT_ACCESS) ] = 0,
629                 [ C(RESULT_MISS)   ] = 0,
630         },
631         [ C(OP_PREFETCH) ] = {
632                 [ C(RESULT_ACCESS) ] = 0,
633                 [ C(RESULT_MISS)   ] = 0,
634         },
635  },
636  [ C(ITLB) ] = {
637         [ C(OP_READ) ] = {
638                 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
639                 [ C(RESULT_MISS)   ] = 0x0085, /* Instr. fetch ITLB misses   */
640         },
641         [ C(OP_WRITE) ] = {
642                 [ C(RESULT_ACCESS) ] = -1,
643                 [ C(RESULT_MISS)   ] = -1,
644         },
645         [ C(OP_PREFETCH) ] = {
646                 [ C(RESULT_ACCESS) ] = -1,
647                 [ C(RESULT_MISS)   ] = -1,
648         },
649  },
650  [ C(BPU ) ] = {
651         [ C(OP_READ) ] = {
652                 [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
653                 [ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
654         },
655         [ C(OP_WRITE) ] = {
656                 [ C(RESULT_ACCESS) ] = -1,
657                 [ C(RESULT_MISS)   ] = -1,
658         },
659         [ C(OP_PREFETCH) ] = {
660                 [ C(RESULT_ACCESS) ] = -1,
661                 [ C(RESULT_MISS)   ] = -1,
662         },
663  },
664 };
665
666 /*
667  * AMD Performance Monitor K7 and later.
668  */
669 static const u64 amd_perfmon_event_map[] =
670 {
671   [PERF_COUNT_HW_CPU_CYCLES]            = 0x0076,
672   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
673   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x0080,
674   [PERF_COUNT_HW_CACHE_MISSES]          = 0x0081,
675   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
676   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
677 };
678
679 static u64 amd_pmu_event_map(int hw_event)
680 {
681         return amd_perfmon_event_map[hw_event];
682 }
683
684 static u64 amd_pmu_raw_event(u64 hw_event)
685 {
686 #define K7_EVNTSEL_EVENT_MASK   0x7000000FFULL
687 #define K7_EVNTSEL_UNIT_MASK    0x00000FF00ULL
688 #define K7_EVNTSEL_EDGE_MASK    0x000040000ULL
689 #define K7_EVNTSEL_INV_MASK     0x000800000ULL
690 #define K7_EVNTSEL_REG_MASK     0x0FF000000ULL
691
692 #define K7_EVNTSEL_MASK                 \
693         (K7_EVNTSEL_EVENT_MASK |        \
694          K7_EVNTSEL_UNIT_MASK  |        \
695          K7_EVNTSEL_EDGE_MASK  |        \
696          K7_EVNTSEL_INV_MASK   |        \
697          K7_EVNTSEL_REG_MASK)
698
699         return hw_event & K7_EVNTSEL_MASK;
700 }
701
702 /*
703  * Propagate event elapsed time into the generic event.
704  * Can only be executed on the CPU where the event is active.
705  * Returns the delta events processed.
706  */
707 static u64
708 x86_perf_event_update(struct perf_event *event,
709                         struct hw_perf_event *hwc, int idx)
710 {
711         int shift = 64 - x86_pmu.event_bits;
712         u64 prev_raw_count, new_raw_count;
713         s64 delta;
714
715         if (idx == X86_PMC_IDX_FIXED_BTS)
716                 return 0;
717
718         /*
719          * Careful: an NMI might modify the previous event value.
720          *
721          * Our tactic to handle this is to first atomically read and
722          * exchange a new raw count - then add that new-prev delta
723          * count to the generic event atomically:
724          */
725 again:
726         prev_raw_count = atomic64_read(&hwc->prev_count);
727         rdmsrl(hwc->event_base + idx, new_raw_count);
728
729         if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
730                                         new_raw_count) != prev_raw_count)
731                 goto again;
732
733         /*
734          * Now we have the new raw value and have updated the prev
735          * timestamp already. We can now calculate the elapsed delta
736          * (event-)time and add that to the generic event.
737          *
738          * Careful, not all hw sign-extends above the physical width
739          * of the count.
740          */
741         delta = (new_raw_count << shift) - (prev_raw_count << shift);
742         delta >>= shift;
743
744         atomic64_add(delta, &event->count);
745         atomic64_sub(delta, &hwc->period_left);
746
747         return new_raw_count;
748 }
749
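/*
 * Editor's note -- worked example, assuming 48-bit counters
 * (x86_pmu.event_bits == 48, so shift == 16): if prev_count is
 * 0xFFFFFFFFFFFE and the counter has wrapped to a raw value of 0x5, then
 * delta = ((0x5 << 16) - (0xFFFFFFFFFFFEULL << 16)) >> 16 = 7, i.e. the
 * 2 events up to the wrap plus the 5 events after it.  The shift
 * re-centers the subtraction at the counter width, so the wrap is
 * handled correctly even when the hardware does not sign-extend bits
 * 48-63.
 */
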
750 static atomic_t active_events;
751 static DEFINE_MUTEX(pmc_reserve_mutex);
752
753 static bool reserve_pmc_hardware(void)
754 {
755 #ifdef CONFIG_X86_LOCAL_APIC
756         int i;
757
758         if (nmi_watchdog == NMI_LOCAL_APIC)
759                 disable_lapic_nmi_watchdog();
760
761         for (i = 0; i < x86_pmu.num_events; i++) {
762                 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
763                         goto perfctr_fail;
764         }
765
766         for (i = 0; i < x86_pmu.num_events; i++) {
767                 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
768                         goto eventsel_fail;
769         }
770 #endif
771
772         return true;
773
774 #ifdef CONFIG_X86_LOCAL_APIC
775 eventsel_fail:
776         for (i--; i >= 0; i--)
777                 release_evntsel_nmi(x86_pmu.eventsel + i);
778
779         i = x86_pmu.num_events;
780
781 perfctr_fail:
782         for (i--; i >= 0; i--)
783                 release_perfctr_nmi(x86_pmu.perfctr + i);
784
785         if (nmi_watchdog == NMI_LOCAL_APIC)
786                 enable_lapic_nmi_watchdog();
787
788         return false;
789 #endif
790 }
791
792 static void release_pmc_hardware(void)
793 {
794 #ifdef CONFIG_X86_LOCAL_APIC
795         int i;
796
797         for (i = 0; i < x86_pmu.num_events; i++) {
798                 release_perfctr_nmi(x86_pmu.perfctr + i);
799                 release_evntsel_nmi(x86_pmu.eventsel + i);
800         }
801
802         if (nmi_watchdog == NMI_LOCAL_APIC)
803                 enable_lapic_nmi_watchdog();
804 #endif
805 }
806
807 static inline bool bts_available(void)
808 {
809         return x86_pmu.enable_bts != NULL;
810 }
811
812 static inline void init_debug_store_on_cpu(int cpu)
813 {
814         struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
815
816         if (!ds)
817                 return;
818
819         wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
820                      (u32)((u64)(unsigned long)ds),
821                      (u32)((u64)(unsigned long)ds >> 32));
822 }
823
824 static inline void fini_debug_store_on_cpu(int cpu)
825 {
826         if (!per_cpu(cpu_hw_events, cpu).ds)
827                 return;
828
829         wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
830 }
831
832 static void release_bts_hardware(void)
833 {
834         int cpu;
835
836         if (!bts_available())
837                 return;
838
839         get_online_cpus();
840
841         for_each_online_cpu(cpu)
842                 fini_debug_store_on_cpu(cpu);
843
844         for_each_possible_cpu(cpu) {
845                 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
846
847                 if (!ds)
848                         continue;
849
850                 per_cpu(cpu_hw_events, cpu).ds = NULL;
851
852                 kfree((void *)(unsigned long)ds->bts_buffer_base);
853                 kfree(ds);
854         }
855
856         put_online_cpus();
857 }
858
859 static int reserve_bts_hardware(void)
860 {
861         int cpu, err = 0;
862
863         if (!bts_available())
864                 return 0;
865
866         get_online_cpus();
867
868         for_each_possible_cpu(cpu) {
869                 struct debug_store *ds;
870                 void *buffer;
871
872                 err = -ENOMEM;
873                 buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
874                 if (unlikely(!buffer))
875                         break;
876
877                 ds = kzalloc(sizeof(*ds), GFP_KERNEL);
878                 if (unlikely(!ds)) {
879                         kfree(buffer);
880                         break;
881                 }
882
883                 ds->bts_buffer_base = (u64)(unsigned long)buffer;
884                 ds->bts_index = ds->bts_buffer_base;
885                 ds->bts_absolute_maximum =
886                         ds->bts_buffer_base + BTS_BUFFER_SIZE;
887                 ds->bts_interrupt_threshold =
888                         ds->bts_absolute_maximum - BTS_OVFL_TH;
889
890                 per_cpu(cpu_hw_events, cpu).ds = ds;
891                 err = 0;
892         }
893
894         if (err)
895                 release_bts_hardware();
896         else {
897                 for_each_online_cpu(cpu)
898                         init_debug_store_on_cpu(cpu);
899         }
900
901         put_online_cpus();
902
903         return err;
904 }
905
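/*
 * Editor's note -- worked example of the BTS buffer layout set up above:
 * with BTS_RECORD_SIZE == 24 and BTS_BUFFER_SIZE == 24 * 2048, each
 * per-cpu buffer holds 2048 branch records.  bts_interrupt_threshold is
 * placed BTS_OVFL_TH == 24 * 128 bytes before bts_absolute_maximum, so
 * the threshold interrupt fires while there is still room for 128 more
 * records.
 */
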
906 static void hw_perf_event_destroy(struct perf_event *event)
907 {
908         if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
909                 release_pmc_hardware();
910                 release_bts_hardware();
911                 mutex_unlock(&pmc_reserve_mutex);
912         }
913 }
914
915 static inline int x86_pmu_initialized(void)
916 {
917         return x86_pmu.handle_irq != NULL;
918 }
919
920 static inline int
921 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
922 {
923         unsigned int cache_type, cache_op, cache_result;
924         u64 config, val;
925
926         config = attr->config;
927
928         cache_type = (config >>  0) & 0xff;
929         if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
930                 return -EINVAL;
931
932         cache_op = (config >>  8) & 0xff;
933         if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
934                 return -EINVAL;
935
936         cache_result = (config >> 16) & 0xff;
937         if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
938                 return -EINVAL;
939
940         val = hw_cache_event_ids[cache_type][cache_op][cache_result];
941
942         if (val == 0)
943                 return -ENOENT;
944
945         if (val == -1)
946                 return -EINVAL;
947
948         hwc->config |= val;
949
950         return 0;
951 }
952
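/*
 * Editor's note -- illustrative sketch, not part of the original file.
 * PERF_TYPE_HW_CACHE configs are encoded as (type | op << 8 | result << 16),
 * which is exactly what set_ext_hw_attr() unpacks above.  An L1D read-miss
 * request, for instance, selects
 * hw_cache_event_ids[L1D][OP_READ][RESULT_MISS], which the Nehalem table
 * above maps to 0x0140 (L1D_CACHE_LD.I_STATE):
 */
#if 0	/* example only */
static u64 example_l1d_read_miss_config(void)
{
	return PERF_COUNT_HW_CACHE_L1D |
	       (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	       (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
}
#endif
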
953 static void intel_pmu_enable_bts(u64 config)
954 {
955         unsigned long debugctlmsr;
956
957         debugctlmsr = get_debugctlmsr();
958
959         debugctlmsr |= X86_DEBUGCTL_TR;
960         debugctlmsr |= X86_DEBUGCTL_BTS;
961         debugctlmsr |= X86_DEBUGCTL_BTINT;
962
963         if (!(config & ARCH_PERFMON_EVENTSEL_OS))
964                 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
965
966         if (!(config & ARCH_PERFMON_EVENTSEL_USR))
967                 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
968
969         update_debugctlmsr(debugctlmsr);
970 }
971
972 static void intel_pmu_disable_bts(void)
973 {
974         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
975         unsigned long debugctlmsr;
976
977         if (!cpuc->ds)
978                 return;
979
980         debugctlmsr = get_debugctlmsr();
981
982         debugctlmsr &=
983                 ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
984                   X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
985
986         update_debugctlmsr(debugctlmsr);
987 }
988
989 /*
990  * Setup the hardware configuration for a given attr_type
991  */
992 static int __hw_perf_event_init(struct perf_event *event)
993 {
994         struct perf_event_attr *attr = &event->attr;
995         struct hw_perf_event *hwc = &event->hw;
996         u64 config;
997         int err;
998
999         if (!x86_pmu_initialized())
1000                 return -ENODEV;
1001
1002         err = 0;
1003         if (!atomic_inc_not_zero(&active_events)) {
1004                 mutex_lock(&pmc_reserve_mutex);
1005                 if (atomic_read(&active_events) == 0) {
1006                         if (!reserve_pmc_hardware())
1007                                 err = -EBUSY;
1008                         else
1009                                 err = reserve_bts_hardware();
1010                 }
1011                 if (!err)
1012                         atomic_inc(&active_events);
1013                 mutex_unlock(&pmc_reserve_mutex);
1014         }
1015         if (err)
1016                 return err;
1017
1018         event->destroy = hw_perf_event_destroy;
1019
1020         /*
1021          * Generate PMC IRQs:
1022          * (keep 'enabled' bit clear for now)
1023          */
1024         hwc->config = ARCH_PERFMON_EVENTSEL_INT;
1025
1026         hwc->idx = -1;
1027
1028         /*
1029          * Count user and OS events unless requested not to.
1030          */
1031         if (!attr->exclude_user)
1032                 hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
1033         if (!attr->exclude_kernel)
1034                 hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
1035
1036         if (!hwc->sample_period) {
1037                 hwc->sample_period = x86_pmu.max_period;
1038                 hwc->last_period = hwc->sample_period;
1039                 atomic64_set(&hwc->period_left, hwc->sample_period);
1040         } else {
1041                 /*
1042                  * If we have a PMU initialized but no APIC
1043                  * interrupts, we cannot sample hardware
1044                  * events (user-space has to fall back and
1045                  * sample via a hrtimer based software event):
1046                  */
1047                 if (!x86_pmu.apic)
1048                         return -EOPNOTSUPP;
1049         }
1050
1051         /*
1052          * Raw hw_event types provide the config in the hw_event structure
1053          */
1054         if (attr->type == PERF_TYPE_RAW) {
1055                 hwc->config |= x86_pmu.raw_event(attr->config);
1056                 return 0;
1057         }
1058
1059         if (attr->type == PERF_TYPE_HW_CACHE)
1060                 return set_ext_hw_attr(hwc, attr);
1061
1062         if (attr->config >= x86_pmu.max_events)
1063                 return -EINVAL;
1064
1065         /*
1066          * The generic map:
1067          */
1068         config = x86_pmu.event_map(attr->config);
1069
1070         if (config == 0)
1071                 return -ENOENT;
1072
1073         if (config == -1LL)
1074                 return -EINVAL;
1075
1076         /*
1077          * Branch tracing:
1078          */
1079         if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
1080             (hwc->sample_period == 1)) {
1081                 /* BTS is not supported by this architecture. */
1082                 if (!bts_available())
1083                         return -EOPNOTSUPP;
1084
1085                 /* BTS is currently only allowed for user-mode. */
1086                 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1087                         return -EOPNOTSUPP;
1088         }
1089
1090         hwc->config |= config;
1091
1092         return 0;
1093 }
1094
1095 static void p6_pmu_disable_all(void)
1096 {
1097         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1098         u64 val;
1099
1100         if (!cpuc->enabled)
1101                 return;
1102
1103         cpuc->enabled = 0;
1104         barrier();
1105
1106         /* p6 only has one enable register */
1107         rdmsrl(MSR_P6_EVNTSEL0, val);
1108         val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1109         wrmsrl(MSR_P6_EVNTSEL0, val);
1110 }
1111
1112 static void intel_pmu_disable_all(void)
1113 {
1114         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1115
1116         if (!cpuc->enabled)
1117                 return;
1118
1119         cpuc->enabled = 0;
1120         barrier();
1121
1122         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1123
1124         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1125                 intel_pmu_disable_bts();
1126 }
1127
1128 static void amd_pmu_disable_all(void)
1129 {
1130         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1131         int idx;
1132
1133         if (!cpuc->enabled)
1134                 return;
1135
1136         cpuc->enabled = 0;
1137         /*
1138          * ensure we write the disable before we start disabling the
1139          * events proper, so that amd_pmu_enable_event() does the
1140          * right thing.
1141          */
1142         barrier();
1143
1144         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1145                 u64 val;
1146
1147                 if (!test_bit(idx, cpuc->active_mask))
1148                         continue;
1149                 rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
1150                 if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
1151                         continue;
1152                 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1153                 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1154         }
1155 }
1156
1157 void hw_perf_disable(void)
1158 {
1159         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1160
1161         if (!x86_pmu_initialized())
1162                 return;
1163
1164         if (cpuc->enabled)
1165                 cpuc->n_added = 0;
1166
1167         x86_pmu.disable_all();
1168 }
1169
1170 static void p6_pmu_enable_all(void)
1171 {
1172         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1173         unsigned long val;
1174
1175         if (cpuc->enabled)
1176                 return;
1177
1178         cpuc->enabled = 1;
1179         barrier();
1180
1181         /* p6 only has one enable register */
1182         rdmsrl(MSR_P6_EVNTSEL0, val);
1183         val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1184         wrmsrl(MSR_P6_EVNTSEL0, val);
1185 }
1186
1187 static void intel_pmu_enable_all(void)
1188 {
1189         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1190
1191         if (cpuc->enabled)
1192                 return;
1193
1194         cpuc->enabled = 1;
1195         barrier();
1196
1197         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
1198
1199         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1200                 struct perf_event *event =
1201                         cpuc->events[X86_PMC_IDX_FIXED_BTS];
1202
1203                 if (WARN_ON_ONCE(!event))
1204                         return;
1205
1206                 intel_pmu_enable_bts(event->hw.config);
1207         }
1208 }
1209
1210 static void amd_pmu_enable_all(void)
1211 {
1212         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1213         int idx;
1214
1215         if (cpuc->enabled)
1216                 return;
1217
1218         cpuc->enabled = 1;
1219         barrier();
1220
1221         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1222                 struct perf_event *event = cpuc->events[idx];
1223                 u64 val;
1224
1225                 if (!test_bit(idx, cpuc->active_mask))
1226                         continue;
1227
1228                 val = event->hw.config;
1229                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1230                 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1231         }
1232 }
1233
1234 static const struct pmu pmu;
1235
1236 static inline int is_x86_event(struct perf_event *event)
1237 {
1238         return event->pmu == &pmu;
1239 }
1240
1241 static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
1242 {
1243         struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
1244         unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
1245         int i, j, w, wmax, num = 0;
1246         struct hw_perf_event *hwc;
1247
1248         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1249
1250         for (i = 0; i < n; i++) {
1251                 constraints[i] =
1252                   x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
1253         }
1254
1255         /*
1256          * fastpath, try to reuse previous register
1257          */
1258         for (i = 0; i < n; i++) {
1259                 hwc = &cpuc->event_list[i]->hw;
1260                 c = constraints[i];
1261
1262                 /* never assigned */
1263                 if (hwc->idx == -1)
1264                         break;
1265
1266                 /* constraint still honored */
1267                 if (!test_bit(hwc->idx, c->idxmsk))
1268                         break;
1269
1270                 /* not already used */
1271                 if (test_bit(hwc->idx, used_mask))
1272                         break;
1273
1274                 set_bit(hwc->idx, used_mask);
1275                 if (assign)
1276                         assign[i] = hwc->idx;
1277         }
1278         if (i == n)
1279                 goto done;
1280
1281         /*
1282          * begin slow path
1283          */
1284
1285         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1286
1287         /*
1288          * weight = number of possible counters
1289          *
1290          * 1    = most constrained, only works on one counter
1291          * wmax = least constrained, works on any counter
1292          *
1293          * assign events to counters starting with most
1294          * constrained events.
1295          */
1296         wmax = x86_pmu.num_events;
1297
1298         /*
1299          * when fixed event counters are present,
1300          * wmax is incremented by 1 to account
1301          * for one more choice
1302          */
1303         if (x86_pmu.num_events_fixed)
1304                 wmax++;
1305
1306         for (w = 1, num = n; num && w <= wmax; w++) {
1307                 /* for each event */
1308                 for (i = 0; num && i < n; i++) {
1309                         c = constraints[i];
1310                         hwc = &cpuc->event_list[i]->hw;
1311
1312                         if (c->weight != w)
1313                                 continue;
1314
1315                         for_each_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
1316                                 if (!test_bit(j, used_mask))
1317                                         break;
1318                         }
1319
1320                         if (j == X86_PMC_IDX_MAX)
1321                                 break;
1322
1323                         set_bit(j, used_mask);
1324
1325                         if (assign)
1326                                 assign[i] = j;
1327                         num--;
1328                 }
1329         }
1330 done:
1331         /*
1332          * scheduling failed or is just a simulation,
1333          * free resources if necessary
1334          */
1335         if (!assign || num) {
1336                 for (i = 0; i < n; i++) {
1337                         if (x86_pmu.put_event_constraints)
1338                                 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
1339                 }
1340         }
1341         return num ? -ENOSPC : 0;
1342 }
1343
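/*
 * Editor's note -- illustrative example of the weight-ordered assignment
 * above (assuming 4 generic counters): with event A constrained to
 * counter 0 only (weight 1), event B to counters 0-1 (weight 2) and
 * event C unconstrained (weight 4), the w == 1 pass places A on counter
 * 0, the w == 2 pass places B on counter 1, and the final pass places C
 * on counter 2.  Scheduling the events in list order instead could burn
 * counter 0 on B or C first and leave A unschedulable.
 */
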
1344 /*
1345  * dogrp: true if we must also collect the sibling events (group)
1346  * returns total number of events and error code
1347  */
1348 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
1349 {
1350         struct perf_event *event;
1351         int n, max_count;
1352
1353         max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
1354
1355         /* current number of events already accepted */
1356         n = cpuc->n_events;
1357
1358         if (is_x86_event(leader)) {
1359                 if (n >= max_count)
1360                         return -ENOSPC;
1361                 cpuc->event_list[n] = leader;
1362                 n++;
1363         }
1364         if (!dogrp)
1365                 return n;
1366
1367         list_for_each_entry(event, &leader->sibling_list, group_entry) {
1368                 if (!is_x86_event(event) ||
1369                     event->state <= PERF_EVENT_STATE_OFF)
1370                         continue;
1371
1372                 if (n >= max_count)
1373                         return -ENOSPC;
1374
1375                 cpuc->event_list[n] = event;
1376                 n++;
1377         }
1378         return n;
1379 }
1380
1381
1382 static inline void x86_assign_hw_event(struct perf_event *event,
1383                                 struct hw_perf_event *hwc, int idx)
1384 {
1385         hwc->idx = idx;
1386
1387         if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
1388                 hwc->config_base = 0;
1389                 hwc->event_base = 0;
1390         } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
1391                 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1392                 /*
1393                  * We set it so that event_base + idx in wrmsr/rdmsr maps to
1394                  * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
1395                  */
1396                 hwc->event_base =
1397                         MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
1398         } else {
1399                 hwc->config_base = x86_pmu.eventsel;
1400                 hwc->event_base  = x86_pmu.perfctr;
1401         }
1402 }
1403
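/*
 * Editor's note -- worked example of the MSR mapping above: a fixed
 * counter with idx == X86_PMC_IDX_FIXED + 1 gets event_base + idx ==
 * MSR_ARCH_PERFMON_FIXED_CTR0 + 1 == MSR_ARCH_PERFMON_FIXED_CTR1, while
 * its config_base stays MSR_ARCH_PERFMON_FIXED_CTR_CTRL (one shared
 * control MSR for all fixed counters).  A generic counter simply uses
 * the eventsel/perfctr bases installed by the vendor setup code, e.g.
 * MSR_K7_EVNTSEL0 + idx / MSR_K7_PERFCTR0 + idx on AMD.
 */
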
1404 void hw_perf_enable(void)
1405 {
1406         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1407         struct perf_event *event;
1408         struct hw_perf_event *hwc;
1409         int i;
1410
1411         if (!x86_pmu_initialized())
1412                 return;
1413         if (cpuc->n_added) {
1414                 /*
1415                  * apply assignment obtained either from
1416                  * hw_perf_group_sched_in() or x86_pmu_enable()
1417                  *
1418                  * step1: save events moving to new counters
1419                  * step2: reprogram moved events into new counters
1420                  */
1421                 for (i = 0; i < cpuc->n_events; i++) {
1422
1423                         event = cpuc->event_list[i];
1424                         hwc = &event->hw;
1425
1426                         if (hwc->idx == -1 || hwc->idx == cpuc->assign[i])
1427                                 continue;
1428
1429                         x86_pmu.disable(hwc, hwc->idx);
1430
1431                         clear_bit(hwc->idx, cpuc->active_mask);
1432                         barrier();
1433                         cpuc->events[hwc->idx] = NULL;
1434
1435                         x86_perf_event_update(event, hwc, hwc->idx);
1436
1437                         hwc->idx = -1;
1438                 }
1439
1440                 for (i = 0; i < cpuc->n_events; i++) {
1441
1442                         event = cpuc->event_list[i];
1443                         hwc = &event->hw;
1444
1445                         if (hwc->idx == -1) {
1446                                 x86_assign_hw_event(event, hwc, cpuc->assign[i]);
1447                                 x86_perf_event_set_period(event, hwc, hwc->idx);
1448                         }
1449                         /*
1450                          * need to mark as active because x86_pmu_disable()
1451                          * clears active_mask and events[] yet it preserves
1452                          * idx
1453                          */
1454                         set_bit(hwc->idx, cpuc->active_mask);
1455                         cpuc->events[hwc->idx] = event;
1456
1457                         x86_pmu.enable(hwc, hwc->idx);
1458                         perf_event_update_userpage(event);
1459                 }
1460                 cpuc->n_added = 0;
1461                 perf_events_lapic_init();
1462         }
1463         x86_pmu.enable_all();
1464 }
1465
1466 static inline u64 intel_pmu_get_status(void)
1467 {
1468         u64 status;
1469
1470         rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1471
1472         return status;
1473 }
1474
1475 static inline void intel_pmu_ack_status(u64 ack)
1476 {
1477         wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1478 }
1479
1480 static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1481 {
1482         (void)checking_wrmsrl(hwc->config_base + idx,
1483                               hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
1484 }
1485
1486 static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1487 {
1488         (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
1489 }
1490
1491 static inline void
1492 intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
1493 {
1494         int idx = __idx - X86_PMC_IDX_FIXED;
1495         u64 ctrl_val, mask;
1496
1497         mask = 0xfULL << (idx * 4);
1498
1499         rdmsrl(hwc->config_base, ctrl_val);
1500         ctrl_val &= ~mask;
1501         (void)checking_wrmsrl(hwc->config_base, ctrl_val);
1502 }
1503
1504 static inline void
1505 p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1506 {
1507         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1508         u64 val = P6_NOP_EVENT;
1509
1510         if (cpuc->enabled)
1511                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1512
1513         (void)checking_wrmsrl(hwc->config_base + idx, val);
1514 }
1515
1516 static inline void
1517 intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1518 {
1519         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1520                 intel_pmu_disable_bts();
1521                 return;
1522         }
1523
1524         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1525                 intel_pmu_disable_fixed(hwc, idx);
1526                 return;
1527         }
1528
1529         x86_pmu_disable_event(hwc, idx);
1530 }
1531
1532 static inline void
1533 amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1534 {
1535         x86_pmu_disable_event(hwc, idx);
1536 }
1537
1538 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1539
1540 /*
1541  * Set the next IRQ period, based on the hwc->period_left value.
1542  * To be called with the event disabled in hw:
1543  */
1544 static int
1545 x86_perf_event_set_period(struct perf_event *event,
1546                              struct hw_perf_event *hwc, int idx)
1547 {
1548         s64 left = atomic64_read(&hwc->period_left);
1549         s64 period = hwc->sample_period;
1550         int err, ret = 0;
1551
1552         if (idx == X86_PMC_IDX_FIXED_BTS)
1553                 return 0;
1554
1555         /*
1556          * If we are way outside a reasonable range then just skip forward:
1557          */
1558         if (unlikely(left <= -period)) {
1559                 left = period;
1560                 atomic64_set(&hwc->period_left, left);
1561                 hwc->last_period = period;
1562                 ret = 1;
1563         }
1564
1565         if (unlikely(left <= 0)) {
1566                 left += period;
1567                 atomic64_set(&hwc->period_left, left);
1568                 hwc->last_period = period;
1569                 ret = 1;
1570         }
1571         /*
1572          * Quirk: certain CPUs don't like it if just 1 hw_event is left:
1573          */
1574         if (unlikely(left < 2))
1575                 left = 2;
1576
1577         if (left > x86_pmu.max_period)
1578                 left = x86_pmu.max_period;
1579
1580         per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
1581
1582         /*
1583          * The hw event starts counting from this event offset,
1584          * mark it to be able to extract future deltas:
1585          */
1586         atomic64_set(&hwc->prev_count, (u64)-left);
1587
1588         err = checking_wrmsrl(hwc->event_base + idx,
1589                              (u64)(-left) & x86_pmu.event_mask);
1590
1591         perf_event_update_userpage(event);
1592
1593         return ret;
1594 }
1595
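/*
 * Editor's note -- worked example, assuming 48-bit counters: for a
 * sample_period of 200000, the counter is written with
 * (u64)(-200000) & x86_pmu.event_mask == 2^48 - 200000, so it overflows
 * (and raises the PMI) after exactly 200000 events.  prev_count is set
 * to the same negative value, which makes the next
 * x86_perf_event_update() credit those 200000 events to event->count.
 */
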
1596 static inline void
1597 intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
1598 {
1599         int idx = __idx - X86_PMC_IDX_FIXED;
1600         u64 ctrl_val, bits, mask;
1601         int err;
1602
1603         /*
1604          * Enable IRQ generation (0x8),
1605          * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1606          * if requested:
1607          */
1608         bits = 0x8ULL;
1609         if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1610                 bits |= 0x2;
1611         if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1612                 bits |= 0x1;
1613         bits <<= (idx * 4);
1614         mask = 0xfULL << (idx * 4);
1615
1616         rdmsrl(hwc->config_base, ctrl_val);
1617         ctrl_val &= ~mask;
1618         ctrl_val |= bits;
1619         err = checking_wrmsrl(hwc->config_base, ctrl_val);
1620 }
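
/*
 * Sketch of the layout assumed above (per the architectural perfmon
 * spec): MSR_ARCH_PERFMON_FIXED_CTR_CTRL carries one 4-bit field per
 * fixed counter, at bit position idx * 4:
 *
 *   bit 0 (0x1) - count in ring 0 (OS)
 *   bit 1 (0x2) - count in ring 3 (USR)
 *   bit 3 (0x8) - raise a PMI on overflow
 *
 * which is why the code builds 'bits' from 0x8/0x2/0x1 and clears the
 * old field with a 0xf mask before OR-ing the new bits in.
 */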
1621
1622 static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1623 {
1624         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1625         u64 val;
1626
1627         val = hwc->config;
1628         if (cpuc->enabled)
1629                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1630
1631         (void)checking_wrmsrl(hwc->config_base + idx, val);
1632 }
1633
1634
1635 static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1636 {
1637         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1638                 if (!__get_cpu_var(cpu_hw_events).enabled)
1639                         return;
1640
1641                 intel_pmu_enable_bts(hwc->config);
1642                 return;
1643         }
1644
1645         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1646                 intel_pmu_enable_fixed(hwc, idx);
1647                 return;
1648         }
1649
1650         x86_pmu_enable_event(hwc, idx);
1651 }
1652
1653 static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1654 {
1655         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1656
1657         if (cpuc->enabled)
1658                 x86_pmu_enable_event(hwc, idx);
1659 }
1660
1661 /*
1662  * activate a single event
1663  *
1664  * The event is added to the group of enabled events
1665  * but only if it can be scheduled with existing events.
1666  *
1667  * Called with the PMU disabled; the caller is guaranteed to
1668  * subsequently call perf_enable() and thus hw_perf_enable().
1669  */
1670 static int x86_pmu_enable(struct perf_event *event)
1671 {
1672         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1673         struct hw_perf_event *hwc;
1674         int assign[X86_PMC_IDX_MAX];
1675         int n, n0, ret;
1676
1677         hwc = &event->hw;
1678
1679         n0 = cpuc->n_events;
1680         n = collect_events(cpuc, event, false);
1681         if (n < 0)
1682                 return n;
1683
1684         ret = x86_schedule_events(cpuc, n, assign);
1685         if (ret)
1686                 return ret;
1687         /*
1688          * copy the new assignment now that we know it is possible;
1689          * it will be used by hw_perf_enable()
1690          */
1691         memcpy(cpuc->assign, assign, n*sizeof(int));
1692
1693         cpuc->n_events = n;
1694         cpuc->n_added  = n - n0;
1695
1696         if (hwc->idx != -1)
1697                 x86_perf_event_set_period(event, hwc, hwc->idx);
1698
1699         return 0;
1700 }
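
/*
 * Note: apart from the optional set_period above, nothing is written
 * to the hardware here; the actual counter programming for the
 * n_added new events is deferred to hw_perf_enable(), which consumes
 * cpuc->assign[] once the PMU is re-enabled.
 */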
1701
1702 static void x86_pmu_unthrottle(struct perf_event *event)
1703 {
1704         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1705         struct hw_perf_event *hwc = &event->hw;
1706
1707         if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
1708                                 cpuc->events[hwc->idx] != event))
1709                 return;
1710
1711         x86_pmu.enable(hwc, hwc->idx);
1712 }
1713
1714 void perf_event_print_debug(void)
1715 {
1716         u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
1717         struct cpu_hw_events *cpuc;
1718         unsigned long flags;
1719         int cpu, idx;
1720
1721         if (!x86_pmu.num_events)
1722                 return;
1723
1724         local_irq_save(flags);
1725
1726         cpu = smp_processor_id();
1727         cpuc = &per_cpu(cpu_hw_events, cpu);
1728
1729         if (x86_pmu.version >= 2) {
1730                 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1731                 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1732                 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1733                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
1734
1735                 pr_info("\n");
1736                 pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
1737                 pr_info("CPU#%d: status:     %016llx\n", cpu, status);
1738                 pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
1739                 pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
1740         }
1741         pr_info("CPU#%d: active:       %016llx\n", cpu, *(u64 *)cpuc->active_mask);
1742
1743         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1744                 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1745                 rdmsrl(x86_pmu.perfctr  + idx, pmc_count);
1746
1747                 prev_left = per_cpu(pmc_prev_left[idx], cpu);
1748
1749                 pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
1750                         cpu, idx, pmc_ctrl);
1751                 pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
1752                         cpu, idx, pmc_count);
1753                 pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
1754                         cpu, idx, prev_left);
1755         }
1756         for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1757                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1758
1759                 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
1760                         cpu, idx, pmc_count);
1761         }
1762         local_irq_restore(flags);
1763 }
1764
1765 static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
1766 {
1767         struct debug_store *ds = cpuc->ds;
1768         struct bts_record {
1769                 u64     from;
1770                 u64     to;
1771                 u64     flags;
1772         };
1773         struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
1774         struct bts_record *at, *top;
1775         struct perf_output_handle handle;
1776         struct perf_event_header header;
1777         struct perf_sample_data data;
1778         struct pt_regs regs;
1779
1780         if (!event)
1781                 return;
1782
1783         if (!ds)
1784                 return;
1785
1786         at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
1787         top = (struct bts_record *)(unsigned long)ds->bts_index;
1788
1789         if (top <= at)
1790                 return;
1791
1792         ds->bts_index = ds->bts_buffer_base;
1793
1794
1795         data.period     = event->hw.last_period;
1796         data.addr       = 0;
1797         data.raw        = NULL;
1798         regs.ip         = 0;
1799
1800         /*
1801          * Prepare a generic sample, i.e. fill in the invariant fields.
1802          * We will overwrite the from and to address before we output
1803          * the sample.
1804          */
1805         perf_prepare_sample(&header, &data, event, &regs);
1806
1807         if (perf_output_begin(&handle, event,
1808                               header.size * (top - at), 1, 1))
1809                 return;
1810
1811         for (; at < top; at++) {
1812                 data.ip         = at->from;
1813                 data.addr       = at->to;
1814
1815                 perf_output_sample(&handle, &header, &data, event);
1816         }
1817
1818         perf_output_end(&handle);
1819
1820         /* There's new data available. */
1821         event->hw.interrupts++;
1822         event->pending_kill = POLL_IN;
1823 }
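
/*
 * In other words: every BTS record (branch-from, branch-to, flags)
 * in the hardware buffer is turned into one perf sample whose
 * data.ip is the branch source and data.addr the branch target, and
 * the buffer index is rewound so the hardware starts filling the
 * buffer from the beginning again.
 */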
1824
1825 static void x86_pmu_disable(struct perf_event *event)
1826 {
1827         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1828         struct hw_perf_event *hwc = &event->hw;
1829         int i, idx = hwc->idx;
1830
1831         /*
1832          * Must be done before we disable, otherwise the nmi handler
1833          * could reenable again:
1834          */
1835         clear_bit(idx, cpuc->active_mask);
1836         x86_pmu.disable(hwc, idx);
1837
1838         /*
1839          * Make sure the cleared pointer becomes visible before we
1840          * (potentially) free the event:
1841          */
1842         barrier();
1843
1844         /*
1845          * Drain the remaining delta count out of an event
1846          * that we are disabling:
1847          */
1848         x86_perf_event_update(event, hwc, idx);
1849
1850         /* Drain the remaining BTS records. */
1851         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
1852                 intel_pmu_drain_bts_buffer(cpuc);
1853
1854         cpuc->events[idx] = NULL;
1855
1856         for (i = 0; i < cpuc->n_events; i++) {
1857                 if (event == cpuc->event_list[i]) {
1858
1859                         if (x86_pmu.put_event_constraints)
1860                                 x86_pmu.put_event_constraints(cpuc, event);
1861
1862                         while (++i < cpuc->n_events)
1863                                 cpuc->event_list[i-1] = cpuc->event_list[i];
1864
1865                         --cpuc->n_events;
1866                         break;
1867                 }
1868         }
1869         perf_event_update_userpage(event);
1870 }
1871
1872 /*
1873  * Save and restart an expired event. Called by NMI contexts,
1874  * so it has to be careful about preempting normal event ops:
1875  */
1876 static int intel_pmu_save_and_restart(struct perf_event *event)
1877 {
1878         struct hw_perf_event *hwc = &event->hw;
1879         int idx = hwc->idx;
1880         int ret;
1881
1882         x86_perf_event_update(event, hwc, idx);
1883         ret = x86_perf_event_set_period(event, hwc, idx);
1884
1885         if (event->state == PERF_EVENT_STATE_ACTIVE)
1886                 intel_pmu_enable_event(hwc, idx);
1887
1888         return ret;
1889 }
1890
1891 static void intel_pmu_reset(void)
1892 {
1893         struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
1894         unsigned long flags;
1895         int idx;
1896
1897         if (!x86_pmu.num_events)
1898                 return;
1899
1900         local_irq_save(flags);
1901
1902         printk("clearing PMU state on CPU#%d\n", smp_processor_id());
1903
1904         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1905                 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
1906                 checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
1907         }
1908         for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1909                 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1910         }
1911         if (ds)
1912                 ds->bts_index = ds->bts_buffer_base;
1913
1914         local_irq_restore(flags);
1915 }
1916
1917 static int p6_pmu_handle_irq(struct pt_regs *regs)
1918 {
1919         struct perf_sample_data data;
1920         struct cpu_hw_events *cpuc;
1921         struct perf_event *event;
1922         struct hw_perf_event *hwc;
1923         int idx, handled = 0;
1924         u64 val;
1925
1926         data.addr = 0;
1927         data.raw = NULL;
1928
1929         cpuc = &__get_cpu_var(cpu_hw_events);
1930
1931         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1932                 if (!test_bit(idx, cpuc->active_mask))
1933                         continue;
1934
1935                 event = cpuc->events[idx];
1936                 hwc = &event->hw;
1937
1938                 val = x86_perf_event_update(event, hwc, idx);
1939                 if (val & (1ULL << (x86_pmu.event_bits - 1)))
1940                         continue;
1941
1942                 /*
1943                  * event overflow
1944                  */
1945                 handled         = 1;
1946                 data.period     = event->hw.last_period;
1947
1948                 if (!x86_perf_event_set_period(event, hwc, idx))
1949                         continue;
1950
1951                 if (perf_event_overflow(event, 1, &data, regs))
1952                         p6_pmu_disable_event(hwc, idx);
1953         }
1954
1955         if (handled)
1956                 inc_irq_stat(apic_perf_irqs);
1957
1958         return handled;
1959 }
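
/*
 * The overflow test above relies on the counters counting upward from
 * the negative value programmed by x86_perf_event_set_period(): as
 * long as bit (event_bits - 1) is still set the counter has not yet
 * wrapped past zero, so a set top bit means "no overflow" and the
 * counter is skipped.
 */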
1960
1961 /*
1962  * This handler is triggered by the local APIC, so the APIC IRQ handling
1963  * rules apply:
1964  */
1965 static int intel_pmu_handle_irq(struct pt_regs *regs)
1966 {
1967         struct perf_sample_data data;
1968         struct cpu_hw_events *cpuc;
1969         int bit, loops;
1970         u64 ack, status;
1971
1972         data.addr = 0;
1973         data.raw = NULL;
1974
1975         cpuc = &__get_cpu_var(cpu_hw_events);
1976
1977         perf_disable();
1978         intel_pmu_drain_bts_buffer(cpuc);
1979         status = intel_pmu_get_status();
1980         if (!status) {
1981                 perf_enable();
1982                 return 0;
1983         }
1984
1985         loops = 0;
1986 again:
1987         if (++loops > 100) {
1988                 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
1989                 perf_event_print_debug();
1990                 intel_pmu_reset();
1991                 perf_enable();
1992                 return 1;
1993         }
1994
1995         inc_irq_stat(apic_perf_irqs);
1996         ack = status;
1997         for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
1998                 struct perf_event *event = cpuc->events[bit];
1999
2000                 clear_bit(bit, (unsigned long *) &status);
2001                 if (!test_bit(bit, cpuc->active_mask))
2002                         continue;
2003
2004                 if (!intel_pmu_save_and_restart(event))
2005                         continue;
2006
2007                 data.period = event->hw.last_period;
2008
2009                 if (perf_event_overflow(event, 1, &data, regs))
2010                         intel_pmu_disable_event(&event->hw, bit);
2011         }
2012
2013         intel_pmu_ack_status(ack);
2014
2015         /*
2016          * Repeat if there is more work to be done:
2017          */
2018         status = intel_pmu_get_status();
2019         if (status)
2020                 goto again;
2021
2022         perf_enable();
2023
2024         return 1;
2025 }
2026
2027 static int amd_pmu_handle_irq(struct pt_regs *regs)
2028 {
2029         struct perf_sample_data data;
2030         struct cpu_hw_events *cpuc;
2031         struct perf_event *event;
2032         struct hw_perf_event *hwc;
2033         int idx, handled = 0;
2034         u64 val;
2035
2036         data.addr = 0;
2037         data.raw = NULL;
2038
2039         cpuc = &__get_cpu_var(cpu_hw_events);
2040
2041         for (idx = 0; idx < x86_pmu.num_events; idx++) {
2042                 if (!test_bit(idx, cpuc->active_mask))
2043                         continue;
2044
2045                 event = cpuc->events[idx];
2046                 hwc = &event->hw;
2047
2048                 val = x86_perf_event_update(event, hwc, idx);
2049                 if (val & (1ULL << (x86_pmu.event_bits - 1)))
2050                         continue;
2051
2052                 /*
2053                  * event overflow
2054                  */
2055                 handled         = 1;
2056                 data.period     = event->hw.last_period;
2057
2058                 if (!x86_perf_event_set_period(event, hwc, idx))
2059                         continue;
2060
2061                 if (perf_event_overflow(event, 1, &data, regs))
2062                         amd_pmu_disable_event(hwc, idx);
2063         }
2064
2065         if (handled)
2066                 inc_irq_stat(apic_perf_irqs);
2067
2068         return handled;
2069 }
2070
2071 void smp_perf_pending_interrupt(struct pt_regs *regs)
2072 {
2073         irq_enter();
2074         ack_APIC_irq();
2075         inc_irq_stat(apic_pending_irqs);
2076         perf_event_do_pending();
2077         irq_exit();
2078 }
2079
2080 void set_perf_event_pending(void)
2081 {
2082 #ifdef CONFIG_X86_LOCAL_APIC
2083         if (!x86_pmu.apic || !x86_pmu_initialized())
2084                 return;
2085
2086         apic->send_IPI_self(LOCAL_PENDING_VECTOR);
2087 #endif
2088 }
2089
2090 void perf_events_lapic_init(void)
2091 {
2092 #ifdef CONFIG_X86_LOCAL_APIC
2093         if (!x86_pmu.apic || !x86_pmu_initialized())
2094                 return;
2095
2096         /*
2097          * Always use NMI for PMU
2098          */
2099         apic_write(APIC_LVTPC, APIC_DM_NMI);
2100 #endif
2101 }
2102
2103 static int __kprobes
2104 perf_event_nmi_handler(struct notifier_block *self,
2105                          unsigned long cmd, void *__args)
2106 {
2107         struct die_args *args = __args;
2108         struct pt_regs *regs;
2109
2110         if (!atomic_read(&active_events))
2111                 return NOTIFY_DONE;
2112
2113         switch (cmd) {
2114         case DIE_NMI:
2115         case DIE_NMI_IPI:
2116                 break;
2117
2118         default:
2119                 return NOTIFY_DONE;
2120         }
2121
2122         regs = args->regs;
2123
2124 #ifdef CONFIG_X86_LOCAL_APIC
2125         apic_write(APIC_LVTPC, APIC_DM_NMI);
2126 #endif
2127         /*
2128          * Can't rely on the handled return value to say it was our NMI; two
2129          * events could trigger 'simultaneously', raising two back-to-back NMIs.
2130          *
2131          * If the first NMI handles both, the latter will be empty and daze
2132          * the CPU.
2133          */
2134         x86_pmu.handle_irq(regs);
2135
2136         return NOTIFY_STOP;
2137 }
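
/*
 * Example of the back-to-back case described above: PMC0 and PMC1
 * overflow almost simultaneously and each queues an NMI.  The first
 * NMI's handle_irq() drains both overflows from the global status
 * register, so the second NMI finds nothing to do; returning
 * NOTIFY_STOP unconditionally makes sure that second, now-empty NMI
 * is still consumed instead of being reported as unknown.
 */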
2138
2139 static struct event_constraint unconstrained;
2140
2141 static struct event_constraint bts_constraint =
2142         EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
2143
2144 static struct event_constraint *
2145 intel_special_constraints(struct perf_event *event)
2146 {
2147         unsigned int hw_event;
2148
2149         hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
2150
2151         if (unlikely((hw_event ==
2152                       x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
2153                      (event->hw.sample_period == 1))) {
2154
2155                 return &bts_constraint;
2156         }
2157         return NULL;
2158 }
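
/*
 * Example: a request for precise branch tracing, e.g. something like
 * "perf record -e branches -c 1", arrives here with the
 * BRANCH_INSTRUCTIONS event and a sample period of 1, and is steered
 * onto the BTS fixed "counter" via bts_constraint instead of a
 * generic PMC.
 */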
2159
2160 static struct event_constraint *
2161 intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
2162 {
2163         struct event_constraint *c;
2164
2165         c = intel_special_constraints(event);
2166         if (c)
2167                 return c;
2168
2169         if (x86_pmu.event_constraints) {
2170                 for_each_event_constraint(c, x86_pmu.event_constraints) {
2171                         if ((event->hw.config & c->cmask) == c->code)
2172                                 return c;
2173                 }
2174         }
2175
2176         return &unconstrained;
2177 }
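
/*
 * Matching rule, with made-up numbers: a table entry with
 * .code = 0xc0 and .cmask = INTEL_ARCH_EVENT_MASK matches any event
 * whose event-select field is 0xc0, and its idxmsk then limits which
 * counters x86_schedule_events() may pick for it.  Events matching no
 * entry fall back to 'unconstrained', i.e. any generic counter.
 */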
2178
2179 static struct event_constraint *
2180 amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
2181 {
2182         return &unconstrained;
2183 }
2184
2185 static int x86_event_sched_in(struct perf_event *event,
2186                           struct perf_cpu_context *cpuctx, int cpu)
2187 {
2188         int ret = 0;
2189
2190         event->state = PERF_EVENT_STATE_ACTIVE;
2191         event->oncpu = cpu;
2192         event->tstamp_running += event->ctx->time - event->tstamp_stopped;
2193
2194         if (!is_x86_event(event))
2195                 ret = event->pmu->enable(event);
2196
2197         if (!ret && !is_software_event(event))
2198                 cpuctx->active_oncpu++;
2199
2200         if (!ret && event->attr.exclusive)
2201                 cpuctx->exclusive = 1;
2202
2203         return ret;
2204 }
2205
2206 static void x86_event_sched_out(struct perf_event *event,
2207                             struct perf_cpu_context *cpuctx, int cpu)
2208 {
2209         event->state = PERF_EVENT_STATE_INACTIVE;
2210         event->oncpu = -1;
2211
2212         if (!is_x86_event(event))
2213                 event->pmu->disable(event);
2214
2215         event->tstamp_running -= event->ctx->time - event->tstamp_stopped;
2216
2217         if (!is_software_event(event))
2218                 cpuctx->active_oncpu--;
2219
2220         if (event->attr.exclusive || !cpuctx->active_oncpu)
2221                 cpuctx->exclusive = 0;
2222 }
2223
2224 /*
2225  * Called to enable a whole group of events.
2226  * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
2227  * Assumes the caller has disabled interrupts and has
2228  * frozen the PMU with hw_perf_save_disable.
2229  *
2230  * Called with the PMU disabled. If successful (return value 1), the
2231  * caller is guaranteed to then call perf_enable() and hw_perf_enable().
2232  */
2233 int hw_perf_group_sched_in(struct perf_event *leader,
2234                struct perf_cpu_context *cpuctx,
2235                struct perf_event_context *ctx, int cpu)
2236 {
2237         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2238         struct perf_event *sub;
2239         int assign[X86_PMC_IDX_MAX];
2240         int n0, n1, ret;
2241
2242         /* n0 = total number of events */
2243         n0 = collect_events(cpuc, leader, true);
2244         if (n0 < 0)
2245                 return n0;
2246
2247         ret = x86_schedule_events(cpuc, n0, assign);
2248         if (ret)
2249                 return ret;
2250
2251         ret = x86_event_sched_in(leader, cpuctx, cpu);
2252         if (ret)
2253                 return ret;
2254
2255         n1 = 1;
2256         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2257                 if (sub->state > PERF_EVENT_STATE_OFF) {
2258                         ret = x86_event_sched_in(sub, cpuctx, cpu);
2259                         if (ret)
2260                                 goto undo;
2261                         ++n1;
2262                 }
2263         }
2264         /*
2265          * copy the new assignment now that we know it is possible;
2266          * it will be used by hw_perf_enable()
2267          */
2268         memcpy(cpuc->assign, assign, n0*sizeof(int));
2269
2270         cpuc->n_events  = n0;
2271         cpuc->n_added   = n1;
2272         ctx->nr_active += n1;
2273
2274         /*
2275          * A return value of 1 means successful and events are active.
2276          * This is not quite true because we defer
2277          * actual activation until hw_perf_enable(), but
2278          * this way we ensure the caller won't try to enable
2279          * individual events.
2280          */
2281         return 1;
2282 undo:
2283         x86_event_sched_out(leader, cpuctx, cpu);
2284         n0  = 1;
2285         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2286                 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
2287                         x86_event_sched_out(sub, cpuctx, cpu);
2288                         if (++n0 == n1)
2289                                 break;
2290                 }
2291         }
2292         return ret;
2293 }
2294
2295 static __read_mostly struct notifier_block perf_event_nmi_notifier = {
2296         .notifier_call          = perf_event_nmi_handler,
2297         .next                   = NULL,
2298         .priority               = 1
2299 };
2300
2301 static __initconst struct x86_pmu p6_pmu = {
2302         .name                   = "p6",
2303         .handle_irq             = p6_pmu_handle_irq,
2304         .disable_all            = p6_pmu_disable_all,
2305         .enable_all             = p6_pmu_enable_all,
2306         .enable                 = p6_pmu_enable_event,
2307         .disable                = p6_pmu_disable_event,
2308         .eventsel               = MSR_P6_EVNTSEL0,
2309         .perfctr                = MSR_P6_PERFCTR0,
2310         .event_map              = p6_pmu_event_map,
2311         .raw_event              = p6_pmu_raw_event,
2312         .max_events             = ARRAY_SIZE(p6_perfmon_event_map),
2313         .apic                   = 1,
2314         .max_period             = (1ULL << 31) - 1,
2315         .version                = 0,
2316         .num_events             = 2,
2317         /*
2318          * Events have 40 bits implemented. However, they are designed such
2319          * that bits [32-39] are sign extensions of bit 31. As such, the
2320          * effective width of an event for a P6-like PMU is only 32 bits.
2321          *
2322          * See IA-32 Intel Architecture Software developer manual Vol 3B
2323          */
2324         .event_bits             = 32,
2325         .event_mask             = (1ULL << 32) - 1,
2326         .get_event_constraints  = intel_get_event_constraints,
2327         .event_constraints      = intel_p6_event_constraints
2328 };
2329
2330 static __initconst struct x86_pmu intel_pmu = {
2331         .name                   = "Intel",
2332         .handle_irq             = intel_pmu_handle_irq,
2333         .disable_all            = intel_pmu_disable_all,
2334         .enable_all             = intel_pmu_enable_all,
2335         .enable                 = intel_pmu_enable_event,
2336         .disable                = intel_pmu_disable_event,
2337         .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
2338         .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
2339         .event_map              = intel_pmu_event_map,
2340         .raw_event              = intel_pmu_raw_event,
2341         .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
2342         .apic                   = 1,
2343         /*
2344          * Intel PMCs cannot be accessed sanely above 32 bit width,
2345          * so we install an artificial 1<<31 period regardless of
2346          * the generic event period:
2347          */
2348         .max_period             = (1ULL << 31) - 1,
2349         .enable_bts             = intel_pmu_enable_bts,
2350         .disable_bts            = intel_pmu_disable_bts,
2351         .get_event_constraints  = intel_get_event_constraints
2352 };
2353
2354 static __initconst struct x86_pmu amd_pmu = {
2355         .name                   = "AMD",
2356         .handle_irq             = amd_pmu_handle_irq,
2357         .disable_all            = amd_pmu_disable_all,
2358         .enable_all             = amd_pmu_enable_all,
2359         .enable                 = amd_pmu_enable_event,
2360         .disable                = amd_pmu_disable_event,
2361         .eventsel               = MSR_K7_EVNTSEL0,
2362         .perfctr                = MSR_K7_PERFCTR0,
2363         .event_map              = amd_pmu_event_map,
2364         .raw_event              = amd_pmu_raw_event,
2365         .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
2366         .num_events             = 4,
2367         .event_bits             = 48,
2368         .event_mask             = (1ULL << 48) - 1,
2369         .apic                   = 1,
2370         /* use highest bit to detect overflow */
2371         .max_period             = (1ULL << 47) - 1,
2372         .get_event_constraints  = amd_get_event_constraints
2373 };
2374
2375 static __init int p6_pmu_init(void)
2376 {
2377         switch (boot_cpu_data.x86_model) {
2378         case 1:
2379         case 3:  /* Pentium Pro */
2380         case 5:
2381         case 6:  /* Pentium II */
2382         case 7:
2383         case 8:
2384         case 11: /* Pentium III */
2385         case 9:
2386         case 13:
2387                 /* Pentium M */
2388                 break;
2389         default:
2390                 pr_cont("unsupported p6 CPU model %d ",
2391                         boot_cpu_data.x86_model);
2392                 return -ENODEV;
2393         }
2394
2395         x86_pmu = p6_pmu;
2396
2397         return 0;
2398 }
2399
2400 static __init int intel_pmu_init(void)
2401 {
2402         union cpuid10_edx edx;
2403         union cpuid10_eax eax;
2404         unsigned int unused;
2405         unsigned int ebx;
2406         int version;
2407
2408         if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2409                 /* check for P6 processor family */
2410                 if (boot_cpu_data.x86 == 6) {
2411                         return p6_pmu_init();
2412                 } else {
2413                         return -ENODEV;
2414                 }
2415         }
2416
2417         /*
2418          * Check whether the Architectural PerfMon supports
2419          * Branch Misses Retired hw_event or not.
2420          */
2421         cpuid(10, &eax.full, &ebx, &unused, &edx.full);
2422         if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
2423                 return -ENODEV;
2424
2425         version = eax.split.version_id;
2426         if (version < 2)
2427                 return -ENODEV;
2428
2429         x86_pmu                         = intel_pmu;
2430         x86_pmu.version                 = version;
2431         x86_pmu.num_events              = eax.split.num_events;
2432         x86_pmu.event_bits              = eax.split.bit_width;
2433         x86_pmu.event_mask              = (1ULL << eax.split.bit_width) - 1;
2434
2435         /*
2436          * Quirk: v2 perfmon does not report fixed-purpose events, so
2437          * assume at least 3 events:
2438          */
2439         x86_pmu.num_events_fixed        = max((int)edx.split.num_events_fixed, 3);
2440
2441         /*
2442          * Install the hw-cache-events table:
2443          */
2444         switch (boot_cpu_data.x86_model) {
2445         case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2446         case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2447         case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2448         case 29: /* six-core 45 nm xeon "Dunnington" */
2449                 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
2450                        sizeof(hw_cache_event_ids));
2451
2452                 x86_pmu.event_constraints = intel_core_event_constraints;
2453                 pr_cont("Core2 events, ");
2454                 break;
2455         case 26:
2456                 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
2457                        sizeof(hw_cache_event_ids));
2458
2459                 x86_pmu.event_constraints = intel_nehalem_event_constraints;
2460                 pr_cont("Nehalem/Corei7 events, ");
2461                 break;
2462         case 28:
2463                 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2464                        sizeof(hw_cache_event_ids));
2465
2466                 x86_pmu.event_constraints = intel_gen_event_constraints;
2467                 pr_cont("Atom events, ");
2468                 break;
2469         default:
2470                 /*
2471                  * default constraints for v2 and up
2472                  */
2473                 x86_pmu.event_constraints = intel_gen_event_constraints;
2474                 pr_cont("generic architected perfmon, ");
2475         }
2476         return 0;
2477 }
2478
2479 static __init int amd_pmu_init(void)
2480 {
2481         /* Performance-monitoring supported from K7 and later: */
2482         if (boot_cpu_data.x86 < 6)
2483                 return -ENODEV;
2484
2485         x86_pmu = amd_pmu;
2486
2487         /* Events are common for all AMDs */
2488         memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
2489                sizeof(hw_cache_event_ids));
2490
2491         return 0;
2492 }
2493
2494 static void __init pmu_check_apic(void)
2495 {
2496         if (cpu_has_apic)
2497                 return;
2498
2499         x86_pmu.apic = 0;
2500         pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
2501         pr_info("no hardware sampling interrupt available.\n");
2502 }
2503
2504 void __init init_hw_perf_events(void)
2505 {
2506         int err;
2507
2508         pr_info("Performance Events: ");
2509
2510         switch (boot_cpu_data.x86_vendor) {
2511         case X86_VENDOR_INTEL:
2512                 err = intel_pmu_init();
2513                 break;
2514         case X86_VENDOR_AMD:
2515                 err = amd_pmu_init();
2516                 break;
2517         default:
2518                 return;
2519         }
2520         if (err != 0) {
2521                 pr_cont("no PMU driver, software events only.\n");
2522                 return;
2523         }
2524
2525         pmu_check_apic();
2526
2527         pr_cont("%s PMU driver.\n", x86_pmu.name);
2528
2529         if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
2530                 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2531                      x86_pmu.num_events, X86_PMC_MAX_GENERIC);
2532                 x86_pmu.num_events = X86_PMC_MAX_GENERIC;
2533         }
2534         perf_event_mask = (1 << x86_pmu.num_events) - 1;
2535         perf_max_events = x86_pmu.num_events;
2536
2537         if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
2538                 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2539                      x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
2540                 x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
2541         }
2542
2543         perf_event_mask |=
2544                 ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
2545         x86_pmu.intel_ctrl = perf_event_mask;
2546
2547         perf_events_lapic_init();
2548         register_die_notifier(&perf_event_nmi_notifier);
2549
2550         unconstrained = (struct event_constraint)
2551                 EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1, 0);
2552
2553         pr_info("... version:                %d\n",     x86_pmu.version);
2554         pr_info("... bit width:              %d\n",     x86_pmu.event_bits);
2555         pr_info("... generic registers:      %d\n",     x86_pmu.num_events);
2556         pr_info("... value mask:             %016Lx\n", x86_pmu.event_mask);
2557         pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
2558         pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_events_fixed);
2559         pr_info("... event mask:             %016Lx\n", perf_event_mask);
2560 }
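
/*
 * Resulting perf_event_mask layout: one bit per generic counter in
 * the low bits, plus one bit per fixed counter starting at
 * X86_PMC_IDX_FIXED; the same value is mirrored into
 * x86_pmu.intel_ctrl for use as the global-enable mask.
 */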
2561
2562 static inline void x86_pmu_read(struct perf_event *event)
2563 {
2564         x86_perf_event_update(event, &event->hw, event->hw.idx);
2565 }
2566
2567 static const struct pmu pmu = {
2568         .enable         = x86_pmu_enable,
2569         .disable        = x86_pmu_disable,
2570         .read           = x86_pmu_read,
2571         .unthrottle     = x86_pmu_unthrottle,
2572 };
2573
2574 /*
2575  * validate a single event group
2576  *
2577  * validation includes:
2578  *      - check events are compatible with each other
2579  *      - events do not compete for the same counter
2580  *      - number of events <= number of counters
2581  *
2582  * validation ensures the group can be loaded onto the
2583  * PMU if it was the only group available.
2584  */
2585 static int validate_group(struct perf_event *event)
2586 {
2587         struct perf_event *leader = event->group_leader;
2588         struct cpu_hw_events *fake_cpuc;
2589         int ret, n;
2590
2591         ret = -ENOMEM;
2592         fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
2593         if (!fake_cpuc)
2594                 goto out;
2595
2596         /*
2597          * the event is not yet connected with its
2598          * siblings; therefore we must first collect
2599          * existing siblings, then add the new event
2600          * before we can simulate the scheduling
2601          */
2602         ret = -ENOSPC;
2603         n = collect_events(fake_cpuc, leader, true);
2604         if (n < 0)
2605                 goto out_free;
2606
2607         fake_cpuc->n_events = n;
2608         n = collect_events(fake_cpuc, event, false);
2609         if (n < 0)
2610                 goto out_free;
2611
2612         fake_cpuc->n_events = n;
2613
2614         ret = x86_schedule_events(fake_cpuc, n, NULL);
2615
2616 out_free:
2617         kfree(fake_cpuc);
2618 out:
2619         return ret;
2620 }
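
/*
 * This runs on a throw-away cpu_hw_events, so no real counters are
 * touched: hw_perf_event_init() below calls it whenever a new event
 * joins an existing group, to reject groups that could never be
 * scheduled together even on an otherwise idle PMU.
 */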
2621
2622 const struct pmu *hw_perf_event_init(struct perf_event *event)
2623 {
2624         const struct pmu *tmp;
2625         int err;
2626
2627         err = __hw_perf_event_init(event);
2628         if (!err) {
2629                 /*
2630                  * we temporarily connect the event to its pmu
2631                  * such that validate_group() can classify
2632                  * it as an x86 event using is_x86_event()
2633                  */
2634                 tmp = event->pmu;
2635                 event->pmu = &pmu;
2636
2637                 if (event->group_leader != event)
2638                         err = validate_group(event);
2639
2640                 event->pmu = tmp;
2641         }
2642         if (err) {
2643                 if (event->destroy)
2644                         event->destroy(event);
2645                 return ERR_PTR(err);
2646         }
2647
2648         return &pmu;
2649 }
2650
2651 /*
2652  * callchain support
2653  */
2654
2655 static inline
2656 void callchain_store(struct perf_callchain_entry *entry, u64 ip)
2657 {
2658         if (entry->nr < PERF_MAX_STACK_DEPTH)
2659                 entry->ip[entry->nr++] = ip;
2660 }
2661
2662 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
2663 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
2664
2665
2666 static void
2667 backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
2668 {
2669         /* Ignore warnings */
2670 }
2671
2672 static void backtrace_warning(void *data, char *msg)
2673 {
2674         /* Ignore warnings */
2675 }
2676
2677 static int backtrace_stack(void *data, char *name)
2678 {
2679         return 0;
2680 }
2681
2682 static void backtrace_address(void *data, unsigned long addr, int reliable)
2683 {
2684         struct perf_callchain_entry *entry = data;
2685
2686         if (reliable)
2687                 callchain_store(entry, addr);
2688 }
2689
2690 static const struct stacktrace_ops backtrace_ops = {
2691         .warning                = backtrace_warning,
2692         .warning_symbol         = backtrace_warning_symbol,
2693         .stack                  = backtrace_stack,
2694         .address                = backtrace_address,
2695         .walk_stack             = print_context_stack_bp,
2696 };
2697
2698 #include "../dumpstack.h"
2699
2700 static void
2701 perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
2702 {
2703         callchain_store(entry, PERF_CONTEXT_KERNEL);
2704         callchain_store(entry, regs->ip);
2705
2706         dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
2707 }
2708
2709 /*
2710  * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
2711  */
2712 static unsigned long
2713 copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
2714 {
2715         unsigned long offset, addr = (unsigned long)from;
2716         int type = in_nmi() ? KM_NMI : KM_IRQ0;
2717         unsigned long size, len = 0;
2718         struct page *page;
2719         void *map;
2720         int ret;
2721
2722         do {
2723                 ret = __get_user_pages_fast(addr, 1, 0, &page);
2724                 if (!ret)
2725                         break;
2726
2727                 offset = addr & (PAGE_SIZE - 1);
2728                 size = min(PAGE_SIZE - offset, n - len);
2729
2730                 map = kmap_atomic(page, type);
2731                 memcpy(to, map+offset, size);
2732                 kunmap_atomic(map, type);
2733                 put_page(page);
2734
2735                 len  += size;
2736                 to   += size;
2737                 addr += size;
2738
2739         } while (len < n);
2740
2741         return len;
2742 }
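
/*
 * The NMI/IRQ restriction above is why this open-codes the copy:
 * a regular copy_from_user() could fault and sleep, which is not
 * allowed here.  __get_user_pages_fast() pins the page without taking
 * mmap_sem, kmap_atomic() maps it, and the loop simply stops early
 * (returning the bytes copied so far) when a user page is not
 * present.
 */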
2743
2744 static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
2745 {
2746         unsigned long bytes;
2747
2748         bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
2749
2750         return bytes == sizeof(*frame);
2751 }
2752
2753 static void
2754 perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
2755 {
2756         struct stack_frame frame;
2757         const void __user *fp;
2758
2759         if (!user_mode(regs))
2760                 regs = task_pt_regs(current);
2761
2762         fp = (void __user *)regs->bp;
2763
2764         callchain_store(entry, PERF_CONTEXT_USER);
2765         callchain_store(entry, regs->ip);
2766
2767         while (entry->nr < PERF_MAX_STACK_DEPTH) {
2768                 frame.next_frame     = NULL;
2769                 frame.return_address = 0;
2770
2771                 if (!copy_stack_frame(fp, &frame))
2772                         break;
2773
2774                 if ((unsigned long)fp < regs->sp)
2775                         break;
2776
2777                 callchain_store(entry, frame.return_address);
2778                 fp = frame.next_frame;
2779         }
2780 }
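
/*
 * The user-space walk above follows the classic frame-pointer chain:
 * each stack_frame read from *fp holds the caller's frame pointer and
 * a return address.  The "(unsigned long)fp < regs->sp" check bails
 * out once the chain points below the current stack pointer, so a
 * corrupt chain (or a binary built without frame pointers) ends the
 * callchain early instead of looping.
 */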
2781
2782 static void
2783 perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
2784 {
2785         int is_user;
2786
2787         if (!regs)
2788                 return;
2789
2790         is_user = user_mode(regs);
2791
2792         if (is_user && current->state != TASK_RUNNING)
2793                 return;
2794
2795         if (!is_user)
2796                 perf_callchain_kernel(regs, entry);
2797
2798         if (current->mm)
2799                 perf_callchain_user(regs, entry);
2800 }
2801
2802 struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2803 {
2804         struct perf_callchain_entry *entry;
2805
2806         if (in_nmi())
2807                 entry = &__get_cpu_var(pmc_nmi_entry);
2808         else
2809                 entry = &__get_cpu_var(pmc_irq_entry);
2810
2811         entry->nr = 0;
2812
2813         perf_do_callchain(regs, entry);
2814
2815         return entry;
2816 }
2817
2818 void hw_perf_event_setup_online(int cpu)
2819 {
2820         init_debug_store_on_cpu(cpu);
2821 }