perf_events, x86: Fix event constraint masks
arch/x86/kernel/cpu/perf_event.c
1 /*
2  * Performance events x86 architecture code
3  *
4  *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5  *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6  *  Copyright (C) 2009 Jaswinder Singh Rajput
7  *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8  *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
9  *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
10  *  Copyright (C) 2009 Google, Inc., Stephane Eranian
11  *
12  *  For licencing details see kernel-base/COPYING
13  */
14
15 #include <linux/perf_event.h>
16 #include <linux/capability.h>
17 #include <linux/notifier.h>
18 #include <linux/hardirq.h>
19 #include <linux/kprobes.h>
20 #include <linux/module.h>
21 #include <linux/kdebug.h>
22 #include <linux/sched.h>
23 #include <linux/uaccess.h>
24 #include <linux/highmem.h>
25 #include <linux/cpu.h>
26 #include <linux/bitops.h>
27
28 #include <asm/apic.h>
29 #include <asm/stacktrace.h>
30 #include <asm/nmi.h>
31
32 static u64 perf_event_mask __read_mostly;
33
34 /* The maximal number of PEBS events: */
35 #define MAX_PEBS_EVENTS 4
36
37 /* The size of a BTS record in bytes: */
38 #define BTS_RECORD_SIZE         24
39
40 /* The size of a per-cpu BTS buffer in bytes: */
41 #define BTS_BUFFER_SIZE         (BTS_RECORD_SIZE * 2048)
42
43 /* The BTS overflow threshold in bytes from the end of the buffer: */
44 #define BTS_OVFL_TH             (BTS_RECORD_SIZE * 128)
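/*
 * Illustrative sizing only, derived from the constants above: each
 * 24-byte BTS record holds three u64s (branch-from, branch-to, flags),
 * so a per-cpu buffer is 24 * 2048 = 49152 bytes and the interrupt
 * threshold sits 24 * 128 = 3072 bytes (128 records) before
 * bts_absolute_maximum.
 */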
45
46
47 /*
48  * Bits in the debugctlmsr controlling branch tracing.
49  */
50 #define X86_DEBUGCTL_TR                 (1 << 6)
51 #define X86_DEBUGCTL_BTS                (1 << 7)
52 #define X86_DEBUGCTL_BTINT              (1 << 8)
53 #define X86_DEBUGCTL_BTS_OFF_OS         (1 << 9)
54 #define X86_DEBUGCTL_BTS_OFF_USR        (1 << 10)
55
56 /*
57  * A debug store configuration.
58  *
59  * We only support architectures that use 64bit fields.
60  */
61 struct debug_store {
62         u64     bts_buffer_base;
63         u64     bts_index;
64         u64     bts_absolute_maximum;
65         u64     bts_interrupt_threshold;
66         u64     pebs_buffer_base;
67         u64     pebs_index;
68         u64     pebs_absolute_maximum;
69         u64     pebs_interrupt_threshold;
70         u64     pebs_event_reset[MAX_PEBS_EVENTS];
71 };
72
73 struct event_constraint {
74         union {
75                 unsigned long   idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
76                 u64             idxmsk64[1];
77         };
78         int     code;
79         int     cmask;
80         int     weight;
81 };
82
83 struct cpu_hw_events {
84         struct perf_event       *events[X86_PMC_IDX_MAX]; /* in counter order */
85         unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
86         unsigned long           interrupts;
87         int                     enabled;
88         struct debug_store      *ds;
89
90         int                     n_events;
91         int                     n_added;
92         int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
93         struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
94 };
95
96 #define EVENT_CONSTRAINT(c, n, m) {     \
97         { .idxmsk64[0] = (n) },         \
98         .code = (c),                    \
99         .cmask = (m),                   \
100         .weight = HWEIGHT64((u64)(n)),  \
101 }
102
103 #define INTEL_EVENT_CONSTRAINT(c, n)    \
104         EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
105
106 #define FIXED_EVENT_CONSTRAINT(c, n)    \
107         EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
108
109 #define EVENT_CONSTRAINT_END            \
110         EVENT_CONSTRAINT(0, 0, 0)
111
112 #define for_each_event_constraint(e, c) \
113         for ((e) = (c); (e)->cmask; (e)++)
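/*
 * Worked example (illustration, not part of the original file):
 * INTEL_EVENT_CONSTRAINT(0x12, 0x2) describes the MUL event: ->code is
 * the event-select 0x12, ->idxmsk64[0] = 0x2 allows only counter 1,
 * ->weight = HWEIGHT64(0x2) = 1 possible counter, and ->cmask =
 * INTEL_ARCH_EVTSEL_MASK so only the event-select bits of a config are
 * compared against ->code when a constraint is looked up.
 */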
114
115 /*
116  * struct x86_pmu - generic x86 pmu
117  */
118 struct x86_pmu {
119         const char      *name;
120         int             version;
121         int             (*handle_irq)(struct pt_regs *);
122         void            (*disable_all)(void);
123         void            (*enable_all)(void);
124         void            (*enable)(struct hw_perf_event *, int);
125         void            (*disable)(struct hw_perf_event *, int);
126         unsigned        eventsel;
127         unsigned        perfctr;
128         u64             (*event_map)(int);
129         u64             (*raw_event)(u64);
130         int             max_events;
131         int             num_events;
132         int             num_events_fixed;
133         int             event_bits;
134         u64             event_mask;
135         int             apic;
136         u64             max_period;
137         u64             intel_ctrl;
138         void            (*enable_bts)(u64 config);
139         void            (*disable_bts)(void);
140
141         struct event_constraint *
142                         (*get_event_constraints)(struct cpu_hw_events *cpuc,
143                                                  struct perf_event *event);
144
145         void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
146                                                  struct perf_event *event);
147         struct event_constraint *event_constraints;
148 };
149
150 static struct x86_pmu x86_pmu __read_mostly;
151
152 static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
153         .enabled = 1,
154 };
155
156 static int x86_perf_event_set_period(struct perf_event *event,
157                              struct hw_perf_event *hwc, int idx);
158
159 /*
160  * Not sure about some of these
161  */
162 static const u64 p6_perfmon_event_map[] =
163 {
164   [PERF_COUNT_HW_CPU_CYCLES]            = 0x0079,
165   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
166   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x0f2e,
167   [PERF_COUNT_HW_CACHE_MISSES]          = 0x012e,
168   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
169   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
170   [PERF_COUNT_HW_BUS_CYCLES]            = 0x0062,
171 };
172
173 static u64 p6_pmu_event_map(int hw_event)
174 {
175         return p6_perfmon_event_map[hw_event];
176 }
177
178 /*
179  * Event setting that is specified not to count anything.
180  * We use this to effectively disable a counter.
181  *
182  * L2_RQSTS with 0 MESI unit mask.
183  */
184 #define P6_NOP_EVENT                    0x0000002EULL
185
186 static u64 p6_pmu_raw_event(u64 hw_event)
187 {
188 #define P6_EVNTSEL_EVENT_MASK           0x000000FFULL
189 #define P6_EVNTSEL_UNIT_MASK            0x0000FF00ULL
190 #define P6_EVNTSEL_EDGE_MASK            0x00040000ULL
191 #define P6_EVNTSEL_INV_MASK             0x00800000ULL
192 #define P6_EVNTSEL_REG_MASK             0xFF000000ULL
193
194 #define P6_EVNTSEL_MASK                 \
195         (P6_EVNTSEL_EVENT_MASK |        \
196          P6_EVNTSEL_UNIT_MASK  |        \
197          P6_EVNTSEL_EDGE_MASK  |        \
198          P6_EVNTSEL_INV_MASK   |        \
199          P6_EVNTSEL_REG_MASK)
200
201         return hw_event & P6_EVNTSEL_MASK;
202 }
203
204 static struct event_constraint intel_p6_event_constraints[] =
205 {
206         INTEL_EVENT_CONSTRAINT(0xc1, 0x1),      /* FLOPS */
207         INTEL_EVENT_CONSTRAINT(0x10, 0x1),      /* FP_COMP_OPS_EXE */
208         INTEL_EVENT_CONSTRAINT(0x11, 0x1),      /* FP_ASSIST */
209         INTEL_EVENT_CONSTRAINT(0x12, 0x2),      /* MUL */
210         INTEL_EVENT_CONSTRAINT(0x13, 0x2),      /* DIV */
211         INTEL_EVENT_CONSTRAINT(0x14, 0x1),      /* CYCLES_DIV_BUSY */
212         EVENT_CONSTRAINT_END
213 };
214
215 /*
216  * Intel PerfMon v3. Used on Core2 and later.
217  */
218 static const u64 intel_perfmon_event_map[] =
219 {
220   [PERF_COUNT_HW_CPU_CYCLES]            = 0x003c,
221   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
222   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x4f2e,
223   [PERF_COUNT_HW_CACHE_MISSES]          = 0x412e,
224   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
225   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
226   [PERF_COUNT_HW_BUS_CYCLES]            = 0x013c,
227 };
228
229 static struct event_constraint intel_core_event_constraints[] =
230 {
231         FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
232         FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
233         INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
234         INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
235         INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
236         INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
237         INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
238         INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
239         INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
240         INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
241         INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
242         EVENT_CONSTRAINT_END
243 };
244
245 static struct event_constraint intel_nehalem_event_constraints[] =
246 {
247         FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
248         FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
249         INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
250         INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
251         INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
252         INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
253         INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
254         INTEL_EVENT_CONSTRAINT(0x4c, 0x3), /* LOAD_HIT_PRE */
255         INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
256         INTEL_EVENT_CONSTRAINT(0x52, 0x3), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
257         INTEL_EVENT_CONSTRAINT(0x53, 0x3), /* L1D_CACHE_LOCK_FB_HIT */
258         INTEL_EVENT_CONSTRAINT(0xc5, 0x3), /* CACHE_LOCK_CYCLES */
259         EVENT_CONSTRAINT_END
260 };
261
262 static struct event_constraint intel_gen_event_constraints[] =
263 {
264         FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
265         FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
266         EVENT_CONSTRAINT_END
267 };
268
269 static u64 intel_pmu_event_map(int hw_event)
270 {
271         return intel_perfmon_event_map[hw_event];
272 }
273
274 /*
275  * Generalized hw caching related hw_event table, filled
276  * in on a per model basis. A value of 0 means
277  * 'not supported', -1 means 'hw_event makes no sense on
278  * this CPU', any other value means the raw hw_event
279  * ID.
280  */
281
282 #define C(x) PERF_COUNT_HW_CACHE_##x
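/*
 * Sketch of how these tables are consumed (see set_ext_hw_attr() below);
 * a PERF_TYPE_HW_CACHE config packs type, op and result one byte each:
 *
 *	attr.config = C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16);
 *
 * which resolves to hw_cache_event_ids[C(L1D)][C(OP_READ)][C(RESULT_MISS)],
 * e.g. 0x0140 (L1D_CACHE_LD.I_STATE) on Nehalem.
 */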
283
284 static u64 __read_mostly hw_cache_event_ids
285                                 [PERF_COUNT_HW_CACHE_MAX]
286                                 [PERF_COUNT_HW_CACHE_OP_MAX]
287                                 [PERF_COUNT_HW_CACHE_RESULT_MAX];
288
289 static __initconst u64 nehalem_hw_cache_event_ids
290                                 [PERF_COUNT_HW_CACHE_MAX]
291                                 [PERF_COUNT_HW_CACHE_OP_MAX]
292                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
293 {
294  [ C(L1D) ] = {
295         [ C(OP_READ) ] = {
296                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI            */
297                 [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE         */
298         },
299         [ C(OP_WRITE) ] = {
300                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI            */
301                 [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE         */
302         },
303         [ C(OP_PREFETCH) ] = {
304                 [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
305                 [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
306         },
307  },
308  [ C(L1I ) ] = {
309         [ C(OP_READ) ] = {
310                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
311                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
312         },
313         [ C(OP_WRITE) ] = {
314                 [ C(RESULT_ACCESS) ] = -1,
315                 [ C(RESULT_MISS)   ] = -1,
316         },
317         [ C(OP_PREFETCH) ] = {
318                 [ C(RESULT_ACCESS) ] = 0x0,
319                 [ C(RESULT_MISS)   ] = 0x0,
320         },
321  },
322  [ C(LL  ) ] = {
323         [ C(OP_READ) ] = {
324                 [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
325                 [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
326         },
327         [ C(OP_WRITE) ] = {
328                 [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
329                 [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
330         },
331         [ C(OP_PREFETCH) ] = {
332                 [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
333                 [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
334         },
335  },
336  [ C(DTLB) ] = {
337         [ C(OP_READ) ] = {
338                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
339                 [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
340         },
341         [ C(OP_WRITE) ] = {
342                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
343                 [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
344         },
345         [ C(OP_PREFETCH) ] = {
346                 [ C(RESULT_ACCESS) ] = 0x0,
347                 [ C(RESULT_MISS)   ] = 0x0,
348         },
349  },
350  [ C(ITLB) ] = {
351         [ C(OP_READ) ] = {
352                 [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
353                 [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
354         },
355         [ C(OP_WRITE) ] = {
356                 [ C(RESULT_ACCESS) ] = -1,
357                 [ C(RESULT_MISS)   ] = -1,
358         },
359         [ C(OP_PREFETCH) ] = {
360                 [ C(RESULT_ACCESS) ] = -1,
361                 [ C(RESULT_MISS)   ] = -1,
362         },
363  },
364  [ C(BPU ) ] = {
365         [ C(OP_READ) ] = {
366                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
367                 [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
368         },
369         [ C(OP_WRITE) ] = {
370                 [ C(RESULT_ACCESS) ] = -1,
371                 [ C(RESULT_MISS)   ] = -1,
372         },
373         [ C(OP_PREFETCH) ] = {
374                 [ C(RESULT_ACCESS) ] = -1,
375                 [ C(RESULT_MISS)   ] = -1,
376         },
377  },
378 };
379
380 static __initconst u64 core2_hw_cache_event_ids
381                                 [PERF_COUNT_HW_CACHE_MAX]
382                                 [PERF_COUNT_HW_CACHE_OP_MAX]
383                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
384 {
385  [ C(L1D) ] = {
386         [ C(OP_READ) ] = {
387                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
388                 [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
389         },
390         [ C(OP_WRITE) ] = {
391                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
392                 [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
393         },
394         [ C(OP_PREFETCH) ] = {
395                 [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
396                 [ C(RESULT_MISS)   ] = 0,
397         },
398  },
399  [ C(L1I ) ] = {
400         [ C(OP_READ) ] = {
401                 [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
402                 [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
403         },
404         [ C(OP_WRITE) ] = {
405                 [ C(RESULT_ACCESS) ] = -1,
406                 [ C(RESULT_MISS)   ] = -1,
407         },
408         [ C(OP_PREFETCH) ] = {
409                 [ C(RESULT_ACCESS) ] = 0,
410                 [ C(RESULT_MISS)   ] = 0,
411         },
412  },
413  [ C(LL  ) ] = {
414         [ C(OP_READ) ] = {
415                 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
416                 [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
417         },
418         [ C(OP_WRITE) ] = {
419                 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
420                 [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
421         },
422         [ C(OP_PREFETCH) ] = {
423                 [ C(RESULT_ACCESS) ] = 0,
424                 [ C(RESULT_MISS)   ] = 0,
425         },
426  },
427  [ C(DTLB) ] = {
428         [ C(OP_READ) ] = {
429                 [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
430                 [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
431         },
432         [ C(OP_WRITE) ] = {
433                 [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
434                 [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
435         },
436         [ C(OP_PREFETCH) ] = {
437                 [ C(RESULT_ACCESS) ] = 0,
438                 [ C(RESULT_MISS)   ] = 0,
439         },
440  },
441  [ C(ITLB) ] = {
442         [ C(OP_READ) ] = {
443                 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
444                 [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
445         },
446         [ C(OP_WRITE) ] = {
447                 [ C(RESULT_ACCESS) ] = -1,
448                 [ C(RESULT_MISS)   ] = -1,
449         },
450         [ C(OP_PREFETCH) ] = {
451                 [ C(RESULT_ACCESS) ] = -1,
452                 [ C(RESULT_MISS)   ] = -1,
453         },
454  },
455  [ C(BPU ) ] = {
456         [ C(OP_READ) ] = {
457                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
458                 [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
459         },
460         [ C(OP_WRITE) ] = {
461                 [ C(RESULT_ACCESS) ] = -1,
462                 [ C(RESULT_MISS)   ] = -1,
463         },
464         [ C(OP_PREFETCH) ] = {
465                 [ C(RESULT_ACCESS) ] = -1,
466                 [ C(RESULT_MISS)   ] = -1,
467         },
468  },
469 };
470
471 static __initconst u64 atom_hw_cache_event_ids
472                                 [PERF_COUNT_HW_CACHE_MAX]
473                                 [PERF_COUNT_HW_CACHE_OP_MAX]
474                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
475 {
476  [ C(L1D) ] = {
477         [ C(OP_READ) ] = {
478                 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
479                 [ C(RESULT_MISS)   ] = 0,
480         },
481         [ C(OP_WRITE) ] = {
482                 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
483                 [ C(RESULT_MISS)   ] = 0,
484         },
485         [ C(OP_PREFETCH) ] = {
486                 [ C(RESULT_ACCESS) ] = 0x0,
487                 [ C(RESULT_MISS)   ] = 0,
488         },
489  },
490  [ C(L1I ) ] = {
491         [ C(OP_READ) ] = {
492                 [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
493                 [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
494         },
495         [ C(OP_WRITE) ] = {
496                 [ C(RESULT_ACCESS) ] = -1,
497                 [ C(RESULT_MISS)   ] = -1,
498         },
499         [ C(OP_PREFETCH) ] = {
500                 [ C(RESULT_ACCESS) ] = 0,
501                 [ C(RESULT_MISS)   ] = 0,
502         },
503  },
504  [ C(LL  ) ] = {
505         [ C(OP_READ) ] = {
506                 [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
507                 [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
508         },
509         [ C(OP_WRITE) ] = {
510                 [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
511                 [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
512         },
513         [ C(OP_PREFETCH) ] = {
514                 [ C(RESULT_ACCESS) ] = 0,
515                 [ C(RESULT_MISS)   ] = 0,
516         },
517  },
518  [ C(DTLB) ] = {
519         [ C(OP_READ) ] = {
520                 [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
521                 [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
522         },
523         [ C(OP_WRITE) ] = {
524                 [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
525                 [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
526         },
527         [ C(OP_PREFETCH) ] = {
528                 [ C(RESULT_ACCESS) ] = 0,
529                 [ C(RESULT_MISS)   ] = 0,
530         },
531  },
532  [ C(ITLB) ] = {
533         [ C(OP_READ) ] = {
534                 [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
535                 [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
536         },
537         [ C(OP_WRITE) ] = {
538                 [ C(RESULT_ACCESS) ] = -1,
539                 [ C(RESULT_MISS)   ] = -1,
540         },
541         [ C(OP_PREFETCH) ] = {
542                 [ C(RESULT_ACCESS) ] = -1,
543                 [ C(RESULT_MISS)   ] = -1,
544         },
545  },
546  [ C(BPU ) ] = {
547         [ C(OP_READ) ] = {
548                 [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
549                 [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
550         },
551         [ C(OP_WRITE) ] = {
552                 [ C(RESULT_ACCESS) ] = -1,
553                 [ C(RESULT_MISS)   ] = -1,
554         },
555         [ C(OP_PREFETCH) ] = {
556                 [ C(RESULT_ACCESS) ] = -1,
557                 [ C(RESULT_MISS)   ] = -1,
558         },
559  },
560 };
561
562 static u64 intel_pmu_raw_event(u64 hw_event)
563 {
564 #define CORE_EVNTSEL_EVENT_MASK         0x000000FFULL
565 #define CORE_EVNTSEL_UNIT_MASK          0x0000FF00ULL
566 #define CORE_EVNTSEL_EDGE_MASK          0x00040000ULL
567 #define CORE_EVNTSEL_INV_MASK           0x00800000ULL
568 #define CORE_EVNTSEL_REG_MASK           0xFF000000ULL
569
570 #define CORE_EVNTSEL_MASK               \
571         (INTEL_ARCH_EVTSEL_MASK |       \
572          INTEL_ARCH_UNIT_MASK   |       \
573          INTEL_ARCH_EDGE_MASK   |       \
574          INTEL_ARCH_INV_MASK    |       \
575          INTEL_ARCH_CNT_MASK)
576
577         return hw_event & CORE_EVNTSEL_MASK;
578 }
579
580 static __initconst u64 amd_hw_cache_event_ids
581                                 [PERF_COUNT_HW_CACHE_MAX]
582                                 [PERF_COUNT_HW_CACHE_OP_MAX]
583                                 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
584 {
585  [ C(L1D) ] = {
586         [ C(OP_READ) ] = {
587                 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
588                 [ C(RESULT_MISS)   ] = 0x0041, /* Data Cache Misses          */
589         },
590         [ C(OP_WRITE) ] = {
591                 [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
592                 [ C(RESULT_MISS)   ] = 0,
593         },
594         [ C(OP_PREFETCH) ] = {
595                 [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
596                 [ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
597         },
598  },
599  [ C(L1I ) ] = {
600         [ C(OP_READ) ] = {
601                 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
602                 [ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
603         },
604         [ C(OP_WRITE) ] = {
605                 [ C(RESULT_ACCESS) ] = -1,
606                 [ C(RESULT_MISS)   ] = -1,
607         },
608         [ C(OP_PREFETCH) ] = {
609                 [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
610                 [ C(RESULT_MISS)   ] = 0,
611         },
612  },
613  [ C(LL  ) ] = {
614         [ C(OP_READ) ] = {
615                 [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
616                 [ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
617         },
618         [ C(OP_WRITE) ] = {
619                 [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
620                 [ C(RESULT_MISS)   ] = 0,
621         },
622         [ C(OP_PREFETCH) ] = {
623                 [ C(RESULT_ACCESS) ] = 0,
624                 [ C(RESULT_MISS)   ] = 0,
625         },
626  },
627  [ C(DTLB) ] = {
628         [ C(OP_READ) ] = {
629                 [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
 630                 [ C(RESULT_MISS)   ] = 0x0046, /* L1 DTLB and L2 DTLB Miss   */
631         },
632         [ C(OP_WRITE) ] = {
633                 [ C(RESULT_ACCESS) ] = 0,
634                 [ C(RESULT_MISS)   ] = 0,
635         },
636         [ C(OP_PREFETCH) ] = {
637                 [ C(RESULT_ACCESS) ] = 0,
638                 [ C(RESULT_MISS)   ] = 0,
639         },
640  },
641  [ C(ITLB) ] = {
642         [ C(OP_READ) ] = {
 643                 [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
644                 [ C(RESULT_MISS)   ] = 0x0085, /* Instr. fetch ITLB misses   */
645         },
646         [ C(OP_WRITE) ] = {
647                 [ C(RESULT_ACCESS) ] = -1,
648                 [ C(RESULT_MISS)   ] = -1,
649         },
650         [ C(OP_PREFETCH) ] = {
651                 [ C(RESULT_ACCESS) ] = -1,
652                 [ C(RESULT_MISS)   ] = -1,
653         },
654  },
655  [ C(BPU ) ] = {
656         [ C(OP_READ) ] = {
657                 [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
658                 [ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
659         },
660         [ C(OP_WRITE) ] = {
661                 [ C(RESULT_ACCESS) ] = -1,
662                 [ C(RESULT_MISS)   ] = -1,
663         },
664         [ C(OP_PREFETCH) ] = {
665                 [ C(RESULT_ACCESS) ] = -1,
666                 [ C(RESULT_MISS)   ] = -1,
667         },
668  },
669 };
670
671 /*
672  * AMD Performance Monitor K7 and later.
673  */
674 static const u64 amd_perfmon_event_map[] =
675 {
676   [PERF_COUNT_HW_CPU_CYCLES]            = 0x0076,
677   [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
678   [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x0080,
679   [PERF_COUNT_HW_CACHE_MISSES]          = 0x0081,
680   [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
681   [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
682 };
683
684 static u64 amd_pmu_event_map(int hw_event)
685 {
686         return amd_perfmon_event_map[hw_event];
687 }
688
689 static u64 amd_pmu_raw_event(u64 hw_event)
690 {
691 #define K7_EVNTSEL_EVENT_MASK   0x7000000FFULL
692 #define K7_EVNTSEL_UNIT_MASK    0x00000FF00ULL
693 #define K7_EVNTSEL_EDGE_MASK    0x000040000ULL
694 #define K7_EVNTSEL_INV_MASK     0x000800000ULL
695 #define K7_EVNTSEL_REG_MASK     0x0FF000000ULL
696
697 #define K7_EVNTSEL_MASK                 \
698         (K7_EVNTSEL_EVENT_MASK |        \
699          K7_EVNTSEL_UNIT_MASK  |        \
700          K7_EVNTSEL_EDGE_MASK  |        \
701          K7_EVNTSEL_INV_MASK   |        \
702          K7_EVNTSEL_REG_MASK)
703
704         return hw_event & K7_EVNTSEL_MASK;
705 }
706
707 /*
708  * Propagate event elapsed time into the generic event.
709  * Can only be executed on the CPU where the event is active.
710  * Returns the delta events processed.
711  */
712 static u64
713 x86_perf_event_update(struct perf_event *event,
714                         struct hw_perf_event *hwc, int idx)
715 {
716         int shift = 64 - x86_pmu.event_bits;
717         u64 prev_raw_count, new_raw_count;
718         s64 delta;
719
720         if (idx == X86_PMC_IDX_FIXED_BTS)
721                 return 0;
722
723         /*
724          * Careful: an NMI might modify the previous event value.
725          *
726          * Our tactic to handle this is to first atomically read and
727          * exchange a new raw count - then add that new-prev delta
728          * count to the generic event atomically:
729          */
730 again:
731         prev_raw_count = atomic64_read(&hwc->prev_count);
732         rdmsrl(hwc->event_base + idx, new_raw_count);
733
734         if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
735                                         new_raw_count) != prev_raw_count)
736                 goto again;
737
738         /*
739          * Now we have the new raw value and have updated the prev
740          * timestamp already. We can now calculate the elapsed delta
741          * (event-)time and add that to the generic event.
742          *
743          * Careful, not all hw sign-extends above the physical width
744          * of the count.
745          */
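        /*
         * Example (illustration only): with 48-bit counters,
         * x86_pmu.event_bits == 48 and shift == 16; shifting both raw
         * values left by 16 moves counter bit 47 into the s64 sign bit,
         * so the subtraction and arithmetic right shift below produce a
         * correctly sign-extended delta even across a counter wrap.
         */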
746         delta = (new_raw_count << shift) - (prev_raw_count << shift);
747         delta >>= shift;
748
749         atomic64_add(delta, &event->count);
750         atomic64_sub(delta, &hwc->period_left);
751
752         return new_raw_count;
753 }
754
755 static atomic_t active_events;
756 static DEFINE_MUTEX(pmc_reserve_mutex);
757
758 static bool reserve_pmc_hardware(void)
759 {
760 #ifdef CONFIG_X86_LOCAL_APIC
761         int i;
762
763         if (nmi_watchdog == NMI_LOCAL_APIC)
764                 disable_lapic_nmi_watchdog();
765
766         for (i = 0; i < x86_pmu.num_events; i++) {
767                 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
768                         goto perfctr_fail;
769         }
770
771         for (i = 0; i < x86_pmu.num_events; i++) {
772                 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
773                         goto eventsel_fail;
774         }
775 #endif
776
777         return true;
778
779 #ifdef CONFIG_X86_LOCAL_APIC
780 eventsel_fail:
781         for (i--; i >= 0; i--)
782                 release_evntsel_nmi(x86_pmu.eventsel + i);
783
784         i = x86_pmu.num_events;
785
786 perfctr_fail:
787         for (i--; i >= 0; i--)
788                 release_perfctr_nmi(x86_pmu.perfctr + i);
789
790         if (nmi_watchdog == NMI_LOCAL_APIC)
791                 enable_lapic_nmi_watchdog();
792
793         return false;
794 #endif
795 }
796
797 static void release_pmc_hardware(void)
798 {
799 #ifdef CONFIG_X86_LOCAL_APIC
800         int i;
801
802         for (i = 0; i < x86_pmu.num_events; i++) {
803                 release_perfctr_nmi(x86_pmu.perfctr + i);
804                 release_evntsel_nmi(x86_pmu.eventsel + i);
805         }
806
807         if (nmi_watchdog == NMI_LOCAL_APIC)
808                 enable_lapic_nmi_watchdog();
809 #endif
810 }
811
812 static inline bool bts_available(void)
813 {
814         return x86_pmu.enable_bts != NULL;
815 }
816
817 static inline void init_debug_store_on_cpu(int cpu)
818 {
819         struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
820
821         if (!ds)
822                 return;
823
824         wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
825                      (u32)((u64)(unsigned long)ds),
826                      (u32)((u64)(unsigned long)ds >> 32));
827 }
828
829 static inline void fini_debug_store_on_cpu(int cpu)
830 {
831         if (!per_cpu(cpu_hw_events, cpu).ds)
832                 return;
833
834         wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
835 }
836
837 static void release_bts_hardware(void)
838 {
839         int cpu;
840
841         if (!bts_available())
842                 return;
843
844         get_online_cpus();
845
846         for_each_online_cpu(cpu)
847                 fini_debug_store_on_cpu(cpu);
848
849         for_each_possible_cpu(cpu) {
850                 struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
851
852                 if (!ds)
853                         continue;
854
855                 per_cpu(cpu_hw_events, cpu).ds = NULL;
856
857                 kfree((void *)(unsigned long)ds->bts_buffer_base);
858                 kfree(ds);
859         }
860
861         put_online_cpus();
862 }
863
864 static int reserve_bts_hardware(void)
865 {
866         int cpu, err = 0;
867
868         if (!bts_available())
869                 return 0;
870
871         get_online_cpus();
872
873         for_each_possible_cpu(cpu) {
874                 struct debug_store *ds;
875                 void *buffer;
876
877                 err = -ENOMEM;
878                 buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
879                 if (unlikely(!buffer))
880                         break;
881
882                 ds = kzalloc(sizeof(*ds), GFP_KERNEL);
883                 if (unlikely(!ds)) {
884                         kfree(buffer);
885                         break;
886                 }
887
888                 ds->bts_buffer_base = (u64)(unsigned long)buffer;
889                 ds->bts_index = ds->bts_buffer_base;
890                 ds->bts_absolute_maximum =
891                         ds->bts_buffer_base + BTS_BUFFER_SIZE;
892                 ds->bts_interrupt_threshold =
893                         ds->bts_absolute_maximum - BTS_OVFL_TH;
894
895                 per_cpu(cpu_hw_events, cpu).ds = ds;
896                 err = 0;
897         }
898
899         if (err)
900                 release_bts_hardware();
901         else {
902                 for_each_online_cpu(cpu)
903                         init_debug_store_on_cpu(cpu);
904         }
905
906         put_online_cpus();
907
908         return err;
909 }
910
911 static void hw_perf_event_destroy(struct perf_event *event)
912 {
913         if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
914                 release_pmc_hardware();
915                 release_bts_hardware();
916                 mutex_unlock(&pmc_reserve_mutex);
917         }
918 }
919
920 static inline int x86_pmu_initialized(void)
921 {
922         return x86_pmu.handle_irq != NULL;
923 }
924
925 static inline int
926 set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
927 {
928         unsigned int cache_type, cache_op, cache_result;
929         u64 config, val;
930
931         config = attr->config;
932
933         cache_type = (config >>  0) & 0xff;
934         if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
935                 return -EINVAL;
936
937         cache_op = (config >>  8) & 0xff;
938         if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
939                 return -EINVAL;
940
941         cache_result = (config >> 16) & 0xff;
942         if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
943                 return -EINVAL;
944
945         val = hw_cache_event_ids[cache_type][cache_op][cache_result];
946
947         if (val == 0)
948                 return -ENOENT;
949
950         if (val == -1)
951                 return -EINVAL;
952
953         hwc->config |= val;
954
955         return 0;
956 }
957
958 static void intel_pmu_enable_bts(u64 config)
959 {
960         unsigned long debugctlmsr;
961
962         debugctlmsr = get_debugctlmsr();
963
964         debugctlmsr |= X86_DEBUGCTL_TR;
965         debugctlmsr |= X86_DEBUGCTL_BTS;
966         debugctlmsr |= X86_DEBUGCTL_BTINT;
967
968         if (!(config & ARCH_PERFMON_EVENTSEL_OS))
969                 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
970
971         if (!(config & ARCH_PERFMON_EVENTSEL_USR))
972                 debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
973
974         update_debugctlmsr(debugctlmsr);
975 }
976
977 static void intel_pmu_disable_bts(void)
978 {
979         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
980         unsigned long debugctlmsr;
981
982         if (!cpuc->ds)
983                 return;
984
985         debugctlmsr = get_debugctlmsr();
986
987         debugctlmsr &=
988                 ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
989                   X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
990
991         update_debugctlmsr(debugctlmsr);
992 }
993
994 /*
995  * Setup the hardware configuration for a given attr_type
996  */
997 static int __hw_perf_event_init(struct perf_event *event)
998 {
999         struct perf_event_attr *attr = &event->attr;
1000         struct hw_perf_event *hwc = &event->hw;
1001         u64 config;
1002         int err;
1003
1004         if (!x86_pmu_initialized())
1005                 return -ENODEV;
1006
1007         err = 0;
1008         if (!atomic_inc_not_zero(&active_events)) {
1009                 mutex_lock(&pmc_reserve_mutex);
1010                 if (atomic_read(&active_events) == 0) {
1011                         if (!reserve_pmc_hardware())
1012                                 err = -EBUSY;
1013                         else
1014                                 err = reserve_bts_hardware();
1015                 }
1016                 if (!err)
1017                         atomic_inc(&active_events);
1018                 mutex_unlock(&pmc_reserve_mutex);
1019         }
1020         if (err)
1021                 return err;
1022
1023         event->destroy = hw_perf_event_destroy;
1024
1025         /*
1026          * Generate PMC IRQs:
1027          * (keep 'enabled' bit clear for now)
1028          */
1029         hwc->config = ARCH_PERFMON_EVENTSEL_INT;
1030
1031         hwc->idx = -1;
1032
1033         /*
1034          * Count user and OS events unless requested not to.
1035          */
1036         if (!attr->exclude_user)
1037                 hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
1038         if (!attr->exclude_kernel)
1039                 hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
1040
1041         if (!hwc->sample_period) {
1042                 hwc->sample_period = x86_pmu.max_period;
1043                 hwc->last_period = hwc->sample_period;
1044                 atomic64_set(&hwc->period_left, hwc->sample_period);
1045         } else {
1046                 /*
1047                  * If we have a PMU initialized but no APIC
1048                  * interrupts, we cannot sample hardware
1049                  * events (user-space has to fall back and
1050                  * sample via a hrtimer based software event):
1051                  */
1052                 if (!x86_pmu.apic)
1053                         return -EOPNOTSUPP;
1054         }
1055
1056         /*
 1057          * A raw hw_event type provides the config directly in the hw_event structure
1058          */
1059         if (attr->type == PERF_TYPE_RAW) {
1060                 hwc->config |= x86_pmu.raw_event(attr->config);
1061                 return 0;
1062         }
1063
1064         if (attr->type == PERF_TYPE_HW_CACHE)
1065                 return set_ext_hw_attr(hwc, attr);
1066
1067         if (attr->config >= x86_pmu.max_events)
1068                 return -EINVAL;
1069
1070         /*
1071          * The generic map:
1072          */
1073         config = x86_pmu.event_map(attr->config);
1074
1075         if (config == 0)
1076                 return -ENOENT;
1077
1078         if (config == -1LL)
1079                 return -EINVAL;
1080
1081         /*
1082          * Branch tracing:
1083          */
1084         if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
1085             (hwc->sample_period == 1)) {
1086                 /* BTS is not supported by this architecture. */
1087                 if (!bts_available())
1088                         return -EOPNOTSUPP;
1089
1090                 /* BTS is currently only allowed for user-mode. */
1091                 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1092                         return -EOPNOTSUPP;
1093         }
1094
1095         hwc->config |= config;
1096
1097         return 0;
1098 }
1099
1100 static void p6_pmu_disable_all(void)
1101 {
1102         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1103         u64 val;
1104
1105         if (!cpuc->enabled)
1106                 return;
1107
1108         cpuc->enabled = 0;
1109         barrier();
1110
1111         /* p6 only has one enable register */
1112         rdmsrl(MSR_P6_EVNTSEL0, val);
1113         val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1114         wrmsrl(MSR_P6_EVNTSEL0, val);
1115 }
1116
1117 static void intel_pmu_disable_all(void)
1118 {
1119         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1120
1121         if (!cpuc->enabled)
1122                 return;
1123
1124         cpuc->enabled = 0;
1125         barrier();
1126
1127         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
1128
1129         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
1130                 intel_pmu_disable_bts();
1131 }
1132
1133 static void amd_pmu_disable_all(void)
1134 {
1135         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1136         int idx;
1137
1138         if (!cpuc->enabled)
1139                 return;
1140
1141         cpuc->enabled = 0;
1142         /*
1143          * ensure we write the disable before we start disabling the
1144          * events proper, so that amd_pmu_enable_event() does the
1145          * right thing.
1146          */
1147         barrier();
1148
1149         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1150                 u64 val;
1151
1152                 if (!test_bit(idx, cpuc->active_mask))
1153                         continue;
1154                 rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
1155                 if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
1156                         continue;
1157                 val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
1158                 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1159         }
1160 }
1161
1162 void hw_perf_disable(void)
1163 {
1164         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1165
1166         if (!x86_pmu_initialized())
1167                 return;
1168
1169         if (cpuc->enabled)
1170                 cpuc->n_added = 0;
1171
1172         x86_pmu.disable_all();
1173 }
1174
1175 static void p6_pmu_enable_all(void)
1176 {
1177         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1178         unsigned long val;
1179
1180         if (cpuc->enabled)
1181                 return;
1182
1183         cpuc->enabled = 1;
1184         barrier();
1185
1186         /* p6 only has one enable register */
1187         rdmsrl(MSR_P6_EVNTSEL0, val);
1188         val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1189         wrmsrl(MSR_P6_EVNTSEL0, val);
1190 }
1191
1192 static void intel_pmu_enable_all(void)
1193 {
1194         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1195
1196         if (cpuc->enabled)
1197                 return;
1198
1199         cpuc->enabled = 1;
1200         barrier();
1201
1202         wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
1203
1204         if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
1205                 struct perf_event *event =
1206                         cpuc->events[X86_PMC_IDX_FIXED_BTS];
1207
1208                 if (WARN_ON_ONCE(!event))
1209                         return;
1210
1211                 intel_pmu_enable_bts(event->hw.config);
1212         }
1213 }
1214
1215 static void amd_pmu_enable_all(void)
1216 {
1217         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1218         int idx;
1219
1220         if (cpuc->enabled)
1221                 return;
1222
1223         cpuc->enabled = 1;
1224         barrier();
1225
1226         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1227                 struct perf_event *event = cpuc->events[idx];
1228                 u64 val;
1229
1230                 if (!test_bit(idx, cpuc->active_mask))
1231                         continue;
1232
1233                 val = event->hw.config;
1234                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1235                 wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
1236         }
1237 }
1238
1239 static const struct pmu pmu;
1240
1241 static inline int is_x86_event(struct perf_event *event)
1242 {
1243         return event->pmu == &pmu;
1244 }
1245
1246 static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
1247 {
1248         struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
1249         unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
1250         int i, j, w, wmax, num = 0;
1251         struct hw_perf_event *hwc;
1252
1253         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1254
1255         for (i = 0; i < n; i++) {
1256                 constraints[i] =
1257                   x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
1258         }
1259
1260         /*
1261          * fastpath, try to reuse previous register
1262          */
1263         for (i = 0; i < n; i++) {
1264                 hwc = &cpuc->event_list[i]->hw;
1265                 c = constraints[i];
1266
1267                 /* never assigned */
1268                 if (hwc->idx == -1)
1269                         break;
1270
1271                 /* constraint still honored */
1272                 if (!test_bit(hwc->idx, c->idxmsk))
1273                         break;
1274
1275                 /* not already used */
1276                 if (test_bit(hwc->idx, used_mask))
1277                         break;
1278
1279                 set_bit(hwc->idx, used_mask);
1280                 if (assign)
1281                         assign[i] = hwc->idx;
1282         }
1283         if (i == n)
1284                 goto done;
1285
1286         /*
1287          * begin slow path
1288          */
1289
1290         bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1291
1292         /*
1293          * weight = number of possible counters
1294          *
1295          * 1    = most constrained, only works on one counter
1296          * wmax = least constrained, works on any counter
1297          *
1298          * assign events to counters starting with most
1299          * constrained events.
1300          */
1301         wmax = x86_pmu.num_events;
1302
1303         /*
1304          * when fixed event counters are present,
1305          * wmax is incremented by 1 to account
1306          * for one more choice
1307          */
1308         if (x86_pmu.num_events_fixed)
1309                 wmax++;
1310
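        /*
         * Worked example (illustration only): if event A may only use
         * counter 0 (weight 1) and event B may use counters 0-1
         * (weight 2), the w == 1 pass places A on counter 0 and the
         * w == 2 pass then finds counter 0 busy and places B on
         * counter 1; scanning least-constrained events first could
         * instead have wasted counter 0 on B.
         */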
1311         for (w = 1, num = n; num && w <= wmax; w++) {
1312                 /* for each event */
1313                 for (i = 0; num && i < n; i++) {
1314                         c = constraints[i];
1315                         hwc = &cpuc->event_list[i]->hw;
1316
1317                         if (c->weight != w)
1318                                 continue;
1319
1320                         for_each_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
1321                                 if (!test_bit(j, used_mask))
1322                                         break;
1323                         }
1324
1325                         if (j == X86_PMC_IDX_MAX)
1326                                 break;
1327
1328                         set_bit(j, used_mask);
1329
1330                         if (assign)
1331                                 assign[i] = j;
1332                         num--;
1333                 }
1334         }
1335 done:
1336         /*
1337          * scheduling failed or is just a simulation,
1338          * free resources if necessary
1339          */
1340         if (!assign || num) {
1341                 for (i = 0; i < n; i++) {
1342                         if (x86_pmu.put_event_constraints)
1343                                 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
1344                 }
1345         }
1346         return num ? -ENOSPC : 0;
1347 }
1348
1349 /*
 1350  * dogrp: true if we must collect sibling events (group)
1351  * returns total number of events and error code
1352  */
1353 static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
1354 {
1355         struct perf_event *event;
1356         int n, max_count;
1357
1358         max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
1359
1360         /* current number of events already accepted */
1361         n = cpuc->n_events;
1362
1363         if (is_x86_event(leader)) {
1364                 if (n >= max_count)
1365                         return -ENOSPC;
1366                 cpuc->event_list[n] = leader;
1367                 n++;
1368         }
1369         if (!dogrp)
1370                 return n;
1371
1372         list_for_each_entry(event, &leader->sibling_list, group_entry) {
1373                 if (!is_x86_event(event) ||
1374                     event->state <= PERF_EVENT_STATE_OFF)
1375                         continue;
1376
1377                 if (n >= max_count)
1378                         return -ENOSPC;
1379
1380                 cpuc->event_list[n] = event;
1381                 n++;
1382         }
1383         return n;
1384 }
1385
1386
1387 static inline void x86_assign_hw_event(struct perf_event *event,
1388                                 struct hw_perf_event *hwc, int idx)
1389 {
1390         hwc->idx = idx;
1391
1392         if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
1393                 hwc->config_base = 0;
1394                 hwc->event_base = 0;
1395         } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
1396                 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1397                 /*
1398                  * We set it so that event_base + idx in wrmsr/rdmsr maps to
1399                  * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
1400                  */
1401                 hwc->event_base =
1402                         MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
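                /*
                 * E.g. idx == X86_PMC_IDX_FIXED + 1 makes event_base + idx
                 * in the rdmsr/wrmsr helpers hit MSR_ARCH_PERFMON_FIXED_CTR1
                 * (illustration only).
                 */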
1403         } else {
1404                 hwc->config_base = x86_pmu.eventsel;
1405                 hwc->event_base  = x86_pmu.perfctr;
1406         }
1407 }
1408
1409 static void __x86_pmu_disable(struct perf_event *event, struct cpu_hw_events *cpuc);
1410
1411 void hw_perf_enable(void)
1412 {
1413         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1414         struct perf_event *event;
1415         struct hw_perf_event *hwc;
1416         int i;
1417
1418         if (!x86_pmu_initialized())
1419                 return;
1420         if (cpuc->n_added) {
1421                 /*
1422                  * apply assignment obtained either from
1423                  * hw_perf_group_sched_in() or x86_pmu_enable()
1424                  *
1425                  * step1: save events moving to new counters
1426                  * step2: reprogram moved events into new counters
1427                  */
1428                 for (i = 0; i < cpuc->n_events; i++) {
1429
1430                         event = cpuc->event_list[i];
1431                         hwc = &event->hw;
1432
1433                         if (hwc->idx == -1 || hwc->idx == cpuc->assign[i])
1434                                 continue;
1435
1436                         __x86_pmu_disable(event, cpuc);
1437
1438                         hwc->idx = -1;
1439                 }
1440
1441                 for (i = 0; i < cpuc->n_events; i++) {
1442
1443                         event = cpuc->event_list[i];
1444                         hwc = &event->hw;
1445
1446                         if (hwc->idx == -1) {
1447                                 x86_assign_hw_event(event, hwc, cpuc->assign[i]);
1448                                 x86_perf_event_set_period(event, hwc, hwc->idx);
1449                         }
1450                         /*
1451                          * need to mark as active because x86_pmu_disable()
 1452                          * clears active_mask and events[] yet it preserves
 1453                          * idx
1454                          */
1455                         set_bit(hwc->idx, cpuc->active_mask);
1456                         cpuc->events[hwc->idx] = event;
1457
1458                         x86_pmu.enable(hwc, hwc->idx);
1459                         perf_event_update_userpage(event);
1460                 }
1461                 cpuc->n_added = 0;
1462                 perf_events_lapic_init();
1463         }
1464         x86_pmu.enable_all();
1465 }
1466
1467 static inline u64 intel_pmu_get_status(void)
1468 {
1469         u64 status;
1470
1471         rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1472
1473         return status;
1474 }
1475
1476 static inline void intel_pmu_ack_status(u64 ack)
1477 {
1478         wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1479 }
1480
1481 static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1482 {
1483         (void)checking_wrmsrl(hwc->config_base + idx,
1484                               hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
1485 }
1486
1487 static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1488 {
1489         (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
1490 }
1491
1492 static inline void
1493 intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
1494 {
1495         int idx = __idx - X86_PMC_IDX_FIXED;
1496         u64 ctrl_val, mask;
1497
1498         mask = 0xfULL << (idx * 4);
1499
1500         rdmsrl(hwc->config_base, ctrl_val);
1501         ctrl_val &= ~mask;
1502         (void)checking_wrmsrl(hwc->config_base, ctrl_val);
1503 }
1504
1505 static inline void
1506 p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1507 {
1508         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1509         u64 val = P6_NOP_EVENT;
1510
1511         if (cpuc->enabled)
1512                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1513
1514         (void)checking_wrmsrl(hwc->config_base + idx, val);
1515 }
1516
1517 static inline void
1518 intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1519 {
1520         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1521                 intel_pmu_disable_bts();
1522                 return;
1523         }
1524
1525         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1526                 intel_pmu_disable_fixed(hwc, idx);
1527                 return;
1528         }
1529
1530         x86_pmu_disable_event(hwc, idx);
1531 }
1532
1533 static inline void
1534 amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
1535 {
1536         x86_pmu_disable_event(hwc, idx);
1537 }
1538
1539 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
1540
1541 /*
1542  * Set the next IRQ period, based on the hwc->period_left value.
1543  * To be called with the event disabled in hw:
1544  */
1545 static int
1546 x86_perf_event_set_period(struct perf_event *event,
1547                              struct hw_perf_event *hwc, int idx)
1548 {
1549         s64 left = atomic64_read(&hwc->period_left);
1550         s64 period = hwc->sample_period;
1551         int err, ret = 0;
1552
1553         if (idx == X86_PMC_IDX_FIXED_BTS)
1554                 return 0;
1555
1556         /*
1557          * If we are way outside a reasonable range then just skip forward:
1558          */
1559         if (unlikely(left <= -period)) {
1560                 left = period;
1561                 atomic64_set(&hwc->period_left, left);
1562                 hwc->last_period = period;
1563                 ret = 1;
1564         }
1565
1566         if (unlikely(left <= 0)) {
1567                 left += period;
1568                 atomic64_set(&hwc->period_left, left);
1569                 hwc->last_period = period;
1570                 ret = 1;
1571         }
1572         /*
 1573          * Quirk: certain CPUs don't like it if just 1 hw_event is left:
1574          */
1575         if (unlikely(left < 2))
1576                 left = 2;
1577
1578         if (left > x86_pmu.max_period)
1579                 left = x86_pmu.max_period;
1580
1581         per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
1582
1583         /*
1584          * The hw event starts counting from this event offset,
 1585          * mark it to be able to extract future deltas:
1586          */
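        /*
         * Example (illustration only): left == 100000 programs the
         * counter to -100000 (truncated to x86_pmu.event_mask below),
         * so it overflows and raises a PMI after 100000 increments.
         */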
1587         atomic64_set(&hwc->prev_count, (u64)-left);
1588
1589         err = checking_wrmsrl(hwc->event_base + idx,
1590                              (u64)(-left) & x86_pmu.event_mask);
1591
1592         perf_event_update_userpage(event);
1593
1594         return ret;
1595 }
1596
1597 static inline void
1598 intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
1599 {
1600         int idx = __idx - X86_PMC_IDX_FIXED;
1601         u64 ctrl_val, bits, mask;
1602         int err;
1603
1604         /*
1605          * Enable IRQ generation (0x8),
1606          * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1607          * if requested:
1608          */
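        /*
         * E.g. for fixed counter 1 (idx == 1) counting USR+OS with PMI
         * enabled, bits == 0xb and lands in bits 7:4 of
         * MSR_ARCH_PERFMON_FIXED_CTR_CTRL (illustration only).
         */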
1609         bits = 0x8ULL;
1610         if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1611                 bits |= 0x2;
1612         if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1613                 bits |= 0x1;
1614         bits <<= (idx * 4);
1615         mask = 0xfULL << (idx * 4);
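        /*
         * Each fixed counter owns a 4-bit control field in
         * MSR_ARCH_PERFMON_FIXED_CTR_CTRL, hence the idx * 4 shift for
         * both the new bits and the field mask.
         */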
1616
1617         rdmsrl(hwc->config_base, ctrl_val);
1618         ctrl_val &= ~mask;
1619         ctrl_val |= bits;
1620         err = checking_wrmsrl(hwc->config_base, ctrl_val);
1621 }
1622
1623 static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1624 {
1625         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1626         u64 val;
1627
1628         val = hwc->config;
1629         if (cpuc->enabled)
1630                 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1631
1632         (void)checking_wrmsrl(hwc->config_base + idx, val);
1633 }
1634
1635
1636 static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1637 {
1638         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1639                 if (!__get_cpu_var(cpu_hw_events).enabled)
1640                         return;
1641
1642                 intel_pmu_enable_bts(hwc->config);
1643                 return;
1644         }
1645
1646         if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1647                 intel_pmu_enable_fixed(hwc, idx);
1648                 return;
1649         }
1650
1651         x86_pmu_enable_event(hwc, idx);
1652 }
1653
1654 static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
1655 {
1656         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1657
1658         if (cpuc->enabled)
1659                 x86_pmu_enable_event(hwc, idx);
1660 }
1661
1662 /*
1663  * activate a single event
1664  *
1665  * The event is added to the group of enabled events
1666  * but only if it can be scheduled alongside the existing events.
1667  *
1668  * Called with the PMU disabled; the actual counter programming is
1669  * deferred to hw_perf_enable().
1670  */
1671 static int x86_pmu_enable(struct perf_event *event)
1672 {
1673         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1674         struct hw_perf_event *hwc;
1675         int assign[X86_PMC_IDX_MAX];
1676         int n, n0, ret;
1677
1678         hwc = &event->hw;
1679
1680         n0 = cpuc->n_events;
1681         n = collect_events(cpuc, event, false);
1682         if (n < 0)
1683                 return n;
1684
1685         ret = x86_schedule_events(cpuc, n, assign);
1686         if (ret)
1687                 return ret;
1688         /*
1689          * Copy the new assignment now that we know it is feasible;
1690          * it will be used by hw_perf_enable():
1691          */
1692         memcpy(cpuc->assign, assign, n*sizeof(int));
1693
1694         cpuc->n_events = n;
1695         cpuc->n_added  = n - n0;
1696
1697         if (hwc->idx != -1)
1698                 x86_perf_event_set_period(event, hwc, hwc->idx);
1699
1700         return 0;
1701 }
1702
1703 static void x86_pmu_unthrottle(struct perf_event *event)
1704 {
1705         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1706         struct hw_perf_event *hwc = &event->hw;
1707
1708         if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
1709                                 cpuc->events[hwc->idx] != event))
1710                 return;
1711
1712         x86_pmu.enable(hwc, hwc->idx);
1713 }
1714
1715 void perf_event_print_debug(void)
1716 {
1717         u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
1718         struct cpu_hw_events *cpuc;
1719         unsigned long flags;
1720         int cpu, idx;
1721
1722         if (!x86_pmu.num_events)
1723                 return;
1724
1725         local_irq_save(flags);
1726
1727         cpu = smp_processor_id();
1728         cpuc = &per_cpu(cpu_hw_events, cpu);
1729
1730         if (x86_pmu.version >= 2) {
1731                 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1732                 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1733                 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1734                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
1735
1736                 pr_info("\n");
1737                 pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
1738                 pr_info("CPU#%d: status:     %016llx\n", cpu, status);
1739                 pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
1740                 pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
1741         }
1742         pr_info("CPU#%d: active:       %016llx\n", cpu, *(u64 *)cpuc->active_mask);
1743
1744         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1745                 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1746                 rdmsrl(x86_pmu.perfctr  + idx, pmc_count);
1747
1748                 prev_left = per_cpu(pmc_prev_left[idx], cpu);
1749
1750                 pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
1751                         cpu, idx, pmc_ctrl);
1752                 pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
1753                         cpu, idx, pmc_count);
1754                 pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
1755                         cpu, idx, prev_left);
1756         }
1757         for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1758                 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1759
1760                 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
1761                         cpu, idx, pmc_count);
1762         }
1763         local_irq_restore(flags);
1764 }
1765
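/*
 * Flush the hardware BTS buffer: emit one perf sample per branch record,
 * with data.ip set to the branch source and data.addr to the branch
 * target, then rewind the buffer index.
 */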
1766 static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
1767 {
1768         struct debug_store *ds = cpuc->ds;
1769         struct bts_record {
1770                 u64     from;
1771                 u64     to;
1772                 u64     flags;
1773         };
1774         struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
1775         struct bts_record *at, *top;
1776         struct perf_output_handle handle;
1777         struct perf_event_header header;
1778         struct perf_sample_data data;
1779         struct pt_regs regs;
1780
1781         if (!event)
1782                 return;
1783
1784         if (!ds)
1785                 return;
1786
1787         at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
1788         top = (struct bts_record *)(unsigned long)ds->bts_index;
1789
1790         if (top <= at)
1791                 return;
1792
1793         ds->bts_index = ds->bts_buffer_base;
1794
1795
1796         data.period     = event->hw.last_period;
1797         data.addr       = 0;
1798         data.raw        = NULL;
1799         regs.ip         = 0;
1800
1801         /*
1802          * Prepare a generic sample, i.e. fill in the invariant fields.
1803          * We will overwrite the from and to address before we output
1804          * the sample.
1805          */
1806         perf_prepare_sample(&header, &data, event, &regs);
1807
1808         if (perf_output_begin(&handle, event,
1809                               header.size * (top - at), 1, 1))
1810                 return;
1811
1812         for (; at < top; at++) {
1813                 data.ip         = at->from;
1814                 data.addr       = at->to;
1815
1816                 perf_output_sample(&handle, &header, &data, event);
1817         }
1818
1819         perf_output_end(&handle);
1820
1821         /* There's new data available. */
1822         event->hw.interrupts++;
1823         event->pending_kill = POLL_IN;
1824 }
1825
1826 static void __x86_pmu_disable(struct perf_event *event, struct cpu_hw_events *cpuc)
1827 {
1828         struct hw_perf_event *hwc = &event->hw;
1829         int idx = hwc->idx;
1830
1831         /*
1832          * Must be done before we disable, otherwise the NMI handler
1833          * could re-enable it again:
1834          */
1835         clear_bit(idx, cpuc->active_mask);
1836         x86_pmu.disable(hwc, idx);
1837
1838         /*
1839          * Drain the remaining delta count out of an event
1840          * that we are disabling:
1841          */
1842         x86_perf_event_update(event, hwc, idx);
1843
1844         /* Drain the remaining BTS records. */
1845         if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
1846                 intel_pmu_drain_bts_buffer(cpuc);
1847
1848         cpuc->events[idx] = NULL;
1849 }
1850
1851 static void x86_pmu_disable(struct perf_event *event)
1852 {
1853         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1854         int i;
1855
1856         __x86_pmu_disable(event, cpuc);
1857
1858         for (i = 0; i < cpuc->n_events; i++) {
1859                 if (event == cpuc->event_list[i]) {
1860
1861                         if (x86_pmu.put_event_constraints)
1862                                 x86_pmu.put_event_constraints(cpuc, event);
1863
1864                         while (++i < cpuc->n_events)
1865                                 cpuc->event_list[i-1] = cpuc->event_list[i];
1866
1867                         --cpuc->n_events;
1868                         break;
1869                 }
1870         }
1871         perf_event_update_userpage(event);
1872 }
1873
1874 /*
1875  * Save and restart an expired event. Called from NMI context,
1876  * so it has to be careful about preempting normal event ops:
1877  */
1878 static int intel_pmu_save_and_restart(struct perf_event *event)
1879 {
1880         struct hw_perf_event *hwc = &event->hw;
1881         int idx = hwc->idx;
1882         int ret;
1883
1884         x86_perf_event_update(event, hwc, idx);
1885         ret = x86_perf_event_set_period(event, hwc, idx);
1886
1887         if (event->state == PERF_EVENT_STATE_ACTIVE)
1888                 intel_pmu_enable_event(hwc, idx);
1889
1890         return ret;
1891 }
1892
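/*
 * Last-resort recovery for a stuck PMI loop: clear every general-purpose
 * and fixed-purpose counter and rewind the BTS buffer index.
 */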
1893 static void intel_pmu_reset(void)
1894 {
1895         struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
1896         unsigned long flags;
1897         int idx;
1898
1899         if (!x86_pmu.num_events)
1900                 return;
1901
1902         local_irq_save(flags);
1903
1904         printk("clearing PMU state on CPU#%d\n", smp_processor_id());
1905
1906         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1907                 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
1908                 checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
1909         }
1910         for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
1911                 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
1912         }
1913         if (ds)
1914                 ds->bts_index = ds->bts_buffer_base;
1915
1916         local_irq_restore(flags);
1917 }
1918
1919 static int p6_pmu_handle_irq(struct pt_regs *regs)
1920 {
1921         struct perf_sample_data data;
1922         struct cpu_hw_events *cpuc;
1923         struct perf_event *event;
1924         struct hw_perf_event *hwc;
1925         int idx, handled = 0;
1926         u64 val;
1927
1928         data.addr = 0;
1929         data.raw = NULL;
1930
1931         cpuc = &__get_cpu_var(cpu_hw_events);
1932
1933         for (idx = 0; idx < x86_pmu.num_events; idx++) {
1934                 if (!test_bit(idx, cpuc->active_mask))
1935                         continue;
1936
1937                 event = cpuc->events[idx];
1938                 hwc = &event->hw;
1939
1940                 val = x86_perf_event_update(event, hwc, idx);
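                /*
                 * Counters are programmed with -(sample period); while the
                 * top bit is still set the counter has not yet wrapped,
                 * i.e. this event has not overflowed:
                 */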
1941                 if (val & (1ULL << (x86_pmu.event_bits - 1)))
1942                         continue;
1943
1944                 /*
1945                  * event overflow
1946                  */
1947                 handled         = 1;
1948                 data.period     = event->hw.last_period;
1949
1950                 if (!x86_perf_event_set_period(event, hwc, idx))
1951                         continue;
1952
1953                 if (perf_event_overflow(event, 1, &data, regs))
1954                         p6_pmu_disable_event(hwc, idx);
1955         }
1956
1957         if (handled)
1958                 inc_irq_stat(apic_perf_irqs);
1959
1960         return handled;
1961 }
1962
1963 /*
1964  * This handler is triggered by the local APIC, so the APIC IRQ handling
1965  * rules apply:
1966  */
1967 static int intel_pmu_handle_irq(struct pt_regs *regs)
1968 {
1969         struct perf_sample_data data;
1970         struct cpu_hw_events *cpuc;
1971         int bit, loops;
1972         u64 ack, status;
1973
1974         data.addr = 0;
1975         data.raw = NULL;
1976
1977         cpuc = &__get_cpu_var(cpu_hw_events);
1978
1979         perf_disable();
1980         intel_pmu_drain_bts_buffer(cpuc);
1981         status = intel_pmu_get_status();
1982         if (!status) {
1983                 perf_enable();
1984                 return 0;
1985         }
1986
1987         loops = 0;
1988 again:
1989         if (++loops > 100) {
1990                 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
1991                 perf_event_print_debug();
1992                 intel_pmu_reset();
1993                 perf_enable();
1994                 return 1;
1995         }
1996
1997         inc_irq_stat(apic_perf_irqs);
1998         ack = status;
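        /*
         * Latch the overflow bits we are about to service; they are
         * acknowledged in one go via intel_pmu_ack_status() once the
         * loop below has handled them.
         */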
1999         for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
2000                 struct perf_event *event = cpuc->events[bit];
2001
2002                 clear_bit(bit, (unsigned long *) &status);
2003                 if (!test_bit(bit, cpuc->active_mask))
2004                         continue;
2005
2006                 if (!intel_pmu_save_and_restart(event))
2007                         continue;
2008
2009                 data.period = event->hw.last_period;
2010
2011                 if (perf_event_overflow(event, 1, &data, regs))
2012                         intel_pmu_disable_event(&event->hw, bit);
2013         }
2014
2015         intel_pmu_ack_status(ack);
2016
2017         /*
2018          * Repeat if there is more work to be done:
2019          */
2020         status = intel_pmu_get_status();
2021         if (status)
2022                 goto again;
2023
2024         perf_enable();
2025
2026         return 1;
2027 }
2028
2029 static int amd_pmu_handle_irq(struct pt_regs *regs)
2030 {
2031         struct perf_sample_data data;
2032         struct cpu_hw_events *cpuc;
2033         struct perf_event *event;
2034         struct hw_perf_event *hwc;
2035         int idx, handled = 0;
2036         u64 val;
2037
2038         data.addr = 0;
2039         data.raw = NULL;
2040
2041         cpuc = &__get_cpu_var(cpu_hw_events);
2042
2043         for (idx = 0; idx < x86_pmu.num_events; idx++) {
2044                 if (!test_bit(idx, cpuc->active_mask))
2045                         continue;
2046
2047                 event = cpuc->events[idx];
2048                 hwc = &event->hw;
2049
2050                 val = x86_perf_event_update(event, hwc, idx);
2051                 if (val & (1ULL << (x86_pmu.event_bits - 1)))
2052                         continue;
2053
2054                 /*
2055                  * event overflow
2056                  */
2057                 handled         = 1;
2058                 data.period     = event->hw.last_period;
2059
2060                 if (!x86_perf_event_set_period(event, hwc, idx))
2061                         continue;
2062
2063                 if (perf_event_overflow(event, 1, &data, regs))
2064                         amd_pmu_disable_event(hwc, idx);
2065         }
2066
2067         if (handled)
2068                 inc_irq_stat(apic_perf_irqs);
2069
2070         return handled;
2071 }
2072
2073 void smp_perf_pending_interrupt(struct pt_regs *regs)
2074 {
2075         irq_enter();
2076         ack_APIC_irq();
2077         inc_irq_stat(apic_pending_irqs);
2078         perf_event_do_pending();
2079         irq_exit();
2080 }
2081
2082 void set_perf_event_pending(void)
2083 {
2084 #ifdef CONFIG_X86_LOCAL_APIC
2085         if (!x86_pmu.apic || !x86_pmu_initialized())
2086                 return;
2087
2088         apic->send_IPI_self(LOCAL_PENDING_VECTOR);
2089 #endif
2090 }
2091
2092 void perf_events_lapic_init(void)
2093 {
2094 #ifdef CONFIG_X86_LOCAL_APIC
2095         if (!x86_pmu.apic || !x86_pmu_initialized())
2096                 return;
2097
2098         /*
2099          * Always use NMI for PMU
2100          */
2101         apic_write(APIC_LVTPC, APIC_DM_NMI);
2102 #endif
2103 }
2104
2105 static int __kprobes
2106 perf_event_nmi_handler(struct notifier_block *self,
2107                          unsigned long cmd, void *__args)
2108 {
2109         struct die_args *args = __args;
2110         struct pt_regs *regs;
2111
2112         if (!atomic_read(&active_events))
2113                 return NOTIFY_DONE;
2114
2115         switch (cmd) {
2116         case DIE_NMI:
2117         case DIE_NMI_IPI:
2118                 break;
2119
2120         default:
2121                 return NOTIFY_DONE;
2122         }
2123
2124         regs = args->regs;
2125
2126 #ifdef CONFIG_X86_LOCAL_APIC
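        /*
         * Re-arm the LVT performance counter entry: on many CPUs delivery
         * of the PMI sets the mask bit of this LVT entry, so it is
         * rewritten unconditionally here.
         */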
2127         apic_write(APIC_LVTPC, APIC_DM_NMI);
2128 #endif
2129         /*
2130          * Can't rely on the handled return value to say it was our NMI, two
2131          * events could trigger 'simultaneously' raising two back-to-back NMIs.
2132          *
2133          * If the first NMI handles both, the latter will be empty and daze
2134          * the CPU.
2135          */
2136         x86_pmu.handle_irq(regs);
2137
2138         return NOTIFY_STOP;
2139 }
2140
2141 static struct event_constraint unconstrained;
2142
2143 static struct event_constraint bts_constraint =
2144         EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
2145
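/*
 * BTS is used to service branch-instruction events with a sample period
 * of 1 (i.e. trace every branch); such events are constrained to the
 * dedicated BTS "counter":
 */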
2146 static struct event_constraint *
2147 intel_special_constraints(struct perf_event *event)
2148 {
2149         unsigned int hw_event;
2150
2151         hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
2152
2153         if (unlikely((hw_event ==
2154                       x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
2155                      (event->hw.sample_period == 1))) {
2156
2157                 return &bts_constraint;
2158         }
2159         return NULL;
2160 }
2161
2162 static struct event_constraint *
2163 intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
2164 {
2165         struct event_constraint *c;
2166
2167         c = intel_special_constraints(event);
2168         if (c)
2169                 return c;
2170
2171         if (x86_pmu.event_constraints) {
2172                 for_each_event_constraint(c, x86_pmu.event_constraints) {
2173                         if ((event->hw.config & c->cmask) == c->code)
2174                                 return c;
2175                 }
2176         }
2177
2178         return &unconstrained;
2179 }
2180
2181 static struct event_constraint *
2182 amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
2183 {
2184         return &unconstrained;
2185 }
2186
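/*
 * Activate one event of a group on behalf of the scheduler: mark it
 * running on this CPU and, if it is not an x86 hardware event (e.g. a
 * software sibling), enable it through its own pmu.
 */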
2187 static int x86_event_sched_in(struct perf_event *event,
2188                           struct perf_cpu_context *cpuctx, int cpu)
2189 {
2190         int ret = 0;
2191
2192         event->state = PERF_EVENT_STATE_ACTIVE;
2193         event->oncpu = cpu;
2194         event->tstamp_running += event->ctx->time - event->tstamp_stopped;
2195
2196         if (!is_x86_event(event))
2197                 ret = event->pmu->enable(event);
2198
2199         if (!ret && !is_software_event(event))
2200                 cpuctx->active_oncpu++;
2201
2202         if (!ret && event->attr.exclusive)
2203                 cpuctx->exclusive = 1;
2204
2205         return ret;
2206 }
2207
2208 static void x86_event_sched_out(struct perf_event *event,
2209                             struct perf_cpu_context *cpuctx, int cpu)
2210 {
2211         event->state = PERF_EVENT_STATE_INACTIVE;
2212         event->oncpu = -1;
2213
2214         if (!is_x86_event(event))
2215                 event->pmu->disable(event);
2216
2217         event->tstamp_running -= event->ctx->time - event->tstamp_stopped;
2218
2219         if (!is_software_event(event))
2220                 cpuctx->active_oncpu--;
2221
2222         if (event->attr.exclusive || !cpuctx->active_oncpu)
2223                 cpuctx->exclusive = 0;
2224 }
2225
2226 /*
2227  * Called to enable a whole group of events.
2228  * Returns 1 if the group was enabled, or a negative error code if it could not be.
2229  * Assumes the caller has disabled interrupts and has
2230  * frozen the PMU with hw_perf_save_disable.
2231  *
2232  * Called with the PMU disabled. On success (return value 1) the caller
2233  * is guaranteed to call perf_enable() and hw_perf_enable() afterwards.
2234  */
2235 int hw_perf_group_sched_in(struct perf_event *leader,
2236                struct perf_cpu_context *cpuctx,
2237                struct perf_event_context *ctx, int cpu)
2238 {
2239         struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2240         struct perf_event *sub;
2241         int assign[X86_PMC_IDX_MAX];
2242         int n0, n1, ret;
2243
2244         /* n0 = total number of events */
2245         n0 = collect_events(cpuc, leader, true);
2246         if (n0 < 0)
2247                 return n0;
2248
2249         ret = x86_schedule_events(cpuc, n0, assign);
2250         if (ret)
2251                 return ret;
2252
2253         ret = x86_event_sched_in(leader, cpuctx, cpu);
2254         if (ret)
2255                 return ret;
2256
2257         n1 = 1;
2258         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2259                 if (sub->state > PERF_EVENT_STATE_OFF) {
2260                         ret = x86_event_sched_in(sub, cpuctx, cpu);
2261                         if (ret)
2262                                 goto undo;
2263                         ++n1;
2264                 }
2265         }
2266         /*
2267          * copy new assignment, now we know it is possible
2268          * will be used by hw_perf_enable()
2269          */
2270         memcpy(cpuc->assign, assign, n0*sizeof(int));
2271
2272         cpuc->n_events  = n0;
2273         cpuc->n_added   = n1;
2274         ctx->nr_active += n1;
2275
2276         /*
2277          * A return value of 1 means success and that the events
2278          * are active. This is not quite true, because we defer the
2279          * actual activation until hw_perf_enable(), but this way we
2280          * ensure that the caller won't try to enable the individual
2281          * events itself.
2282          */
2283         return 1;
2284 undo:
2285         x86_event_sched_out(leader, cpuctx, cpu);
2286         n0  = 1;
2287         list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2288                 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
2289                         x86_event_sched_out(sub, cpuctx, cpu);
2290                         if (++n0 == n1)
2291                                 break;
2292                 }
2293         }
2294         return ret;
2295 }
2296
2297 static __read_mostly struct notifier_block perf_event_nmi_notifier = {
2298         .notifier_call          = perf_event_nmi_handler,
2299         .next                   = NULL,
2300         .priority               = 1
2301 };
2302
2303 static __initconst struct x86_pmu p6_pmu = {
2304         .name                   = "p6",
2305         .handle_irq             = p6_pmu_handle_irq,
2306         .disable_all            = p6_pmu_disable_all,
2307         .enable_all             = p6_pmu_enable_all,
2308         .enable                 = p6_pmu_enable_event,
2309         .disable                = p6_pmu_disable_event,
2310         .eventsel               = MSR_P6_EVNTSEL0,
2311         .perfctr                = MSR_P6_PERFCTR0,
2312         .event_map              = p6_pmu_event_map,
2313         .raw_event              = p6_pmu_raw_event,
2314         .max_events             = ARRAY_SIZE(p6_perfmon_event_map),
2315         .apic                   = 1,
2316         .max_period             = (1ULL << 31) - 1,
2317         .version                = 0,
2318         .num_events             = 2,
2319         /*
2320          * Events have 40 bits implemented. However they are designed such
2321          * that bits [32-39] are sign extensions of bit 31. As such the
2322          * effective width of an event for a P6-like PMU is only 32 bits.
2323          *
2324          * See IA-32 Intel Architecture Software developer manual Vol 3B
2325          */
2326         .event_bits             = 32,
2327         .event_mask             = (1ULL << 32) - 1,
2328         .get_event_constraints  = intel_get_event_constraints,
2329         .event_constraints      = intel_p6_event_constraints
2330 };
2331
2332 static __initconst struct x86_pmu intel_pmu = {
2333         .name                   = "Intel",
2334         .handle_irq             = intel_pmu_handle_irq,
2335         .disable_all            = intel_pmu_disable_all,
2336         .enable_all             = intel_pmu_enable_all,
2337         .enable                 = intel_pmu_enable_event,
2338         .disable                = intel_pmu_disable_event,
2339         .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
2340         .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
2341         .event_map              = intel_pmu_event_map,
2342         .raw_event              = intel_pmu_raw_event,
2343         .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
2344         .apic                   = 1,
2345         /*
2346          * Intel PMCs cannot be accessed sanely above 32 bit width,
2347          * so we install an artificial 1<<31 period regardless of
2348          * the generic event period:
2349          */
2350         .max_period             = (1ULL << 31) - 1,
2351         .enable_bts             = intel_pmu_enable_bts,
2352         .disable_bts            = intel_pmu_disable_bts,
2353         .get_event_constraints  = intel_get_event_constraints
2354 };
2355
2356 static __initconst struct x86_pmu amd_pmu = {
2357         .name                   = "AMD",
2358         .handle_irq             = amd_pmu_handle_irq,
2359         .disable_all            = amd_pmu_disable_all,
2360         .enable_all             = amd_pmu_enable_all,
2361         .enable                 = amd_pmu_enable_event,
2362         .disable                = amd_pmu_disable_event,
2363         .eventsel               = MSR_K7_EVNTSEL0,
2364         .perfctr                = MSR_K7_PERFCTR0,
2365         .event_map              = amd_pmu_event_map,
2366         .raw_event              = amd_pmu_raw_event,
2367         .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
2368         .num_events             = 4,
2369         .event_bits             = 48,
2370         .event_mask             = (1ULL << 48) - 1,
2371         .apic                   = 1,
2372         /* use highest bit to detect overflow */
2373         .max_period             = (1ULL << 47) - 1,
2374         .get_event_constraints  = amd_get_event_constraints
2375 };
2376
2377 static __init int p6_pmu_init(void)
2378 {
2379         switch (boot_cpu_data.x86_model) {
2380         case 1:
2381         case 3:  /* Pentium Pro */
2382         case 5:
2383         case 6:  /* Pentium II */
2384         case 7:
2385         case 8:
2386         case 11: /* Pentium III */
2387         case 9:
2388         case 13:
2389                 /* Pentium M */
2390                 break;
2391         default:
2392                 pr_cont("unsupported p6 CPU model %d ",
2393                         boot_cpu_data.x86_model);
2394                 return -ENODEV;
2395         }
2396
2397         x86_pmu = p6_pmu;
2398
2399         return 0;
2400 }
2401
2402 static __init int intel_pmu_init(void)
2403 {
2404         union cpuid10_edx edx;
2405         union cpuid10_eax eax;
2406         unsigned int unused;
2407         unsigned int ebx;
2408         int version;
2409
2410         if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2411                 /* check for P6 processor family */
2412                 if (boot_cpu_data.x86 == 6) {
2413                         return p6_pmu_init();
2414                 } else {
2415                         return -ENODEV;
2416                 }
2417         }
2418
2419         /*
2420          * Check whether the Architectural PerfMon supports
2421          * Branch Misses Retired hw_event or not.
2422          */
2423         cpuid(10, &eax.full, &ebx, &unused, &edx.full);
2424         if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
2425                 return -ENODEV;
2426
2427         version = eax.split.version_id;
2428         if (version < 2)
2429                 return -ENODEV;
2430
2431         x86_pmu                         = intel_pmu;
2432         x86_pmu.version                 = version;
2433         x86_pmu.num_events              = eax.split.num_events;
2434         x86_pmu.event_bits              = eax.split.bit_width;
2435         x86_pmu.event_mask              = (1ULL << eax.split.bit_width) - 1;
2436
2437         /*
2438          * Quirk: v2 perfmon does not report fixed-purpose events, so
2439          * assume at least 3 events:
2440          */
2441         x86_pmu.num_events_fixed        = max((int)edx.split.num_events_fixed, 3);
2442
2443         /*
2444          * Install the hw-cache-events table:
2445          */
2446         switch (boot_cpu_data.x86_model) {
2447         case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2448         case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2449         case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2450         case 29: /* six-core 45 nm xeon "Dunnington" */
2451                 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
2452                        sizeof(hw_cache_event_ids));
2453
2454                 x86_pmu.event_constraints = intel_core_event_constraints;
2455                 pr_cont("Core2 events, ");
2456                 break;
2457         case 26:
2458                 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
2459                        sizeof(hw_cache_event_ids));
2460
2461                 x86_pmu.event_constraints = intel_nehalem_event_constraints;
2462                 pr_cont("Nehalem/Corei7 events, ");
2463                 break;
2464         case 28:
2465                 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
2466                        sizeof(hw_cache_event_ids));
2467
2468                 x86_pmu.event_constraints = intel_gen_event_constraints;
2469                 pr_cont("Atom events, ");
2470                 break;
2471         default:
2472                 /*
2473                  * default constraints for v2 and up
2474                  */
2475                 x86_pmu.event_constraints = intel_gen_event_constraints;
2476                 pr_cont("generic architected perfmon, ");
2477         }
2478         return 0;
2479 }
2480
2481 static __init int amd_pmu_init(void)
2482 {
2483         /* Performance-monitoring supported from K7 and later: */
2484         if (boot_cpu_data.x86 < 6)
2485                 return -ENODEV;
2486
2487         x86_pmu = amd_pmu;
2488
2489         /* The hw-cache event table is common to all AMD CPUs: */
2490         memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
2491                sizeof(hw_cache_event_ids));
2492
2493         return 0;
2494 }
2495
2496 static void __init pmu_check_apic(void)
2497 {
2498         if (cpu_has_apic)
2499                 return;
2500
2501         x86_pmu.apic = 0;
2502         pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
2503         pr_info("no hardware sampling interrupt available.\n");
2504 }
2505
2506 void __init init_hw_perf_events(void)
2507 {
2508         int err;
2509
2510         pr_info("Performance Events: ");
2511
2512         switch (boot_cpu_data.x86_vendor) {
2513         case X86_VENDOR_INTEL:
2514                 err = intel_pmu_init();
2515                 break;
2516         case X86_VENDOR_AMD:
2517                 err = amd_pmu_init();
2518                 break;
2519         default:
2520                 return;
2521         }
2522         if (err != 0) {
2523                 pr_cont("no PMU driver, software events only.\n");
2524                 return;
2525         }
2526
2527         pmu_check_apic();
2528
2529         pr_cont("%s PMU driver.\n", x86_pmu.name);
2530
2531         if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
2532                 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2533                      x86_pmu.num_events, X86_PMC_MAX_GENERIC);
2534                 x86_pmu.num_events = X86_PMC_MAX_GENERIC;
2535         }
2536         perf_event_mask = (1 << x86_pmu.num_events) - 1;
2537         perf_max_events = x86_pmu.num_events;
2538
2539         if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
2540                 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2541                      x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
2542                 x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
2543         }
2544
2545         perf_event_mask |=
2546                 ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
2547         x86_pmu.intel_ctrl = perf_event_mask;
2548
2549         perf_events_lapic_init();
2550         register_die_notifier(&perf_event_nmi_notifier);
2551
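        /*
         * Events without a specific constraint may use any of the
         * general-purpose counters: build a catch-all constraint whose
         * index mask covers all of them.
         */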
2552         unconstrained = (struct event_constraint)
2553                 EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1, 0);
2554
2555         pr_info("... version:                %d\n",     x86_pmu.version);
2556         pr_info("... bit width:              %d\n",     x86_pmu.event_bits);
2557         pr_info("... generic registers:      %d\n",     x86_pmu.num_events);
2558         pr_info("... value mask:             %016Lx\n", x86_pmu.event_mask);
2559         pr_info("... max period:             %016Lx\n", x86_pmu.max_period);
2560         pr_info("... fixed-purpose events:   %d\n",     x86_pmu.num_events_fixed);
2561         pr_info("... event mask:             %016Lx\n", perf_event_mask);
2562 }
2563
2564 static inline void x86_pmu_read(struct perf_event *event)
2565 {
2566         x86_perf_event_update(event, &event->hw, event->hw.idx);
2567 }
2568
2569 static const struct pmu pmu = {
2570         .enable         = x86_pmu_enable,
2571         .disable        = x86_pmu_disable,
2572         .read           = x86_pmu_read,
2573         .unthrottle     = x86_pmu_unthrottle,
2574 };
2575
2576 /*
2577  * validate a single event group
2578  *
2579  * validation includes:
2580  *      - check that the events are compatible with each other
2581  *      - events do not compete for the same counter
2582  *      - number of events <= number of counters
2583  *
2584  * validation ensures the group can be loaded onto the
2585  * PMU if it was the only group available.
2586  */
2587 static int validate_group(struct perf_event *event)
2588 {
2589         struct perf_event *leader = event->group_leader;
2590         struct cpu_hw_events *fake_cpuc;
2591         int ret, n;
2592
2593         ret = -ENOMEM;
2594         fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
2595         if (!fake_cpuc)
2596                 goto out;
2597
2598         /*
2599          * The event is not yet connected to its siblings;
2600          * therefore we must first collect the existing siblings,
2601          * then add the new event, before we can simulate
2602          * the scheduling.
2603          */
2604         ret = -ENOSPC;
2605         n = collect_events(fake_cpuc, leader, true);
2606         if (n < 0)
2607                 goto out_free;
2608
2609         fake_cpuc->n_events = n;
2610         n = collect_events(fake_cpuc, event, false);
2611         if (n < 0)
2612                 goto out_free;
2613
2614         fake_cpuc->n_events = n;
2615
2616         ret = x86_schedule_events(fake_cpuc, n, NULL);
2617
2618 out_free:
2619         kfree(fake_cpuc);
2620 out:
2621         return ret;
2622 }
2623
2624 const struct pmu *hw_perf_event_init(struct perf_event *event)
2625 {
2626         const struct pmu *tmp;
2627         int err;
2628
2629         err = __hw_perf_event_init(event);
2630         if (!err) {
2631                 /*
2632                  * we temporarily connect event to its pmu
2633                  * such that validate_group() can classify
2634                  * it as an x86 event using is_x86_event()
2635                  */
2636                 tmp = event->pmu;
2637                 event->pmu = &pmu;
2638
2639                 if (event->group_leader != event)
2640                         err = validate_group(event);
2641
2642                 event->pmu = tmp;
2643         }
2644         if (err) {
2645                 if (event->destroy)
2646                         event->destroy(event);
2647                 return ERR_PTR(err);
2648         }
2649
2650         return &pmu;
2651 }
2652
2653 /*
2654  * callchain support
2655  */
2656
2657 static inline
2658 void callchain_store(struct perf_callchain_entry *entry, u64 ip)
2659 {
2660         if (entry->nr < PERF_MAX_STACK_DEPTH)
2661                 entry->ip[entry->nr++] = ip;
2662 }
2663
2664 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
2665 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
2666
2667
2668 static void
2669 backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
2670 {
2671         /* Ignore warnings */
2672 }
2673
2674 static void backtrace_warning(void *data, char *msg)
2675 {
2676         /* Ignore warnings */
2677 }
2678
2679 static int backtrace_stack(void *data, char *name)
2680 {
2681         return 0;
2682 }
2683
2684 static void backtrace_address(void *data, unsigned long addr, int reliable)
2685 {
2686         struct perf_callchain_entry *entry = data;
2687
2688         if (reliable)
2689                 callchain_store(entry, addr);
2690 }
2691
2692 static const struct stacktrace_ops backtrace_ops = {
2693         .warning                = backtrace_warning,
2694         .warning_symbol         = backtrace_warning_symbol,
2695         .stack                  = backtrace_stack,
2696         .address                = backtrace_address,
2697         .walk_stack             = print_context_stack_bp,
2698 };
2699
2700 #include "../dumpstack.h"
2701
2702 static void
2703 perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
2704 {
2705         callchain_store(entry, PERF_CONTEXT_KERNEL);
2706         callchain_store(entry, regs->ip);
2707
2708         dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
2709 }
2710
2711 /*
2712  * Best-effort, GUP-based copy_from_user() that assumes IRQ or NMI context.
2713  */
2714 static unsigned long
2715 copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
2716 {
2717         unsigned long offset, addr = (unsigned long)from;
2718         int type = in_nmi() ? KM_NMI : KM_IRQ0;
2719         unsigned long size, len = 0;
2720         struct page *page;
2721         void *map;
2722         int ret;
2723
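        /*
         * Copy one page at a time: pin the page with
         * __get_user_pages_fast(), map it with kmap_atomic() and copy
         * up to the page boundary.
         */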
2724         do {
2725                 ret = __get_user_pages_fast(addr, 1, 0, &page);
2726                 if (!ret)
2727                         break;
2728
2729                 offset = addr & (PAGE_SIZE - 1);
2730                 size = min(PAGE_SIZE - offset, n - len);
2731
2732                 map = kmap_atomic(page, type);
2733                 memcpy(to, map+offset, size);
2734                 kunmap_atomic(map, type);
2735                 put_page(page);
2736
2737                 len  += size;
2738                 to   += size;
2739                 addr += size;
2740
2741         } while (len < n);
2742
2743         return len;
2744 }
2745
2746 static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
2747 {
2748         unsigned long bytes;
2749
2750         bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
2751
2752         return bytes == sizeof(*frame);
2753 }
2754
2755 static void
2756 perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
2757 {
2758         struct stack_frame frame;
2759         const void __user *fp;
2760
2761         if (!user_mode(regs))
2762                 regs = task_pt_regs(current);
2763
2764         fp = (void __user *)regs->bp;
2765
2766         callchain_store(entry, PERF_CONTEXT_USER);
2767         callchain_store(entry, regs->ip);
2768
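        /*
         * Walk the user stack through the frame-pointer chain: each frame
         * yields a return address and a pointer to the caller's frame.
         * Stop when a frame cannot be copied or when the chain points
         * below the current stack pointer.
         */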
2769         while (entry->nr < PERF_MAX_STACK_DEPTH) {
2770                 frame.next_frame        = NULL;
2771                 frame.return_address    = 0;
2772
2773                 if (!copy_stack_frame(fp, &frame))
2774                         break;
2775
2776                 if ((unsigned long)fp < regs->sp)
2777                         break;
2778
2779                 callchain_store(entry, frame.return_address);
2780                 fp = frame.next_frame;
2781         }
2782 }
2783
2784 static void
2785 perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
2786 {
2787         int is_user;
2788
2789         if (!regs)
2790                 return;
2791
2792         is_user = user_mode(regs);
2793
2794         if (is_user && current->state != TASK_RUNNING)
2795                 return;
2796
2797         if (!is_user)
2798                 perf_callchain_kernel(regs, entry);
2799
2800         if (current->mm)
2801                 perf_callchain_user(regs, entry);
2802 }
2803
2804 struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2805 {
2806         struct perf_callchain_entry *entry;
2807
2808         if (in_nmi())
2809                 entry = &__get_cpu_var(pmc_nmi_entry);
2810         else
2811                 entry = &__get_cpu_var(pmc_irq_entry);
2812
2813         entry->nr = 0;
2814
2815         perf_do_callchain(regs, entry);
2816
2817         return entry;
2818 }
2819
2820 void hw_perf_event_setup_online(int cpu)
2821 {
2822         init_debug_store_on_cpu(cpu);
2823 }