perf, x86: Add Nehalem programming quirk to Westmere
arch/x86/kernel/cpu/perf_event_intel.c
#ifdef CONFIG_CPU_SUP_INTEL

/*
 * Intel PerfMon, used on Core and later.
 */
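/*
 * Each entry encodes (unit_mask << 8) | event_select, as written into a
 * PERFEVTSEL MSR; e.g. 0x412e is event 0x2e with umask 0x41 (LLC Misses).
 */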
static const u64 intel_perfmon_event_map[] =
{
  [PERF_COUNT_HW_CPU_CYCLES]            = 0x003c,
  [PERF_COUNT_HW_INSTRUCTIONS]          = 0x00c0,
  [PERF_COUNT_HW_CACHE_REFERENCES]      = 0x4f2e,
  [PERF_COUNT_HW_CACHE_MISSES]          = 0x412e,
  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]   = 0x00c4,
  [PERF_COUNT_HW_BRANCH_MISSES]         = 0x00c5,
  [PERF_COUNT_HW_BUS_CYCLES]            = 0x013c,
};

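/*
 * In INTEL_EVENT_CONSTRAINT(event, mask), 'mask' is a bitmask of the
 * general-purpose counters the event may be scheduled on (0x3 = PMC0/PMC1).
 */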
static struct event_constraint intel_core_event_constraints[] =
{
        INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
        INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
        INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
        INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
        INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
        INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
        EVENT_CONSTRAINT_END
};

static struct event_constraint intel_core2_event_constraints[] =
{
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        /*
         * Core2 has Fixed Counter 2 listed as CPU_CLK_UNHALTED.REF and event
         * 0x013c as CPU_CLK_UNHALTED.BUS and specifies there is a fixed
         * ratio between these counters.
         */
        /* FIXED_EVENT_CONSTRAINT(0x013c, 2),  CPU_CLK_UNHALTED.REF */
        INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
        INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
        INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
        INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
        INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
        INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
        INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
        INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
        INTEL_EVENT_CONSTRAINT(0xc9, 0x1), /* ITLB_MISS_RETIRED (T30-9) */
        INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
        EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] =
{
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
        INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
        INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
        INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
        INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
        INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
        INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
        INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
        INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
        EVENT_CONSTRAINT_END
};

static struct event_constraint intel_westmere_event_constraints[] =
{
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
        INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
        INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
        INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
        EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] =
{
        FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
        FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
        /* FIXED_EVENT_CONSTRAINT(0x013c, 2), CPU_CLK_UNHALTED.REF */
        EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
        return intel_perfmon_event_map[hw_event];
}

static __initconst const u64 westmere_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
                [ C(RESULT_MISS)   ] = 0x0151, /* L1D.REPL                     */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
                [ C(RESULT_MISS)   ] = 0x0251, /* L1D.M_REPL                   */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
                [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
                [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
                [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
                [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS       */
                [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES      */
                [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
                [ C(RESULT_MISS)   ] = 0x0185, /* ITLB_MISSES.ANY              */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
                [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
};

static __initconst const u64 nehalem_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI            */
                [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE         */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI            */
                [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE         */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
                [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
                [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
                [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
                [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
                [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
                [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0x0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
                [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
                [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
};

static __initconst const u64 core2_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
                [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
                [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
                [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
                [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
                [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
                [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
                [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
                [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
                [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
};

static __initconst const u64 atom_hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
                [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0x0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(L1I ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
                [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
                [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
                [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(DTLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
                [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
                [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = 0,
                [ C(RESULT_MISS)   ] = 0,
        },
 },
 [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
                [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
 [ C(BPU ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
                [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = -1,
                [ C(RESULT_MISS)   ] = -1,
        },
 },
};

static void intel_pmu_disable_all(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

        if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
                intel_pmu_disable_bts();

        intel_pmu_pebs_disable_all();
        intel_pmu_lbr_disable_all();
}

static void intel_pmu_enable_all(int added)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        intel_pmu_pebs_enable_all();
        intel_pmu_lbr_enable_all();
        wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

        if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
                struct perf_event *event =
                        cpuc->events[X86_PMC_IDX_FIXED_BTS];

                if (WARN_ON_ONCE(!event))
                        return;

                intel_pmu_enable_bts(event->hw.config);
        }
}

/*
 * Workaround for:
 *   Intel Errata AAK100 (model 26)
 *   Intel Errata AAP53  (model 30)
 *   Intel Errata BD53   (model 44)
 *
 * These chips need to be 'reset' when adding counters by programming the
 * magic three (non-counting) events 0x4300D2, 0x4300B1 and 0x4300B5, either
 * in sequence on the same PMC, or on different PMCs.
 */
static void intel_pmu_nhm_enable_all(int added)
{
        if (added) {
                struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
                int i;

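                /*
                 * Program the three magic (non-counting) events from the
                 * errata workaround above into the first three PMCs.
                 */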
                wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 0, 0x4300D2);
                wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 1, 0x4300B1);
                wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + 2, 0x4300B5);

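                /*
                 * Briefly enable the counters selected by mask 0x3 in
                 * GLOBAL_CTRL, then disable them again; this pulse
                 * performs the 'reset' described above.
                 */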
                wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x3);
                wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0x0);

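                /*
                 * Reprogram and re-enable any real events scheduled on
                 * the counters we just overwrote.
                 */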
                for (i = 0; i < 3; i++) {
                        struct perf_event *event = cpuc->events[i];

                        if (!event)
                                continue;

                        __x86_pmu_enable_event(&event->hw);
                }
        }
        intel_pmu_enable_all(added);
}

static inline u64 intel_pmu_get_status(void)
{
        u64 status;

        rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

        return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
        wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
{
        int idx = hwc->idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, mask;

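        /*
         * Each fixed counter owns a 4-bit control field at (idx * 4) in
         * MSR_ARCH_PERFMON_FIXED_CTR_CTRL; clearing it stops the counter.
         */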
        mask = 0xfULL << (idx * 4);

        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_disable_event(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
                intel_pmu_disable_bts();
                intel_pmu_drain_bts_buffer();
                return;
        }

        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
                intel_pmu_disable_fixed(hwc);
                return;
        }

        x86_pmu_disable_event(event);

        if (unlikely(event->attr.precise))
                intel_pmu_pebs_disable(event);
}

static void intel_pmu_enable_fixed(struct hw_perf_event *hwc)
{
        int idx = hwc->idx - X86_PMC_IDX_FIXED;
        u64 ctrl_val, bits, mask;

        /*
         * Enable IRQ generation (0x8),
         * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
         * if requested:
         */
        bits = 0x8ULL;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
                bits |= 0x2;
        if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
                bits |= 0x1;

        /*
         * ANY bit is supported in v3 and up
         */
        if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
                bits |= 0x4;

        bits <<= (idx * 4);
        mask = 0xfULL << (idx * 4);

        rdmsrl(hwc->config_base, ctrl_val);
        ctrl_val &= ~mask;
        ctrl_val |= bits;
        wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_event(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
                if (!__get_cpu_var(cpu_hw_events).enabled)
                        return;

                intel_pmu_enable_bts(hwc->config);
                return;
        }

        if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
                intel_pmu_enable_fixed(hwc);
                return;
        }

        if (unlikely(event->attr.precise))
                intel_pmu_pebs_enable(event);

        __x86_pmu_enable_event(hwc);
}

/*
 * Save and restart an expired event. Called by NMI contexts,
 * so it has to be careful about preempting normal event ops:
 */
static int intel_pmu_save_and_restart(struct perf_event *event)
{
        x86_perf_event_update(event);
        return x86_perf_event_set_period(event);
}

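/*
 * Clear all counter programming; this is the last-resort path taken when
 * the PMI handler appears to be stuck (see intel_pmu_handle_irq() below).
 */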
static void intel_pmu_reset(void)
{
        struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
        unsigned long flags;
        int idx;

        if (!x86_pmu.num_counters)
                return;

        local_irq_save(flags);

        printk("clearing PMU state on CPU#%d\n", smp_processor_id());

        for (idx = 0; idx < x86_pmu.num_counters; idx++) {
                checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
                checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
        }
        for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++)
                checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);

        if (ds)
                ds->bts_index = ds->bts_buffer_base;

        local_irq_restore(flags);
}

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs)
{
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        int bit, loops;
        u64 ack, status;

        perf_sample_data_init(&data, 0);

        cpuc = &__get_cpu_var(cpu_hw_events);

        intel_pmu_disable_all();
        intel_pmu_drain_bts_buffer();
        status = intel_pmu_get_status();
        if (!status) {
                intel_pmu_enable_all(0);
                return 0;
        }

        loops = 0;
again:
        if (++loops > 100) {
                WARN_ONCE(1, "perfevents: irq loop stuck!\n");
                perf_event_print_debug();
                intel_pmu_reset();
                goto done;
        }

        inc_irq_stat(apic_perf_irqs);
        ack = status;

        intel_pmu_lbr_read();

        /*
         * PEBS overflow sets bit 62 in the global status register
         */
        if (__test_and_clear_bit(62, (unsigned long *)&status))
                x86_pmu.drain_pebs(regs);

        for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
                struct perf_event *event = cpuc->events[bit];

                if (!test_bit(bit, cpuc->active_mask))
                        continue;

                if (!intel_pmu_save_and_restart(event))
                        continue;

                data.period = event->hw.last_period;

                if (perf_event_overflow(event, 1, &data, regs))
                        x86_pmu_stop(event);
        }

        intel_pmu_ack_status(ack);

        /*
         * Repeat if there is more work to be done:
         */
        status = intel_pmu_get_status();
        if (status)
                goto again;

done:
        intel_pmu_enable_all(0);
        return 1;
}

static struct event_constraint *
intel_bts_constraints(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        unsigned int hw_event, bts_event;

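        /*
         * Counting every retired branch instruction (period == 1) can be
         * handled by BTS instead of a generic counter.
         */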
        hw_event = hwc->config & INTEL_ARCH_EVENT_MASK;
        bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);

        if (unlikely(hw_event == bts_event && hwc->sample_period == 1))
                return &bts_constraint;

        return NULL;
}

static struct event_constraint *
intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
{
        struct event_constraint *c;

        c = intel_bts_constraints(event);
        if (c)
                return c;

        c = intel_pebs_constraints(event);
        if (c)
                return c;

        return x86_get_event_constraints(cpuc, event);
}

static int intel_pmu_hw_config(struct perf_event *event)
{
        int ret = x86_pmu_hw_config(event);

        if (ret)
                return ret;

        if (event->attr.type != PERF_TYPE_RAW)
                return 0;

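        /*
         * The ANY bit makes a counter count across both hyper-threads and
         * can therefore observe sibling-thread activity: it requires
         * perfmon v3 hardware and, under paranoid settings, CAP_SYS_ADMIN.
         */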
        if (!(event->attr.config & ARCH_PERFMON_EVENTSEL_ANY))
                return 0;

        if (x86_pmu.version < 3)
                return -EINVAL;

        if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
                return -EACCES;

        event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

        return 0;
}

static __initconst const struct x86_pmu core_pmu = {
        .name                   = "core",
        .handle_irq             = x86_pmu_handle_irq,
        .disable_all            = x86_pmu_disable_all,
        .enable_all             = x86_pmu_enable_all,
        .enable                 = x86_pmu_enable_event,
        .disable                = x86_pmu_disable_event,
        .hw_config              = x86_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
        .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
        .event_map              = intel_pmu_event_map,
        .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
        .apic                   = 1,
        /*
         * Intel PMCs cannot be accessed sanely above 32 bit width,
         * so we install an artificial 1<<31 period regardless of
         * the generic event period:
         */
        .max_period             = (1ULL << 31) - 1,
        .get_event_constraints  = intel_get_event_constraints,
        .event_constraints      = intel_core_event_constraints,
};

static void intel_pmu_cpu_starting(int cpu)
{
        init_debug_store_on_cpu(cpu);
        /*
         * Deal with CPUs that don't clear their LBRs on power-up.
         */
        intel_pmu_lbr_reset();
}

static void intel_pmu_cpu_dying(int cpu)
{
        fini_debug_store_on_cpu(cpu);
}

static __initconst const struct x86_pmu intel_pmu = {
        .name                   = "Intel",
        .handle_irq             = intel_pmu_handle_irq,
        .disable_all            = intel_pmu_disable_all,
        .enable_all             = intel_pmu_enable_all,
        .enable                 = intel_pmu_enable_event,
        .disable                = intel_pmu_disable_event,
        .hw_config              = intel_pmu_hw_config,
        .schedule_events        = x86_schedule_events,
        .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
        .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
        .event_map              = intel_pmu_event_map,
        .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
        .apic                   = 1,
        /*
         * Intel PMCs cannot be accessed sanely above 32 bit width,
         * so we install an artificial 1<<31 period regardless of
         * the generic event period:
         */
        .max_period             = (1ULL << 31) - 1,
        .get_event_constraints  = intel_get_event_constraints,

        .cpu_starting           = intel_pmu_cpu_starting,
        .cpu_dying              = intel_pmu_cpu_dying,
};

static void intel_clovertown_quirks(void)
{
        /*
         * PEBS is unreliable due to:
         *
         *   AJ67  - PEBS may experience CPL leaks
         *   AJ68  - PEBS PMI may be delayed by one event
         *   AJ69  - GLOBAL_STATUS[62] will only be set when DEBUGCTL[12]
         *   AJ106 - FREEZE_LBRS_ON_PMI doesn't work in combination with PEBS
         *
         * AJ67 could be worked around by restricting the OS/USR flags.
         * AJ69 could be worked around by setting PMU_FREEZE_ON_PMI.
         *
         * AJ106 could possibly be worked around by not allowing LBR
         *       usage from PEBS, including the fixup.
         * AJ68  could possibly be worked around by always programming
         *       a pebs_event_reset[0] value and coping with the lost events.
         *
         * But taken together it might just make sense to not enable PEBS on
         * these chips.
         */
        printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
        x86_pmu.pebs = 0;
        x86_pmu.pebs_constraints = NULL;
}

static __init int intel_pmu_init(void)
{
        union cpuid10_edx edx;
        union cpuid10_eax eax;
        unsigned int unused;
        unsigned int ebx;
        int version;

        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
                switch (boot_cpu_data.x86) {
                case 0x6:
                        return p6_pmu_init();
                case 0xf:
                        return p4_pmu_init();
                }
                return -ENODEV;
        }

        /*
         * Check whether the Architectural PerfMon supports
         * Branch Misses Retired hw_event or not.
         */
        cpuid(10, &eax.full, &ebx, &unused, &edx.full);
        if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
                return -ENODEV;

        version = eax.split.version_id;
        if (version < 2)
                x86_pmu = core_pmu;
        else
                x86_pmu = intel_pmu;

        x86_pmu.version                 = version;
        x86_pmu.num_counters            = eax.split.num_counters;
        x86_pmu.cntval_bits             = eax.split.bit_width;
        x86_pmu.cntval_mask             = (1ULL << eax.split.bit_width) - 1;

        /*
         * Quirk: v2 perfmon does not report fixed-purpose events, so
         * assume at least 3 events:
         */
        if (version > 1)
                x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);

        /*
         * v2 and above have a perf capabilities MSR
         */
        if (version > 1) {
                u64 capabilities;

                rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
                x86_pmu.intel_cap.capabilities = capabilities;
        }

        intel_ds_init();

        /*
         * Install the hw-cache-events table:
         */
        switch (boot_cpu_data.x86_model) {
        case 14: /* 65 nm core solo/duo, "Yonah" */
                pr_cont("Core events, ");
                break;

        case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
                x86_pmu.quirks = intel_clovertown_quirks;
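                /* fall through: Clovertown shares the Core2 tables */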
        case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
        case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
        case 29: /* six-core 45 nm xeon "Dunnington" */
                memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));

                intel_pmu_lbr_init_core();

                x86_pmu.event_constraints = intel_core2_event_constraints;
                pr_cont("Core2 events, ");
                break;

        case 26: /* 45 nm nehalem, "Bloomfield" */
        case 30: /* 45 nm nehalem, "Lynnfield" */
                memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));

                intel_pmu_lbr_init_nhm();

                x86_pmu.event_constraints = intel_nehalem_event_constraints;
                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
                pr_cont("Nehalem events, ");
                break;

        case 28: /* Atom */
                memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));

                intel_pmu_lbr_init_atom();

                x86_pmu.event_constraints = intel_gen_event_constraints;
                pr_cont("Atom events, ");
                break;

        case 37: /* 32 nm nehalem, "Clarkdale" */
        case 44: /* 32 nm nehalem, "Gulftown" */
                memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));

                intel_pmu_lbr_init_nhm();

                x86_pmu.event_constraints = intel_westmere_event_constraints;
                x86_pmu.enable_all = intel_pmu_nhm_enable_all;
                pr_cont("Westmere events, ");
                break;

        default:
                /*
                 * default constraints for v2 and up
                 */
                x86_pmu.event_constraints = intel_gen_event_constraints;
                pr_cont("generic architected perfmon, ");
        }
        return 0;
}

#else /* CONFIG_CPU_SUP_INTEL */

static int intel_pmu_init(void)
{
        return 0;
}

#endif /* CONFIG_CPU_SUP_INTEL */