sparc: Support HW cache events.
arch/sparc/kernel/perf_event.c
/* Performance event support for sparc64.
 *
 * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
 *
 * This code is based almost entirely upon the x86 perf event
 * code, which is:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstra@redhat.com>
 */

#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>

#include <asm/cpudata.h>
#include <asm/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>

/* Sparc64 chips have two performance counters, 32-bits each, with
 * overflow interrupts generated on transition from 0xffffffff to 0.
 * The counters are accessed in one go using a 64-bit register.
 *
 * Both counters are controlled using a single control register.  The
 * only way to stop all sampling is to clear all of the context (user,
 * supervisor, hypervisor) sampling enable bits.  But these bits apply
 * to both counters, thus the two counters can't be enabled/disabled
 * individually.
 *
 * The control register has two event fields, one for each of the two
 * counters.  It's thus nearly impossible to have one counter going
 * while keeping the other one stopped.  Therefore it is possible to
 * get overflow interrupts for counters not currently "in use" and
 * that condition must be checked in the overflow interrupt handler.
 *
 * So we use a hack, in that we program inactive counters with the
 * "sw_count0" and "sw_count1" events.  These count how many times
 * the instruction "sethi %hi(0xfc000), %g0" is executed.  It's an
 * unusual way to encode a NOP and therefore will not trigger in
 * normal code.
 */
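
/* Illustrative sketch of the resulting PCR layout, using the ultra3i
 * settings defined below (event_mask 0x3f, upper_shift 11,
 * lower_shift 4): counting event 0x09 on the upper counter while the
 * lower counter is parked on its nop event (0x14) puts roughly
 * (0x09 << 11) | (0x14 << 4) into the PCR's two event fields;
 * hw_perf_enable() then ORs in the trace-enable bits saved in each
 * event's config_base.
 */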

#define MAX_HWEVENTS                    2
#define MAX_PERIOD                      ((1UL << 32) - 1)

#define PIC_UPPER_INDEX                 0
#define PIC_LOWER_INDEX                 1

struct cpu_hw_events {
        struct perf_event       *events[MAX_HWEVENTS];
        unsigned long           used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
        unsigned long           active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
        int enabled;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };

struct perf_event_map {
        u16     encoding;
        u8      pic_mask;
#define PIC_NONE        0x00
#define PIC_UPPER       0x01
#define PIC_LOWER       0x02
};

#define C(x) PERF_COUNT_HW_CACHE_##x

#define CACHE_OP_UNSUPPORTED    0xfffe
#define CACHE_OP_NONSENSE       0xffff

typedef struct perf_event_map cache_map_t
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];

struct sparc_pmu {
        const struct perf_event_map     *(*event_map)(int);
        const cache_map_t               *cache_map;
        int                             max_events;
        int                             upper_shift;
        int                             lower_shift;
        int                             event_mask;
        int                             hv_bit;
        int                             irq_bit;
        int                             upper_nop;
        int                             lower_nop;
};
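
/* Summary of the per-chip description above: event_map and cache_map
 * translate generic perf event types into hardware encodings,
 * upper_shift/lower_shift and event_mask describe where the two event
 * fields sit inside the PCR, hv_bit and irq_bit are the chip's extra
 * PCR enable bits (hypervisor tracing and overflow interrupts, where
 * present), and upper_nop/lower_nop are the "sw_count" encodings used
 * to park an idle counter.
 */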

static const struct perf_event_map ultra3i_perfmon_event_map[] = {
        [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
        [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
        [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
        [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};

static const struct perf_event_map *ultra3i_event_map(int event_id)
{
        return &ultra3i_perfmon_event_map[event_id];
}

static const cache_map_t ultra3i_cache_map = {
[C(L1D)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
                [C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
                [C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(L1I)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
                [C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
                [ C(RESULT_MISS)   ] = { CACHE_OP_NONSENSE },
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(LL)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
                [C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
                [C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(DTLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(ITLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(BPU)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
        },
        [ C(OP_PREFETCH) ] = {
                [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
                [ C(RESULT_MISS)   ] = { CACHE_OP_UNSUPPORTED },
        },
},
};

static const struct sparc_pmu ultra3i_pmu = {
        .event_map      = ultra3i_event_map,
        .cache_map      = &ultra3i_cache_map,
        .max_events     = ARRAY_SIZE(ultra3i_perfmon_event_map),
        .upper_shift    = 11,
        .lower_shift    = 4,
        .event_mask     = 0x3f,
        .upper_nop      = 0x1c,
        .lower_nop      = 0x14,
};

static const struct perf_event_map niagara2_perfmon_event_map[] = {
        [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
        [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
        [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
        [PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
        [PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};

static const struct perf_event_map *niagara2_event_map(int event_id)
{
        return &niagara2_perfmon_event_map[event_id];
}

static const struct sparc_pmu niagara2_pmu = {
        .event_map      = niagara2_event_map,
        .max_events     = ARRAY_SIZE(niagara2_perfmon_event_map),
        .upper_shift    = 19,
        .lower_shift    = 6,
        .event_mask     = 0xfff,
        .hv_bit         = 0x8,
        .irq_bit        = 0x30,
        .upper_nop      = 0x220,
        .lower_nop      = 0x220,
};
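
/* Note that niagara2 has no cache_map wired up here, so
 * PERF_TYPE_HW_CACHE events on that chip get -ENOENT back from
 * sparc_map_cache_event(); only the ultra3i table above is hooked up.
 */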

static const struct sparc_pmu *sparc_pmu __read_mostly;

static u64 event_encoding(u64 event_id, int idx)
{
        if (idx == PIC_UPPER_INDEX)
                event_id <<= sparc_pmu->upper_shift;
        else
                event_id <<= sparc_pmu->lower_shift;
        return event_id;
}

static u64 mask_for_index(int idx)
{
        return event_encoding(sparc_pmu->event_mask, idx);
}

static u64 nop_for_index(int idx)
{
        return event_encoding(idx == PIC_UPPER_INDEX ?
                              sparc_pmu->upper_nop :
                              sparc_pmu->lower_nop, idx);
}
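
/* Since the PCR enable bits are shared by both counters, "disabling"
 * a single event cannot actually stop its counter.  Instead,
 * sparc_pmu_disable_event() rewrites that counter's event field with
 * the nop encoding from nop_for_index(), and sparc_pmu_enable_event()
 * writes the real encoding back.
 */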

static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc,
                                            int idx)
{
        u64 val, mask = mask_for_index(idx);

        val = pcr_ops->read();
        pcr_ops->write((val & ~mask) | hwc->config);
}

static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc,
                                             int idx)
{
        u64 mask = mask_for_index(idx);
        u64 nop = nop_for_index(idx);
        u64 val = pcr_ops->read();

        pcr_ops->write((val & ~mask) | nop);
}

void hw_perf_enable(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        u64 val;
        int i;

        if (cpuc->enabled)
                return;

        cpuc->enabled = 1;
        barrier();

        val = pcr_ops->read();

        for (i = 0; i < MAX_HWEVENTS; i++) {
                struct perf_event *cp = cpuc->events[i];
                struct hw_perf_event *hwc;

                if (!cp)
                        continue;
                hwc = &cp->hw;
                val |= hwc->config_base;
        }

        pcr_ops->write(val);
}

void hw_perf_disable(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        u64 val;

        if (!cpuc->enabled)
                return;

        cpuc->enabled = 0;

        val = pcr_ops->read();
        val &= ~(PCR_UTRACE | PCR_STRACE |
                 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
        pcr_ops->write(val);
}
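
/* Both 32-bit counters live in the single 64-bit PIC register, so
 * read_pmc() extracts one half and write_pmc() must read-modify-write
 * the whole PIC to update one counter without disturbing the other.
 */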

static u32 read_pmc(int idx)
{
        u64 val;

        read_pic(val);
        if (idx == PIC_UPPER_INDEX)
                val >>= 32;

        return val & 0xffffffff;
}

static void write_pmc(int idx, u64 val)
{
        u64 shift, mask, pic;

        shift = 0;
        if (idx == PIC_UPPER_INDEX)
                shift = 32;

        mask = ((u64) 0xffffffff) << shift;
        val <<= shift;

        read_pic(pic);
        pic &= ~mask;
        pic |= val;
        write_pic(pic);
}
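
/* Program the counter with -left (truncated to 32 bits) so that it
 * overflows, and raises the overflow interrupt, after another 'left'
 * events; 'left' is clamped to MAX_PERIOD because the counters are
 * only 32 bits wide.
 */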

static int sparc_perf_event_set_period(struct perf_event *event,
                                         struct hw_perf_event *hwc, int idx)
{
        s64 left = atomic64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0;

        if (unlikely(left <= -period)) {
                left = period;
                atomic64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                atomic64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }
        if (left > MAX_PERIOD)
                left = MAX_PERIOD;

        atomic64_set(&hwc->prev_count, (u64)-left);

        write_pmc(idx, (u64)(-left) & 0xffffffff);

        perf_event_update_userpage(event);

        return ret;
}

static int sparc_pmu_enable(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        if (test_and_set_bit(idx, cpuc->used_mask))
                return -EAGAIN;

        sparc_pmu_disable_event(hwc, idx);

        cpuc->events[idx] = event;
        set_bit(idx, cpuc->active_mask);

        sparc_perf_event_set_period(event, hwc, idx);
        sparc_pmu_enable_event(hwc, idx);
        perf_event_update_userpage(event);
        return 0;
}
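
/* Fold the current hardware counter value into event->count.  The
 * cmpxchg loop copes with an NMI updating prev_count underneath us,
 * and the shift by (64 - 32) keeps only the low 32 bits of the
 * difference, i.e. the delta in the counter's own width.
 */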

static u64 sparc_perf_event_update(struct perf_event *event,
                                     struct hw_perf_event *hwc, int idx)
{
        int shift = 64 - 32;
        u64 prev_raw_count, new_raw_count;
        s64 delta;

again:
        prev_raw_count = atomic64_read(&hwc->prev_count);
        new_raw_count = read_pmc(idx);

        if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
                             new_raw_count) != prev_raw_count)
                goto again;

        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;

        atomic64_add(delta, &event->count);
        atomic64_sub(delta, &hwc->period_left);

        return new_raw_count;
}

static void sparc_pmu_disable(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        clear_bit(idx, cpuc->active_mask);
        sparc_pmu_disable_event(hwc, idx);

        barrier();

        sparc_perf_event_update(event, hwc, idx);
        cpuc->events[idx] = NULL;
        clear_bit(idx, cpuc->used_mask);

        perf_event_update_userpage(event);
}

static void sparc_pmu_read(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        sparc_perf_event_update(event, hwc, hwc->idx);
}

static void sparc_pmu_unthrottle(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        sparc_pmu_enable_event(hwc, hwc->idx);
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);
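
/* The performance counters are shared with the NMI watchdog, so the
 * first active perf event stops the watchdog on all cpus and the
 * release of the last event starts it again.
 */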

void perf_event_grab_pmc(void)
{
        if (atomic_inc_not_zero(&active_events))
                return;

        mutex_lock(&pmc_grab_mutex);
        if (atomic_read(&active_events) == 0) {
                if (atomic_read(&nmi_active) > 0) {
                        on_each_cpu(stop_nmi_watchdog, NULL, 1);
                        BUG_ON(atomic_read(&nmi_active) != 0);
                }
                atomic_inc(&active_events);
        }
        mutex_unlock(&pmc_grab_mutex);
}

void perf_event_release_pmc(void)
{
        if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
                if (atomic_read(&nmi_active) == 0)
                        on_each_cpu(start_nmi_watchdog, NULL, 1);
                mutex_unlock(&pmc_grab_mutex);
        }
}
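
/* PERF_TYPE_HW_CACHE events arrive with the generic encoding
 * cache_type | (cache_op << 8) | (cache_result << 16); look the
 * triple up in the chip's cache_map and reject unsupported or
 * nonsensical combinations.
 */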

static const struct perf_event_map *sparc_map_cache_event(u64 config)
{
        unsigned int cache_type, cache_op, cache_result;
        const struct perf_event_map *pmap;

        if (!sparc_pmu->cache_map)
                return ERR_PTR(-ENOENT);

        cache_type = (config >>  0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return ERR_PTR(-EINVAL);

        cache_op = (config >>  8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return ERR_PTR(-EINVAL);

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return ERR_PTR(-EINVAL);

        pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);

        if (pmap->encoding == CACHE_OP_UNSUPPORTED)
                return ERR_PTR(-ENOENT);

        if (pmap->encoding == CACHE_OP_NONSENSE)
                return ERR_PTR(-EINVAL);

        return pmap;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
        perf_event_release_pmc();
}

static int __hw_perf_event_init(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        struct hw_perf_event *hwc = &event->hw;
        const struct perf_event_map *pmap;
        u64 enc;

        if (atomic_read(&nmi_active) < 0)
                return -ENODEV;

        if (attr->type == PERF_TYPE_HARDWARE) {
                if (attr->config >= sparc_pmu->max_events)
                        return -EINVAL;
                pmap = sparc_pmu->event_map(attr->config);
        } else if (attr->type == PERF_TYPE_HW_CACHE) {
                pmap = sparc_map_cache_event(attr->config);
                if (IS_ERR(pmap))
                        return PTR_ERR(pmap);
        } else
                return -EOPNOTSUPP;

        perf_event_grab_pmc();
        event->destroy = hw_perf_event_destroy;

        /* We save the enable bits in the config_base.  So to
         * turn off sampling just write 'config', and to enable
         * things write 'config | config_base'.
         */
        hwc->config_base = sparc_pmu->irq_bit;
        if (!attr->exclude_user)
                hwc->config_base |= PCR_UTRACE;
        if (!attr->exclude_kernel)
                hwc->config_base |= PCR_STRACE;
        if (!attr->exclude_hv)
                hwc->config_base |= sparc_pmu->hv_bit;

        if (!hwc->sample_period) {
                hwc->sample_period = MAX_PERIOD;
                hwc->last_period = hwc->sample_period;
                atomic64_set(&hwc->period_left, hwc->sample_period);
        }
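
        /* Each event is pinned to a single counter here: use the upper
         * counter whenever the encoding allows it, otherwise fall back
         * to the lower one.
         */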

        enc = pmap->encoding;
        if (pmap->pic_mask & PIC_UPPER) {
                hwc->idx = PIC_UPPER_INDEX;
                enc <<= sparc_pmu->upper_shift;
        } else {
                hwc->idx = PIC_LOWER_INDEX;
                enc <<= sparc_pmu->lower_shift;
        }

        hwc->config |= enc;
        return 0;
}

static const struct pmu pmu = {
        .enable         = sparc_pmu_enable,
        .disable        = sparc_pmu_disable,
        .read           = sparc_pmu_read,
        .unthrottle     = sparc_pmu_unthrottle,
};

const struct pmu *hw_perf_event_init(struct perf_event *event)
{
        int err = __hw_perf_event_init(event);

        if (err)
                return ERR_PTR(err);
        return &pmu;
}

void perf_event_print_debug(void)
{
        unsigned long flags;
        u64 pcr, pic;
        int cpu;

        if (!sparc_pmu)
                return;

        local_irq_save(flags);

        cpu = smp_processor_id();

        pcr = pcr_ops->read();
        read_pic(pic);

        pr_info("\n");
        pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
                cpu, pcr, pic);

        local_irq_restore(flags);
}

static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
                                              unsigned long cmd, void *__args)
{
        struct die_args *args = __args;
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        struct pt_regs *regs;
        int idx;

        if (!atomic_read(&active_events))
                return NOTIFY_DONE;

        switch (cmd) {
        case DIE_NMI:
                break;

        default:
                return NOTIFY_DONE;
        }

        regs = args->regs;

        data.addr = 0;

        cpuc = &__get_cpu_var(cpu_hw_events);
        for (idx = 0; idx < MAX_HWEVENTS; idx++) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;
                u64 val;

                if (!test_bit(idx, cpuc->active_mask))
                        continue;
                hwc = &event->hw;
                val = sparc_perf_event_update(event, hwc, idx);
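                /* Counters are normally programmed with a value whose
                 * top bit is set, so a value with bit 31 still set has
                 * most likely not wrapped yet and this counter (perhaps
                 * just parked on a nop event) is not the source of the
                 * overflow interrupt; skip it.
                 */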
                if (val & (1ULL << 31))
                        continue;

                data.period = event->hw.last_period;
                if (!sparc_perf_event_set_period(event, hwc, idx))
                        continue;

                if (perf_event_overflow(event, 1, &data, regs))
                        sparc_pmu_disable_event(hwc, idx);
        }

        return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_event_nmi_notifier = {
        .notifier_call          = perf_event_nmi_handler,
};

static bool __init supported_pmu(void)
{
        if (!strcmp(sparc_pmu_type, "ultra3i")) {
                sparc_pmu = &ultra3i_pmu;
                return true;
        }
        if (!strcmp(sparc_pmu_type, "niagara2")) {
                sparc_pmu = &niagara2_pmu;
                return true;
        }
        return false;
}

void __init init_hw_perf_events(void)
{
        pr_info("Performance events: ");

        if (!supported_pmu()) {
                pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
                return;
        }

        pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);

        /* All sparc64 PMUs currently have 2 events.  But this simple
         * driver only supports one active event at a time.
         */
        perf_max_events = 1;

        register_die_notifier(&perf_event_nmi_notifier);
}