/* Performance event support for sparc64.
 *
 * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
 *
 * This code is based almost entirely upon the x86 perf event
 * code, which is:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>

#include <asm/cpudata.h>
#include <asm/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>

/* Sparc64 chips have two performance counters, 32-bits each, with
 * overflow interrupts generated on transition from 0xffffffff to 0.
 * The counters are accessed in one go using a 64-bit register.
 *
 * Both counters are controlled using a single control register.  The
 * only way to stop all sampling is to clear all of the context (user,
 * supervisor, hypervisor) sampling enable bits.  But these bits apply
 * to both counters, thus the two counters can't be enabled/disabled
 * individually.
 *
 * The control register has two event fields, one for each of the two
 * counters.  It's thus nearly impossible to have one counter going
 * while keeping the other one stopped.  Therefore it is possible to
 * get overflow interrupts for counters not currently "in use" and
 * that condition must be checked in the overflow interrupt handler.
 *
 * So we use a hack, in that we program inactive counters with the
 * "sw_count0" and "sw_count1" events.  These count how many times
 * the instruction "sethi %hi(0xfc000), %g0" is executed.  It's an
 * unusual way to encode a NOP and therefore will not trigger in
 * normal code.
 */
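
/* Each supported chip supplies the parameters this scheme needs via
 * struct sparc_pmu below: the bit offsets of the two event fields in
 * the control register (upper_shift/lower_shift), the "NOP" event
 * encodings used to idle a counter (upper_nop/lower_nop), and the
 * context sampling enable bits (PCR_UTRACE, PCR_STRACE, and the
 * chip's hv_bit).
 */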

#define MAX_HWEVENTS			2
#define MAX_PERIOD			((1UL << 32) - 1)

#define PIC_UPPER_INDEX			0
#define PIC_LOWER_INDEX			1

struct cpu_hw_events {
	struct perf_event	*events[MAX_HWEVENTS];
	unsigned long		used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
	unsigned long		active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
	int			enabled;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };

struct perf_event_map {
	u16	encoding;
	u8	pic_mask;
#define PIC_NONE	0x00
#define PIC_UPPER	0x01
#define PIC_LOWER	0x02
};

#define C(x) PERF_COUNT_HW_CACHE_##x

#define CACHE_OP_UNSUPPORTED	0xfffe
#define CACHE_OP_NONSENSE	0xffff

typedef struct perf_event_map cache_map_t
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

struct sparc_pmu {
	const struct perf_event_map	*(*event_map)(int);
	const cache_map_t		*cache_map;
	int				max_events;
	int				upper_shift;
	int				lower_shift;
	int				event_mask;
	int				hv_bit;
	int				irq_bit;
	int				upper_nop;
	int				lower_nop;
};

static const struct perf_event_map ultra3_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};

static const struct perf_event_map *ultra3_event_map(int event_id)
{
	return &ultra3_perfmon_event_map[event_id];
}

static const cache_map_t ultra3_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu ultra3_pmu = {
	.event_map	= ultra3_event_map,
	.cache_map	= &ultra3_cache_map,
	.max_events	= ARRAY_SIZE(ultra3_perfmon_event_map),
};

/* Niagara1 is very limited.  The upper PIC is hard-locked to count
 * only instructions, so it is free running which creates all kinds of
 * problems.  Some hardware designs make one wonder if the creator
 * even looked at how this stuff gets used by software.
 */
static const struct perf_event_map niagara1_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
};

static const struct perf_event_map *niagara1_event_map(int event_id)
{
	return &niagara1_perfmon_event_map[event_id];
}

static const cache_map_t niagara1_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
		[C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu niagara1_pmu = {
	.event_map	= niagara1_event_map,
	.cache_map	= &niagara1_cache_map,
	.max_events	= ARRAY_SIZE(niagara1_perfmon_event_map),
};

static const struct perf_event_map niagara2_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};

static const struct perf_event_map *niagara2_event_map(int event_id)
{
	return &niagara2_perfmon_event_map[event_id];
}

static const cache_map_t niagara2_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu niagara2_pmu = {
	.event_map	= niagara2_event_map,
	.cache_map	= &niagara2_cache_map,
	.max_events	= ARRAY_SIZE(niagara2_perfmon_event_map),
};

static const struct sparc_pmu *sparc_pmu __read_mostly;
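
/* The two counters share one control register, but each counter's
 * event field sits at a different, chip-specific offset.  These
 * helpers shift an event encoding, the event-field mask, or the idle
 * "NOP" encoding into place for the counter selected by idx.
 */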
static u64 event_encoding(u64 event_id, int idx)
{
	if (idx == PIC_UPPER_INDEX)
		event_id <<= sparc_pmu->upper_shift;
	else
		event_id <<= sparc_pmu->lower_shift;
	return event_id;
}

static u64 mask_for_index(int idx)
{
	return event_encoding(sparc_pmu->event_mask, idx);
}

static u64 nop_for_index(int idx)
{
	return event_encoding(idx == PIC_UPPER_INDEX ?
			      sparc_pmu->upper_nop :
			      sparc_pmu->lower_nop, idx);
}

static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	u64 val, mask = mask_for_index(idx);

	val = pcr_ops->read();
	pcr_ops->write((val & ~mask) | hwc->config);
}

static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	u64 mask = mask_for_index(idx);
	u64 nop = nop_for_index(idx);
	u64 val = pcr_ops->read();

	pcr_ops->write((val & ~mask) | nop);
}

void hw_perf_enable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;
	int i;

	if (cpuc->enabled)
		return;
	cpuc->enabled = 1;
	barrier();

	val = pcr_ops->read();
	for (i = 0; i < MAX_HWEVENTS; i++) {
		struct perf_event *cp = cpuc->events[i];
		struct hw_perf_event *hwc;

		if (!cp)
			continue;
		hwc = &cp->hw;
		val |= hwc->config_base;
	}
	pcr_ops->write(val);
}

void hw_perf_disable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;

	if (!cpuc->enabled)
		return;
	cpuc->enabled = 0;

	val = pcr_ops->read();
	val &= ~(PCR_UTRACE | PCR_STRACE |
		 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
	pcr_ops->write(val);
}
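
/* Both 32-bit counters live in the single 64-bit %pic register:
 * PIC_UPPER_INDEX selects bits 63:32 and PIC_LOWER_INDEX bits 31:0.
 * read_pmc()/write_pmc() extract or update just the half selected by
 * idx, leaving the other counter's value untouched.
 */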
static u32 read_pmc(int idx)
{
	u64 val = 0;

	read_pic(val);
	if (idx == PIC_UPPER_INDEX)
		val >>= 32;

	return val & 0xffffffff;
}

static void write_pmc(int idx, u64 val)
{
	u64 shift, mask, pic;

	shift = 0;
	if (idx == PIC_UPPER_INDEX)
		shift = 32;

	mask = ((u64) 0xffffffff) << shift;
	val <<= shift;

	read_pic(pic);
	pic &= ~mask;
	pic |= val;
	write_pic(pic);
}
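
/* An overflow interrupt fires on the 0xffffffff -> 0 transition, so
 * to be interrupted after "left" more events the counter is primed
 * with the two's complement value -left.
 */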
static int sparc_perf_event_set_period(struct perf_event *event,
				       struct hw_perf_event *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	if (left > MAX_PERIOD)
		left = MAX_PERIOD;

	atomic64_set(&hwc->prev_count, (u64)-left);

	write_pmc(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

static int sparc_pmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (test_and_set_bit(idx, cpuc->used_mask))
		return -EAGAIN;

	sparc_pmu_disable_event(hwc, idx);

	cpuc->events[idx] = event;
	set_bit(idx, cpuc->active_mask);

	sparc_perf_event_set_period(event, hwc, idx);
	sparc_pmu_enable_event(hwc, idx);
	perf_event_update_userpage(event);
	return 0;
}
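
/* Fold the counter's movement since the last read into event->count.
 * The cmpxchg loop closes the race with the NMI handler updating
 * prev_count underneath us, and the paired shifts compute the delta
 * modulo 2^32 so a wrap of the 32-bit counter is handled correctly.
 */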
static u64 sparc_perf_event_update(struct perf_event *event,
				   struct hw_perf_event *hwc, int idx)
{
	int shift = 64 - 32;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	new_raw_count = read_pmc(idx);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void sparc_pmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	clear_bit(idx, cpuc->active_mask);
	sparc_pmu_disable_event(hwc, idx);

	barrier();

	sparc_perf_event_update(event, hwc, idx);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void sparc_pmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	sparc_perf_event_update(event, hwc, hwc->idx);
}

static void sparc_pmu_unthrottle(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	sparc_pmu_enable_event(hwc, hwc->idx);
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);
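
/* The hardware counters are shared with the NMI watchdog.  The first
 * perf user stops the watchdog on all cpus, and the last one to go
 * away restarts it.
 */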
void perf_event_grab_pmc(void)
{
	if (atomic_inc_not_zero(&active_events))
		return;

	mutex_lock(&pmc_grab_mutex);
	if (atomic_read(&active_events) == 0) {
		if (atomic_read(&nmi_active) > 0) {
			on_each_cpu(stop_nmi_watchdog, NULL, 1);
			BUG_ON(atomic_read(&nmi_active) != 0);
		}
		atomic_inc(&active_events);
	}
	mutex_unlock(&pmc_grab_mutex);
}

void perf_event_release_pmc(void)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
		if (atomic_read(&nmi_active) == 0)
			on_each_cpu(start_nmi_watchdog, NULL, 1);
		mutex_unlock(&pmc_grab_mutex);
	}
}
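
/* A PERF_TYPE_HW_CACHE config packs three selectors into the low
 * bytes: cache type in bits 7:0, operation in bits 15:8, and result
 * in bits 23:16.  For example, an L1-D load-miss event is encoded as:
 *
 *	config = C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16);
 */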
static const struct perf_event_map *sparc_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct perf_event_map *pmap;

	if (!sparc_pmu->cache_map)
		return ERR_PTR(-ENOENT);

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);

	if (pmap->encoding == CACHE_OP_UNSUPPORTED)
		return ERR_PTR(-ENOENT);

	if (pmap->encoding == CACHE_OP_NONSENSE)
		return ERR_PTR(-EINVAL);

	return pmap;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	perf_event_release_pmc();
}
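
/* Since one PCR controls both counters, every event scheduled
 * together must agree on the user/kernel/hypervisor exclusion bits;
 * otherwise the group cannot be represented in hardware.
 */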
static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	struct perf_event *event;
	int i, n, first;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; i++) {
		event = evts[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	return 0;
}

static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *evts[], u64 *events)
{
	struct perf_event *event;
	int n = 0;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		evts[n] = group;
		events[n++] = group->hw.config;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			evts[n] = event;
			events[n++] = event->hw.config;
		}
	}
	return n;
}

static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct perf_event *evts[MAX_HWEVENTS];
	struct hw_perf_event *hwc = &event->hw;
	const struct perf_event_map *pmap;
	u64 enc, events[MAX_HWEVENTS];
	int n;

	if (atomic_read(&nmi_active) < 0)
		return -ENODEV;

	if (attr->type == PERF_TYPE_HARDWARE) {
		if (attr->config >= sparc_pmu->max_events)
			return -EINVAL;
		pmap = sparc_pmu->event_map(attr->config);
	} else if (attr->type == PERF_TYPE_HW_CACHE) {
		pmap = sparc_map_cache_event(attr->config);
		if (IS_ERR(pmap))
			return PTR_ERR(pmap);
	} else
		return -EOPNOTSUPP;

	/* We save the enable bits in the config_base.  So to
	 * turn off sampling just write 'config', and to enable
	 * things write 'config | config_base'.
	 */
	hwc->config_base = sparc_pmu->irq_bit;
	if (!attr->exclude_user)
		hwc->config_base |= PCR_UTRACE;
	if (!attr->exclude_kernel)
		hwc->config_base |= PCR_STRACE;
	if (!attr->exclude_hv)
		hwc->config_base |= sparc_pmu->hv_bit;
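
	/* For example, an event that excludes supervisor counting but
	 * not user or hypervisor counting ends up with the irq enable,
	 * PCR_UTRACE and hv_bit in config_base, but not PCR_STRACE.
	 */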

	enc = pmap->encoding;

	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
				   MAX_HWEVENTS - 1,
				   evts, events);
		if (n < 0)
			return -EINVAL;
	}
	events[n] = enc;
	evts[n] = event;

	if (check_excludes(evts, n, 1))
		return -EINVAL;

	/* Try to do all error checking before this point, as unwinding
	 * state after grabbing the PMC is difficult.
	 */
	perf_event_grab_pmc();
	event->destroy = hw_perf_event_destroy;

	if (!hwc->sample_period) {
		hwc->sample_period = MAX_PERIOD;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	}

	if (pmap->pic_mask & PIC_UPPER) {
		hwc->idx = PIC_UPPER_INDEX;
		enc <<= sparc_pmu->upper_shift;
	} else {
		hwc->idx = PIC_LOWER_INDEX;
		enc <<= sparc_pmu->lower_shift;
	}

	hwc->config |= enc;
	return 0;
}

static const struct pmu pmu = {
	.enable		= sparc_pmu_enable,
	.disable	= sparc_pmu_disable,
	.read		= sparc_pmu_read,
	.unthrottle	= sparc_pmu_unthrottle,
};

const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	int err = __hw_perf_event_init(event);

	if (err)
		return ERR_PTR(err);
	return &pmu;
}

void perf_event_print_debug(void)
{
	unsigned long flags;
	u64 pcr, pic;
	int cpu;

	if (!sparc_pmu)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();

	pcr = pcr_ops->read();
	read_pic(pic);

	pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
		cpu, pcr, pic);

	local_irq_restore(flags);
}
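
/* Counter overflow raises an NMI, delivered through the die notifier
 * chain.  Because inactive counters are parked on the "NOP" event and
 * keep ticking, a counter whose updated value still has bit 31 set
 * has not actually wrapped, so its interrupt is ignored.
 */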
static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
					    unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	if (!atomic_read(&active_events))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	data.addr = 0;

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx < MAX_HWEVENTS; idx++) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		hwc = &event->hw;
		val = sparc_perf_event_update(event, hwc, idx);
		if (val & (1ULL << 31))
			continue;

		data.period = event->hw.last_period;
		if (!sparc_perf_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			sparc_pmu_disable_event(hwc, idx);
	}

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call		= perf_event_nmi_handler,
};

static bool __init supported_pmu(void)
{
	if (!strcmp(sparc_pmu_type, "ultra3") ||
	    !strcmp(sparc_pmu_type, "ultra3+") ||
	    !strcmp(sparc_pmu_type, "ultra3i") ||
	    !strcmp(sparc_pmu_type, "ultra4+")) {
		sparc_pmu = &ultra3_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara")) {
		sparc_pmu = &niagara1_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara2")) {
		sparc_pmu = &niagara2_pmu;
		return true;
	}
	return false;
}

void __init init_hw_perf_events(void)
{
	pr_info("Performance events: ");

	if (!supported_pmu()) {
		pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
		return;
	}

	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);

	/* All sparc64 PMUs currently have 2 events.  But this simple
	 * driver only supports one active event at a time.
	 */
	perf_max_events = 1;

	register_die_notifier(&perf_event_nmi_notifier);
}