4 * ARM performance counter support.
6 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
8 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
9 * 2010 (c) MontaVista Software, LLC.
11 * This code is based on the sparc64 perf event code, which is in turn based
12 * on the x86 code. Callchain code is based on the ARM OProfile backtrace code.
15 #define pr_fmt(fmt) "hw perfevents: " fmt
17 #include <linux/interrupt.h>
18 #include <linux/kernel.h>
19 #include <linux/perf_event.h>
20 #include <linux/platform_device.h>
21 #include <linux/spinlock.h>
22 #include <linux/uaccess.h>
24 #include <asm/cputype.h>
26 #include <asm/irq_regs.h>
28 #include <asm/stacktrace.h>
30 static struct platform_device *pmu_device;
33 * Hardware lock to serialize accesses to PMU registers. Needed for the
34 * read/modify/write sequences.
36 DEFINE_SPINLOCK(pmu_lock);
39 * ARMv6 supports a maximum of 3 events, starting from index 1. If we add
40 * another platform that supports more, we need to increase this to be the
41 * largest of all platforms.
43 * ARMv7 supports up to 32 events:
44 * cycle counter CCNT + 31 event counters CNT0..CNT30.
45 * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
47 #define ARMPMU_MAX_HWEVENTS 33
49 /* The events for a given CPU. */
50 struct cpu_hw_events {
52 * The events that are active on the CPU for the given index. Index 0 is reserved.
55 struct perf_event *events[ARMPMU_MAX_HWEVENTS];
58 * A 1 bit for an index indicates that the counter is being used for
59 * an event. A 0 means that the counter can be used.
61 unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
64 * A 1 bit for an index indicates that the counter is actively being used.
67 unsigned long active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
69 DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
73 irqreturn_t (*handle_irq)(int irq_num, void *dev);
74 void (*enable)(struct hw_perf_event *evt, int idx);
75 void (*disable)(struct hw_perf_event *evt, int idx);
76 int (*event_map)(int evt);
77 u64 (*raw_event)(u64);
78 int (*get_event_idx)(struct cpu_hw_events *cpuc,
79 struct hw_perf_event *hwc);
80 u32 (*read_counter)(int idx);
81 void (*write_counter)(int idx, u32 val);
88 /* Set at runtime when we know what CPU type we are. */
89 static const struct arm_pmu *armpmu;
91 #define HW_OP_UNSUPPORTED 0xFFFF
94 PERF_COUNT_HW_CACHE_##_x
96 #define CACHE_OP_UNSUPPORTED 0xFFFF
98 static unsigned armpmu_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
99 [PERF_COUNT_HW_CACHE_OP_MAX]
100 [PERF_COUNT_HW_CACHE_RESULT_MAX];
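/*
 * Note: a PERF_TYPE_HW_CACHE config packs the cache type, operation and
 * result into its low three bytes, e.g.
 *   config = PERF_COUNT_HW_CACHE_L1D |
 *            (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *            (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
 * requests L1 data-cache read misses. armpmu_map_cache_event() below unpacks
 * these bytes and looks the tuple up in armpmu_perf_cache_map.
 */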
103 armpmu_map_cache_event(u64 config)
105 unsigned int cache_type, cache_op, cache_result, ret;
107 cache_type = (config >> 0) & 0xff;
108 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
111 cache_op = (config >> 8) & 0xff;
112 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
115 cache_result = (config >> 16) & 0xff;
116 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
119 ret = (int)armpmu_perf_cache_map[cache_type][cache_op][cache_result];
121 if (ret == CACHE_OP_UNSUPPORTED)
128 armpmu_event_set_period(struct perf_event *event,
129 struct hw_perf_event *hwc,
132 s64 left = atomic64_read(&hwc->period_left);
133 s64 period = hwc->sample_period;
136 if (unlikely(left <= -period)) {
138 atomic64_set(&hwc->period_left, left);
139 hwc->last_period = period;
143 if (unlikely(left <= 0)) {
145 atomic64_set(&hwc->period_left, left);
146 hwc->last_period = period;
150 if (left > (s64)armpmu->max_period)
151 left = armpmu->max_period;
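/*
 * Program the counter with the two's complement of 'left' so that it
 * overflows, and raises the PMU interrupt, after 'left' more events;
 * e.g. left == 0x1000 results in 0xfffff000 being written below.
 */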
153 atomic64_set(&hwc->prev_count, (u64)-left);
155 armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);
157 perf_event_update_userpage(event);
163 armpmu_event_update(struct perf_event *event,
164 struct hw_perf_event *hwc,
168 s64 prev_raw_count, new_raw_count;
172 prev_raw_count = atomic64_read(&hwc->prev_count);
173 new_raw_count = armpmu->read_counter(idx);
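/*
 * The shifts in the delta calculation below discard the bits above the
 * 32-bit counter width, so a counter that has wrapped since the previous
 * read still yields the correct difference.
 */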
175 if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
176 new_raw_count) != prev_raw_count)
179 delta = (new_raw_count << shift) - (prev_raw_count << shift);
182 atomic64_add(delta, &event->count);
183 atomic64_sub(delta, &hwc->period_left);
185 return new_raw_count;
189 armpmu_disable(struct perf_event *event)
191 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
192 struct hw_perf_event *hwc = &event->hw;
197 clear_bit(idx, cpuc->active_mask);
198 armpmu->disable(hwc, idx);
202 armpmu_event_update(event, hwc, idx);
203 cpuc->events[idx] = NULL;
204 clear_bit(idx, cpuc->used_mask);
206 perf_event_update_userpage(event);
210 armpmu_read(struct perf_event *event)
212 struct hw_perf_event *hwc = &event->hw;
214 /* Don't read disabled counters! */
218 armpmu_event_update(event, hwc, hwc->idx);
222 armpmu_unthrottle(struct perf_event *event)
224 struct hw_perf_event *hwc = &event->hw;
227 * Set the period again. Some counters can't be stopped, so when we
228 * were throttled we simply disabled the IRQ source and the counter
229 * may have been left counting. If we don't do this step then we may
230 * get an interrupt too soon or *way* too late if the overflow has
231 * happened since disabling.
233 armpmu_event_set_period(event, hwc, hwc->idx);
234 armpmu->enable(hwc, hwc->idx);
238 armpmu_enable(struct perf_event *event)
240 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
241 struct hw_perf_event *hwc = &event->hw;
245 /* If we don't have space for the counter then finish early. */
246 idx = armpmu->get_event_idx(cpuc, hwc);
253 * If there is an event in the counter we are going to use then make
254 * sure it is disabled.
257 armpmu->disable(hwc, idx);
258 cpuc->events[idx] = event;
259 set_bit(idx, cpuc->active_mask);
261 /* Set the period for the event. */
262 armpmu_event_set_period(event, hwc, idx);
264 /* Enable the event. */
265 armpmu->enable(hwc, idx);
267 /* Propagate our changes to the userspace mapping. */
268 perf_event_update_userpage(event);
274 static struct pmu pmu = {
275 .enable = armpmu_enable,
276 .disable = armpmu_disable,
277 .unthrottle = armpmu_unthrottle,
282 validate_event(struct cpu_hw_events *cpuc,
283 struct perf_event *event)
285 struct hw_perf_event fake_event = event->hw;
287 if (event->pmu && event->pmu != &pmu)
290 return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
294 validate_group(struct perf_event *event)
296 struct perf_event *sibling, *leader = event->group_leader;
297 struct cpu_hw_events fake_pmu;
299 memset(&fake_pmu, 0, sizeof(fake_pmu));
301 if (!validate_event(&fake_pmu, leader))
304 list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
305 if (!validate_event(&fake_pmu, sibling))
309 if (!validate_event(&fake_pmu, event))
316 armpmu_reserve_hardware(void)
318 int i, err = -ENODEV, irq;
320 pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU);
321 if (IS_ERR(pmu_device)) {
322 pr_warning("unable to reserve pmu\n");
323 return PTR_ERR(pmu_device);
326 init_pmu(ARM_PMU_DEVICE_CPU);
328 if (pmu_device->num_resources < 1) {
329 pr_err("no irqs for PMUs defined\n");
333 for (i = 0; i < pmu_device->num_resources; ++i) {
334 irq = platform_get_irq(pmu_device, i);
338 err = request_irq(irq, armpmu->handle_irq,
339 IRQF_DISABLED | IRQF_NOBALANCING,
342 pr_warning("unable to request IRQ%d for ARM perf "
349 for (i = i - 1; i >= 0; --i) {
350 irq = platform_get_irq(pmu_device, i);
354 release_pmu(pmu_device);
362 armpmu_release_hardware(void)
366 for (i = pmu_device->num_resources - 1; i >= 0; --i) {
367 irq = platform_get_irq(pmu_device, i);
373 release_pmu(pmu_device);
377 static atomic_t active_events = ATOMIC_INIT(0);
378 static DEFINE_MUTEX(pmu_reserve_mutex);
381 hw_perf_event_destroy(struct perf_event *event)
383 if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
384 armpmu_release_hardware();
385 mutex_unlock(&pmu_reserve_mutex);
390 __hw_perf_event_init(struct perf_event *event)
392 struct hw_perf_event *hwc = &event->hw;
395 /* Decode the generic type into an ARM event identifier. */
396 if (PERF_TYPE_HARDWARE == event->attr.type) {
397 mapping = armpmu->event_map(event->attr.config);
398 } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
399 mapping = armpmu_map_cache_event(event->attr.config);
400 } else if (PERF_TYPE_RAW == event->attr.type) {
401 mapping = armpmu->raw_event(event->attr.config);
403 pr_debug("event type %x not supported\n", event->attr.type);
408 pr_debug("event %x:%llx not supported\n", event->attr.type,
414 * Check whether we need to exclude the counter from certain modes.
415 * The ARM performance counters are on all of the time so if someone
416 * has asked us for some excludes then we have to fail.
418 if (event->attr.exclude_kernel || event->attr.exclude_user ||
419 event->attr.exclude_hv || event->attr.exclude_idle) {
420 pr_debug("ARM performance counters do not support "
426 * We don't assign an index until we actually place the event onto
427 * hardware. Use -1 to signify that we haven't decided where to put it
428 * yet. For SMP systems, each core has its own PMU so we can't do any
429 * clever allocation or constraints checking at this point.
434 * Store the event encoding into the config_base field. config and
435 * event_base are unused as the only 2 things we need to know are
436 * the event mapping and the counter to use. The counter to use is
437 * also the idx, and the config_base is the event type.
439 hwc->config_base = (unsigned long)mapping;
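/*
 * Without a user-supplied sample period, default to the widest period the
 * 32-bit counter can hold so that a plain counting event overflows (and
 * interrupts) as rarely as possible.
 */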
443 if (!hwc->sample_period) {
444 hwc->sample_period = armpmu->max_period;
445 hwc->last_period = hwc->sample_period;
446 atomic64_set(&hwc->period_left, hwc->sample_period);
450 if (event->group_leader != event) {
451 err = validate_group(event);
460 hw_perf_event_init(struct perf_event *event)
465 return ERR_PTR(-ENODEV);
467 event->destroy = hw_perf_event_destroy;
469 if (!atomic_inc_not_zero(&active_events)) {
470 if (atomic_read(&active_events) > perf_max_events) {
471 atomic_dec(&active_events);
472 return ERR_PTR(-ENOSPC);
475 mutex_lock(&pmu_reserve_mutex);
476 if (atomic_read(&active_events) == 0) {
477 err = armpmu_reserve_hardware();
481 atomic_inc(&active_events);
482 mutex_unlock(&pmu_reserve_mutex);
488 err = __hw_perf_event_init(event);
490 hw_perf_event_destroy(event);
492 return err ? ERR_PTR(err) : &pmu;
498 /* Enable all of the perf events on hardware. */
500 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
505 for (idx = 0; idx <= armpmu->num_events; ++idx) {
506 struct perf_event *event = cpuc->events[idx];
511 armpmu->enable(&event->hw, idx);
518 hw_perf_disable(void)
525 * ARMv6 Performance counter handling code.
527 * ARMv6 has 2 configurable performance counters and a single cycle counter.
528 * They all share a single reset bit but can be written to zero so we can use
531 * The counters can't be individually enabled or disabled so when we remove
532 * one event and replace it with another we could get spurious counts from the
533 * wrong event. However, we can take advantage of the fact that the
534 * performance counters can export events to the event bus, and the event bus
535 * itself can be monitored. This requires that we *don't* export the events to
536 * the event bus. The procedure for disabling a configurable counter is:
537 * - change the counter to count the ETMEXTOUT[0] signal (0x20). This
538 * effectively stops the counter from counting.
539 * - disable the counter's interrupt generation (each counter has its
540 * own interrupt enable bit).
541 * Once stopped, the counter value can be written as 0 to reset.
543 * To enable a counter:
544 * - enable the counter's interrupt generation.
545 * - set the new event type.
547 * Note: the dedicated cycle counter only counts cycles and can't be
548 * enabled/disabled independently of the others. When we want to disable the
549 * cycle counter, we have to just disable the interrupt reporting and start
550 * ignoring that counter. When re-enabling, we have to reset the value and
551 * enable the interrupt.
554 enum armv6_perf_types {
555 ARMV6_PERFCTR_ICACHE_MISS = 0x0,
556 ARMV6_PERFCTR_IBUF_STALL = 0x1,
557 ARMV6_PERFCTR_DDEP_STALL = 0x2,
558 ARMV6_PERFCTR_ITLB_MISS = 0x3,
559 ARMV6_PERFCTR_DTLB_MISS = 0x4,
560 ARMV6_PERFCTR_BR_EXEC = 0x5,
561 ARMV6_PERFCTR_BR_MISPREDICT = 0x6,
562 ARMV6_PERFCTR_INSTR_EXEC = 0x7,
563 ARMV6_PERFCTR_DCACHE_HIT = 0x9,
564 ARMV6_PERFCTR_DCACHE_ACCESS = 0xA,
565 ARMV6_PERFCTR_DCACHE_MISS = 0xB,
566 ARMV6_PERFCTR_DCACHE_WBACK = 0xC,
567 ARMV6_PERFCTR_SW_PC_CHANGE = 0xD,
568 ARMV6_PERFCTR_MAIN_TLB_MISS = 0xF,
569 ARMV6_PERFCTR_EXPL_D_ACCESS = 0x10,
570 ARMV6_PERFCTR_LSU_FULL_STALL = 0x11,
571 ARMV6_PERFCTR_WBUF_DRAINED = 0x12,
572 ARMV6_PERFCTR_CPU_CYCLES = 0xFF,
573 ARMV6_PERFCTR_NOP = 0x20,
576 enum armv6_counters {
577 ARMV6_CYCLE_COUNTER = 1,
583 * The hardware events that we support. We do support cache operations but
584 * we have Harvard caches and no way to combine instruction and data
585 * accesses/misses in hardware.
587 static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
588 [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES,
589 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC,
590 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
591 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
592 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
593 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT,
594 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
597 static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
598 [PERF_COUNT_HW_CACHE_OP_MAX]
599 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
602 * The performance counters don't differentiate between read
603 * and write accesses/misses so this isn't strictly correct,
604 * but it's the best we can do. Writes and reads get combined.
608 [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
609 [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
612 [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
613 [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
616 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
617 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
622 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
623 [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
626 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
627 [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
630 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
631 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
636 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
637 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
640 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
641 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
644 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
645 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
650 * The ARM performance counters can count micro DTLB misses,
651 * micro ITLB misses and main TLB misses. There isn't an event
652 * for TLB misses, so use the micro misses here and if users
653 * want the main TLB misses they can use a raw counter.
656 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
657 [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
660 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
661 [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
664 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
665 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
670 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
671 [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
674 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
675 [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
678 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
679 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
684 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
685 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
688 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
689 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
692 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
693 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
698 enum armv6mpcore_perf_types {
699 ARMV6MPCORE_PERFCTR_ICACHE_MISS = 0x0,
700 ARMV6MPCORE_PERFCTR_IBUF_STALL = 0x1,
701 ARMV6MPCORE_PERFCTR_DDEP_STALL = 0x2,
702 ARMV6MPCORE_PERFCTR_ITLB_MISS = 0x3,
703 ARMV6MPCORE_PERFCTR_DTLB_MISS = 0x4,
704 ARMV6MPCORE_PERFCTR_BR_EXEC = 0x5,
705 ARMV6MPCORE_PERFCTR_BR_NOTPREDICT = 0x6,
706 ARMV6MPCORE_PERFCTR_BR_MISPREDICT = 0x7,
707 ARMV6MPCORE_PERFCTR_INSTR_EXEC = 0x8,
708 ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
709 ARMV6MPCORE_PERFCTR_DCACHE_RDMISS = 0xB,
710 ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
711 ARMV6MPCORE_PERFCTR_DCACHE_WRMISS = 0xD,
712 ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
713 ARMV6MPCORE_PERFCTR_SW_PC_CHANGE = 0xF,
714 ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS = 0x10,
715 ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
716 ARMV6MPCORE_PERFCTR_LSU_FULL_STALL = 0x12,
717 ARMV6MPCORE_PERFCTR_WBUF_DRAINED = 0x13,
718 ARMV6MPCORE_PERFCTR_CPU_CYCLES = 0xFF,
722 * The hardware events that we support. We do support cache operations but
723 * we have Harvard caches and no way to combine instruction and data
724 * accesses/misses in hardware.
726 static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
727 [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
728 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
729 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
730 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
731 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
732 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
733 [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
736 static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
737 [PERF_COUNT_HW_CACHE_OP_MAX]
738 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
742 ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
744 ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
748 ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
750 ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,
753 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
754 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
759 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
760 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
763 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
764 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
767 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
768 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
773 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
774 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
777 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
778 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
781 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
782 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
787 * The ARM performance counters can count micro DTLB misses,
788 * micro ITLB misses and main TLB misses. There isn't an event
789 * for TLB misses, so use the micro misses here and if users
790 * want the main TLB misses they can use a raw counter.
793 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
794 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
797 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
798 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
801 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
802 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
807 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
808 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
811 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
812 [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
815 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
816 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
821 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
822 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
825 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
826 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
829 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
830 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
835 static inline unsigned long
836 armv6_pmcr_read(void)
839 asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val));
844 armv6_pmcr_write(unsigned long val)
846 asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val));
849 #define ARMV6_PMCR_ENABLE (1 << 0)
850 #define ARMV6_PMCR_CTR01_RESET (1 << 1)
851 #define ARMV6_PMCR_CCOUNT_RESET (1 << 2)
852 #define ARMV6_PMCR_CCOUNT_DIV (1 << 3)
853 #define ARMV6_PMCR_COUNT0_IEN (1 << 4)
854 #define ARMV6_PMCR_COUNT1_IEN (1 << 5)
855 #define ARMV6_PMCR_CCOUNT_IEN (1 << 6)
856 #define ARMV6_PMCR_COUNT0_OVERFLOW (1 << 8)
857 #define ARMV6_PMCR_COUNT1_OVERFLOW (1 << 9)
858 #define ARMV6_PMCR_CCOUNT_OVERFLOW (1 << 10)
859 #define ARMV6_PMCR_EVT_COUNT0_SHIFT 20
860 #define ARMV6_PMCR_EVT_COUNT0_MASK (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
861 #define ARMV6_PMCR_EVT_COUNT1_SHIFT 12
862 #define ARMV6_PMCR_EVT_COUNT1_MASK (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)
864 #define ARMV6_PMCR_OVERFLOWED_MASK \
865 (ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
866 ARMV6_PMCR_CCOUNT_OVERFLOW)
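/*
 * Worked example: to make counter 0 count event 0x7 (instruction executed)
 * with its interrupt enabled, armv6pmu_enable_event() clears
 * ARMV6_PMCR_EVT_COUNT0_MASK in the PMCR and ORs in
 * (0x7 << ARMV6_PMCR_EVT_COUNT0_SHIFT) | ARMV6_PMCR_COUNT0_IEN.
 */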
869 armv6_pmcr_has_overflowed(unsigned long pmcr)
871 return (pmcr & ARMV6_PMCR_OVERFLOWED_MASK);
875 armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
876 enum armv6_counters counter)
880 if (ARMV6_CYCLE_COUNTER == counter)
881 ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
882 else if (ARMV6_COUNTER0 == counter)
883 ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
884 else if (ARMV6_COUNTER1 == counter)
885 ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
887 WARN_ONCE(1, "invalid counter number (%d)\n", counter);
893 armv6pmu_read_counter(int counter)
895 unsigned long value = 0;
897 if (ARMV6_CYCLE_COUNTER == counter)
898 asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value));
899 else if (ARMV6_COUNTER0 == counter)
900 asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value));
901 else if (ARMV6_COUNTER1 == counter)
902 asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value));
904 WARN_ONCE(1, "invalid counter number (%d)\n", counter);
910 armv6pmu_write_counter(int counter,
913 if (ARMV6_CYCLE_COUNTER == counter)
914 asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value));
915 else if (ARMV6_COUNTER0 == counter)
916 asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value));
917 else if (ARMV6_COUNTER1 == counter)
918 asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r"(value));
920 WARN_ONCE(1, "invalid counter number (%d)\n", counter);
924 armv6pmu_enable_event(struct hw_perf_event *hwc,
927 unsigned long val, mask, evt, flags;
929 if (ARMV6_CYCLE_COUNTER == idx) {
931 evt = ARMV6_PMCR_CCOUNT_IEN;
932 } else if (ARMV6_COUNTER0 == idx) {
933 mask = ARMV6_PMCR_EVT_COUNT0_MASK;
934 evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
935 ARMV6_PMCR_COUNT0_IEN;
936 } else if (ARMV6_COUNTER1 == idx) {
937 mask = ARMV6_PMCR_EVT_COUNT1_MASK;
938 evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
939 ARMV6_PMCR_COUNT1_IEN;
941 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
946 * Mask out the current event and set the counter to count the event
947 * that we're interested in.
949 spin_lock_irqsave(&pmu_lock, flags);
950 val = armv6_pmcr_read();
953 armv6_pmcr_write(val);
954 spin_unlock_irqrestore(&pmu_lock, flags);
958 armv6pmu_handle_irq(int irq_num,
961 unsigned long pmcr = armv6_pmcr_read();
962 struct perf_sample_data data;
963 struct cpu_hw_events *cpuc;
964 struct pt_regs *regs;
967 if (!armv6_pmcr_has_overflowed(pmcr))
970 regs = get_irq_regs();
973 * The interrupts are cleared by writing the overflow flags back to
974 * the control register. All of the other bits don't have any effect
975 * if they are rewritten, so write the whole value back.
977 armv6_pmcr_write(pmcr);
979 perf_sample_data_init(&data, 0);
981 cpuc = &__get_cpu_var(cpu_hw_events);
982 for (idx = 0; idx <= armpmu->num_events; ++idx) {
983 struct perf_event *event = cpuc->events[idx];
984 struct hw_perf_event *hwc;
986 if (!test_bit(idx, cpuc->active_mask))
990 * We have a single interrupt for all counters. Check that
991 * each counter has overflowed before we process it.
993 if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
997 armpmu_event_update(event, hwc, idx);
998 data.period = event->hw.last_period;
999 if (!armpmu_event_set_period(event, hwc, idx))
1002 if (perf_event_overflow(event, 0, &data, regs))
1003 armpmu->disable(hwc, idx);
1007 * Handle the pending perf events.
1009 * Note: this call *must* be run with interrupts disabled. For
1010 * platforms that can have the PMU interrupts raised as an NMI, this will not work.
1013 perf_event_do_pending();
1019 armv6pmu_start(void)
1021 unsigned long flags, val;
1023 spin_lock_irqsave(&pmu_lock, flags);
1024 val = armv6_pmcr_read();
1025 val |= ARMV6_PMCR_ENABLE;
1026 armv6_pmcr_write(val);
1027 spin_unlock_irqrestore(&pmu_lock, flags);
1033 unsigned long flags, val;
1035 spin_lock_irqsave(&pmu_lock, flags);
1036 val = armv6_pmcr_read();
1037 val &= ~ARMV6_PMCR_ENABLE;
1038 armv6_pmcr_write(val);
1039 spin_unlock_irqrestore(&pmu_lock, flags);
1043 armv6pmu_event_map(int config)
1045 int mapping = armv6_perf_map[config];
1046 if (HW_OP_UNSUPPORTED == mapping)
1047 mapping = -EOPNOTSUPP;
1052 armv6mpcore_pmu_event_map(int config)
1054 int mapping = armv6mpcore_perf_map[config];
1055 if (HW_OP_UNSUPPORTED == mapping)
1056 mapping = -EOPNOTSUPP;
1061 armv6pmu_raw_event(u64 config)
1063 return config & 0xff;
1067 armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
1068 struct hw_perf_event *event)
1070 /* Always place a cycle counter into the cycle counter. */
1071 if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
1072 if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
1075 return ARMV6_CYCLE_COUNTER;
1078 * For anything other than a cycle counter, try and use
1079 * counter0 and counter1.
1081 if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask)) {
1082 return ARMV6_COUNTER1;
1085 if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask)) {
1086 return ARMV6_COUNTER0;
1089 /* The counters are all in use. */
1095 armv6pmu_disable_event(struct hw_perf_event *hwc,
1098 unsigned long val, mask, evt, flags;
1100 if (ARMV6_CYCLE_COUNTER == idx) {
1101 mask = ARMV6_PMCR_CCOUNT_IEN;
1103 } else if (ARMV6_COUNTER0 == idx) {
1104 mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
1105 evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
1106 } else if (ARMV6_COUNTER1 == idx) {
1107 mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
1108 evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
1110 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
1115 * Mask out the current event and set the counter to count the number
1116 * of ETM bus signal assertion cycles. The external reporting should
1117 * be disabled and so this should never increment.
1119 spin_lock_irqsave(&pmu_lock, flags);
1120 val = armv6_pmcr_read();
1123 armv6_pmcr_write(val);
1124 spin_unlock_irqrestore(&pmu_lock, flags);
1128 armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
1131 unsigned long val, mask, flags, evt = 0;
1133 if (ARMV6_CYCLE_COUNTER == idx) {
1134 mask = ARMV6_PMCR_CCOUNT_IEN;
1135 } else if (ARMV6_COUNTER0 == idx) {
1136 mask = ARMV6_PMCR_COUNT0_IEN;
1137 } else if (ARMV6_COUNTER1 == idx) {
1138 mask = ARMV6_PMCR_COUNT1_IEN;
1140 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
1145 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
1146 * simply disable the interrupt reporting.
1148 spin_lock_irqsave(&pmu_lock, flags);
1149 val = armv6_pmcr_read();
1152 armv6_pmcr_write(val);
1153 spin_unlock_irqrestore(&pmu_lock, flags);
1156 static const struct arm_pmu armv6pmu = {
1158 .handle_irq = armv6pmu_handle_irq,
1159 .enable = armv6pmu_enable_event,
1160 .disable = armv6pmu_disable_event,
1161 .event_map = armv6pmu_event_map,
1162 .raw_event = armv6pmu_raw_event,
1163 .read_counter = armv6pmu_read_counter,
1164 .write_counter = armv6pmu_write_counter,
1165 .get_event_idx = armv6pmu_get_event_idx,
1166 .start = armv6pmu_start,
1167 .stop = armv6pmu_stop,
1169 .max_period = (1LLU << 32) - 1,
1173 * ARMv6mpcore is almost identical to single core ARMv6 with the exception
1174 * that some of the events have different enumerations and that there is no
1175 * *hack* to stop the programmable counters. To stop the counters we simply
1176 * disable the interrupt reporting and update the event. When unthrottling we
1177 * reset the period and enable the interrupt reporting.
1179 static const struct arm_pmu armv6mpcore_pmu = {
1181 .handle_irq = armv6pmu_handle_irq,
1182 .enable = armv6pmu_enable_event,
1183 .disable = armv6mpcore_pmu_disable_event,
1184 .event_map = armv6mpcore_pmu_event_map,
1185 .raw_event = armv6pmu_raw_event,
1186 .read_counter = armv6pmu_read_counter,
1187 .write_counter = armv6pmu_write_counter,
1188 .get_event_idx = armv6pmu_get_event_idx,
1189 .start = armv6pmu_start,
1190 .stop = armv6pmu_stop,
1192 .max_period = (1LLU << 32) - 1,
1196 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
1198 * Copied from ARMv6 code, with the low level code inspired
1199 * by the ARMv7 OProfile code.
1201 * Cortex-A8 has up to 4 configurable performance counters and
1202 * a single cycle counter.
1203 * Cortex-A9 has up to 31 configurable performance counters and
1204 * a single cycle counter.
1206 * All counters can be enabled/disabled and IRQ masked separately. The cycle
1207 * counter and all 4 performance counters together can be reset separately.
1210 #define ARMV7_PMU_CORTEX_A8_NAME "ARMv7 Cortex-A8"
1212 #define ARMV7_PMU_CORTEX_A9_NAME "ARMv7 Cortex-A9"
1214 /* Common ARMv7 event types */
1215 enum armv7_perf_types {
1216 ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
1217 ARMV7_PERFCTR_IFETCH_MISS = 0x01,
1218 ARMV7_PERFCTR_ITLB_MISS = 0x02,
1219 ARMV7_PERFCTR_DCACHE_REFILL = 0x03,
1220 ARMV7_PERFCTR_DCACHE_ACCESS = 0x04,
1221 ARMV7_PERFCTR_DTLB_REFILL = 0x05,
1222 ARMV7_PERFCTR_DREAD = 0x06,
1223 ARMV7_PERFCTR_DWRITE = 0x07,
1225 ARMV7_PERFCTR_EXC_TAKEN = 0x09,
1226 ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
1227 ARMV7_PERFCTR_CID_WRITE = 0x0B,
1228 /* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS. It counts:
1230 * - all branch instructions,
1231 * - instructions that explicitly write the PC,
1232 * - exception generating instructions.
1234 ARMV7_PERFCTR_PC_WRITE = 0x0C,
1235 ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
1236 ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F,
1237 ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
1238 ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
1240 ARMV7_PERFCTR_PC_BRANCH_MIS_USED = 0x12,
1242 ARMV7_PERFCTR_CPU_CYCLES = 0xFF
1245 /* ARMv7 Cortex-A8 specific event types */
1246 enum armv7_a8_perf_types {
1247 ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
1249 ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
1251 ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40,
1252 ARMV7_PERFCTR_L2_STORE_MERGED = 0x41,
1253 ARMV7_PERFCTR_L2_STORE_BUFF = 0x42,
1254 ARMV7_PERFCTR_L2_ACCESS = 0x43,
1255 ARMV7_PERFCTR_L2_CACH_MISS = 0x44,
1256 ARMV7_PERFCTR_AXI_READ_CYCLES = 0x45,
1257 ARMV7_PERFCTR_AXI_WRITE_CYCLES = 0x46,
1258 ARMV7_PERFCTR_MEMORY_REPLAY = 0x47,
1259 ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY = 0x48,
1260 ARMV7_PERFCTR_L1_DATA_MISS = 0x49,
1261 ARMV7_PERFCTR_L1_INST_MISS = 0x4A,
1262 ARMV7_PERFCTR_L1_DATA_COLORING = 0x4B,
1263 ARMV7_PERFCTR_L1_NEON_DATA = 0x4C,
1264 ARMV7_PERFCTR_L1_NEON_CACH_DATA = 0x4D,
1265 ARMV7_PERFCTR_L2_NEON = 0x4E,
1266 ARMV7_PERFCTR_L2_NEON_HIT = 0x4F,
1267 ARMV7_PERFCTR_L1_INST = 0x50,
1268 ARMV7_PERFCTR_PC_RETURN_MIS_PRED = 0x51,
1269 ARMV7_PERFCTR_PC_BRANCH_FAILED = 0x52,
1270 ARMV7_PERFCTR_PC_BRANCH_TAKEN = 0x53,
1271 ARMV7_PERFCTR_PC_BRANCH_EXECUTED = 0x54,
1272 ARMV7_PERFCTR_OP_EXECUTED = 0x55,
1273 ARMV7_PERFCTR_CYCLES_INST_STALL = 0x56,
1274 ARMV7_PERFCTR_CYCLES_INST = 0x57,
1275 ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL = 0x58,
1276 ARMV7_PERFCTR_CYCLES_NEON_INST_STALL = 0x59,
1277 ARMV7_PERFCTR_NEON_CYCLES = 0x5A,
1279 ARMV7_PERFCTR_PMU0_EVENTS = 0x70,
1280 ARMV7_PERFCTR_PMU1_EVENTS = 0x71,
1281 ARMV7_PERFCTR_PMU_EVENTS = 0x72,
1284 /* ARMv7 Cortex-A9 specific event types */
1285 enum armv7_a9_perf_types {
1286 ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC = 0x40,
1287 ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC = 0x41,
1288 ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC = 0x42,
1290 ARMV7_PERFCTR_COHERENT_LINE_MISS = 0x50,
1291 ARMV7_PERFCTR_COHERENT_LINE_HIT = 0x51,
1293 ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES = 0x60,
1294 ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES = 0x61,
1295 ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62,
1296 ARMV7_PERFCTR_STREX_EXECUTED_PASSED = 0x63,
1297 ARMV7_PERFCTR_STREX_EXECUTED_FAILED = 0x64,
1298 ARMV7_PERFCTR_DATA_EVICTION = 0x65,
1299 ARMV7_PERFCTR_ISSUE_STAGE_NO_INST = 0x66,
1300 ARMV7_PERFCTR_ISSUE_STAGE_EMPTY = 0x67,
1301 ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE = 0x68,
1303 ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E,
1305 ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST = 0x70,
1306 ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71,
1307 ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST = 0x72,
1308 ARMV7_PERFCTR_FP_EXECUTED_INST = 0x73,
1309 ARMV7_PERFCTR_NEON_EXECUTED_INST = 0x74,
1311 ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80,
1312 ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES = 0x81,
1313 ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES = 0x82,
1314 ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES = 0x83,
1315 ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES = 0x84,
1316 ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES = 0x85,
1317 ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES = 0x86,
1319 ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES = 0x8A,
1320 ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B,
1322 ARMV7_PERFCTR_ISB_INST = 0x90,
1323 ARMV7_PERFCTR_DSB_INST = 0x91,
1324 ARMV7_PERFCTR_DMB_INST = 0x92,
1325 ARMV7_PERFCTR_EXT_INTERRUPTS = 0x93,
1327 ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED = 0xA0,
1328 ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED = 0xA1,
1329 ARMV7_PERFCTR_PLE_FIFO_FLUSH = 0xA2,
1330 ARMV7_PERFCTR_PLE_RQST_COMPLETED = 0xA3,
1331 ARMV7_PERFCTR_PLE_FIFO_OVERFLOW = 0xA4,
1332 ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5
1336 * Cortex-A8 HW events mapping
1338 * The hardware events that we support. We do support cache operations but
1339 * we have Harvard caches and no way to combine instruction and data
1340 * accesses/misses in hardware.
1342 static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
1343 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
1344 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
1345 [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
1346 [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
1347 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
1348 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1349 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
1352 static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
1353 [PERF_COUNT_HW_CACHE_OP_MAX]
1354 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1357 * The performance counters don't differentiate between read
1358 * and write accesses/misses so this isn't strictly correct,
1359 * but it's the best we can do. Writes and reads get combined.
1363 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
1364 [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
1367 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
1368 [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
1370 [C(OP_PREFETCH)] = {
1371 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1372 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1377 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
1378 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
1381 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
1382 [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
1384 [C(OP_PREFETCH)] = {
1385 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1386 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1391 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
1392 [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
1395 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
1396 [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
1398 [C(OP_PREFETCH)] = {
1399 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1400 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1405 * Only ITLB misses and DTLB refills are supported.
1406 * If users want DTLB refill misses, a raw counter must be used.
1410 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1411 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
1414 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1415 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
1417 [C(OP_PREFETCH)] = {
1418 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1419 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1424 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1425 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
1428 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1429 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
1431 [C(OP_PREFETCH)] = {
1432 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1433 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1438 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
1440 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1443 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
1445 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1447 [C(OP_PREFETCH)] = {
1448 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1449 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1455 * Cortex-A9 HW events mapping
1457 static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
1458 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
1459 [PERF_COUNT_HW_INSTRUCTIONS] =
1460 ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
1461 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT,
1462 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS,
1463 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
1464 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1465 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
1468 static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
1469 [PERF_COUNT_HW_CACHE_OP_MAX]
1470 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1473 * The performance counters don't differentiate between read
1474 * and write accesses/misses so this isn't strictly correct,
1475 * but it's the best we can do. Writes and reads get combined.
1479 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
1480 [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
1483 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
1484 [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
1486 [C(OP_PREFETCH)] = {
1487 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1488 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1493 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1494 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
1497 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1498 [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
1500 [C(OP_PREFETCH)] = {
1501 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1502 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1507 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1508 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1511 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1512 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1514 [C(OP_PREFETCH)] = {
1515 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1516 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1521 * Only ITLB misses and DTLB refills are supported.
1522 * If users want DTLB refill misses, a raw counter must be used.
1526 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1527 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
1530 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1531 [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
1533 [C(OP_PREFETCH)] = {
1534 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1535 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1540 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1541 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
1544 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1545 [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
1547 [C(OP_PREFETCH)] = {
1548 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1549 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1554 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
1556 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1559 [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
1561 = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1563 [C(OP_PREFETCH)] = {
1564 [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
1565 [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
1571 * Perf Events counters
1573 enum armv7_counters {
1574 ARMV7_CYCLE_COUNTER = 1, /* Cycle counter */
1575 ARMV7_COUNTER0 = 2, /* First event counter */
1579 * The cycle counter is ARMV7_CYCLE_COUNTER.
1580 * The first event counter is ARMV7_COUNTER0.
1581 * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
1583 #define ARMV7_COUNTER_LAST (ARMV7_COUNTER0 + armpmu->num_events - 1)
1586 * ARMv7 low level PMNC access
1590 * Per-CPU PMNC: config reg
1592 #define ARMV7_PMNC_E (1 << 0) /* Enable all counters */
1593 #define ARMV7_PMNC_P (1 << 1) /* Reset all counters */
1594 #define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */
1595 #define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */
1596 #define ARMV7_PMNC_X (1 << 4) /* Export to ETM */
1597 #define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug */
1598 #define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */
1599 #define ARMV7_PMNC_N_MASK 0x1f
1600 #define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */
1603 * Available counters
1605 #define ARMV7_CNT0 0 /* First event counter */
1606 #define ARMV7_CCNT 31 /* Cycle counter */
1608 /* Perf Event to low level counters mapping */
1609 #define ARMV7_EVENT_CNT_TO_CNTx (ARMV7_COUNTER0 - ARMV7_CNT0)
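/*
 * For example, the first event counter is perf index ARMV7_COUNTER0 (2) but
 * hardware counter 0, so the per-counter macros below subtract
 * ARMV7_EVENT_CNT_TO_CNTx (2) from idx to obtain the hardware bit position.
 */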
1612 * CNTENS: counters enable reg
1614 #define ARMV7_CNTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
1615 #define ARMV7_CNTENS_C (1 << ARMV7_CCNT)
1618 * CNTENC: counters disable reg
1620 #define ARMV7_CNTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
1621 #define ARMV7_CNTENC_C (1 << ARMV7_CCNT)
1624 * INTENS: counters overflow interrupt enable reg
1626 #define ARMV7_INTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
1627 #define ARMV7_INTENS_C (1 << ARMV7_CCNT)
1630 * INTENC: counters overflow interrupt disable reg
1632 #define ARMV7_INTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
1633 #define ARMV7_INTENC_C (1 << ARMV7_CCNT)
1636 * EVTSEL: Event selection reg
1638 #define ARMV7_EVTSEL_MASK 0xff /* Mask for writable bits */
1641 * SELECT: Counter selection reg
1643 #define ARMV7_SELECT_MASK 0x1f /* Mask for writable bits */
1646 * FLAG: counters overflow flag status reg
1648 #define ARMV7_FLAG_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
1649 #define ARMV7_FLAG_C (1 << ARMV7_CCNT)
1650 #define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */
1651 #define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK
1653 static inline unsigned long armv7_pmnc_read(void)
1656 asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
1660 static inline void armv7_pmnc_write(unsigned long val)
1662 val &= ARMV7_PMNC_MASK;
1663 asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
1666 static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
1668 return pmnc & ARMV7_OVERFLOWED_MASK;
1671 static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
1672 enum armv7_counters counter)
1676 if (counter == ARMV7_CYCLE_COUNTER)
1677 ret = pmnc & ARMV7_FLAG_C;
1678 else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
1679 ret = pmnc & ARMV7_FLAG_P(counter);
1681 pr_err("CPU%u checking wrong counter %d overflow status\n",
1682 smp_processor_id(), counter);
1687 static inline int armv7_pmnc_select_counter(unsigned int idx)
1691 if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
1692 pr_err("CPU%u selecting wrong PMNC counter"
1693 " %d\n", smp_processor_id(), idx);
1697 val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
1698 asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
1703 static inline u32 armv7pmu_read_counter(int idx)
1705 unsigned long value = 0;
1707 if (idx == ARMV7_CYCLE_COUNTER)
1708 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
1709 else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
1710 if (armv7_pmnc_select_counter(idx) == idx)
1711 asm volatile("mrc p15, 0, %0, c9, c13, 2"
1714 pr_err("CPU%u reading wrong counter %d\n",
1715 smp_processor_id(), idx);
1720 static inline void armv7pmu_write_counter(int idx, u32 value)
1722 if (idx == ARMV7_CYCLE_COUNTER)
1723 asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
1724 else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
1725 if (armv7_pmnc_select_counter(idx) == idx)
1726 asm volatile("mcr p15, 0, %0, c9, c13, 2"
1729 pr_err("CPU%u writing wrong counter %d\n",
1730 smp_processor_id(), idx);
1733 static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
1735 if (armv7_pmnc_select_counter(idx) == idx) {
1736 val &= ARMV7_EVTSEL_MASK;
1737 asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
1741 static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
1745 if ((idx != ARMV7_CYCLE_COUNTER) &&
1746 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
1747 pr_err("CPU%u enabling wrong PMNC counter"
1748 " %d\n", smp_processor_id(), idx);
1752 if (idx == ARMV7_CYCLE_COUNTER)
1753 val = ARMV7_CNTENS_C;
1755 val = ARMV7_CNTENS_P(idx);
1757 asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
1762 static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
1767 if ((idx != ARMV7_CYCLE_COUNTER) &&
1768 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
1769 pr_err("CPU%u disabling wrong PMNC counter"
1770 " %d\n", smp_processor_id(), idx);
1774 if (idx == ARMV7_CYCLE_COUNTER)
1775 val = ARMV7_CNTENC_C;
1777 val = ARMV7_CNTENC_P(idx);
1779 asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
1784 static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
1788 if ((idx != ARMV7_CYCLE_COUNTER) &&
1789 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
1790 pr_err("CPU%u enabling wrong PMNC counter"
1791 " interrupt enable %d\n", smp_processor_id(), idx);
1795 if (idx == ARMV7_CYCLE_COUNTER)
1796 val = ARMV7_INTENS_C;
1798 val = ARMV7_INTENS_P(idx);
1800 asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
1805 static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
1809 if ((idx != ARMV7_CYCLE_COUNTER) &&
1810 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
1811 pr_err("CPU%u disabling wrong PMNC counter"
1812 " interrupt enable %d\n", smp_processor_id(), idx);
1816 if (idx == ARMV7_CYCLE_COUNTER)
1817 val = ARMV7_INTENC_C;
1819 val = ARMV7_INTENC_P(idx);
1821 asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
1826 static inline u32 armv7_pmnc_getreset_flags(void)
1831 asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
1833 /* Write to clear flags */
1834 val &= ARMV7_FLAG_MASK;
1835 asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
1841 static void armv7_pmnc_dump_regs(void)
1846 printk(KERN_INFO "PMNC registers dump:\n");
1848 asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
1849 printk(KERN_INFO "PMNC =0x%08x\n", val);
1851 asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
1852 printk(KERN_INFO "CNTENS=0x%08x\n", val);
1854 asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
1855 printk(KERN_INFO "INTENS=0x%08x\n", val);
1857 asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
1858 printk(KERN_INFO "FLAGS =0x%08x\n", val);
1860 asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
1861 printk(KERN_INFO "SELECT=0x%08x\n", val);
1863 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
1864 printk(KERN_INFO "CCNT =0x%08x\n", val);
1866 for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
1867 armv7_pmnc_select_counter(cnt);
1868 asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
1869 printk(KERN_INFO "CNT[%d] count =0x%08x\n",
1870 cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
1871 asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
1872 printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
1873 cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
1878 void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
1880 unsigned long flags;
1883 * Enable counter and interrupt, and set the counter to count
1884 * the event that we're interested in.
1886 spin_lock_irqsave(&pmu_lock, flags);
1891 armv7_pmnc_disable_counter(idx);
1894 * Set event (if destined for PMNx counters)
1895 * We don't need to set the event if it's a cycle count
1897 if (idx != ARMV7_CYCLE_COUNTER)
1898 armv7_pmnc_write_evtsel(idx, hwc->config_base);
1901 * Enable interrupt for this counter
1903 armv7_pmnc_enable_intens(idx);
1908 armv7_pmnc_enable_counter(idx);
1910 spin_unlock_irqrestore(&pmu_lock, flags);
1913 static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
1915 unsigned long flags;
1918 * Disable counter and interrupt
1920 spin_lock_irqsave(&pmu_lock, flags);
1925 armv7_pmnc_disable_counter(idx);
1928 * Disable interrupt for this counter
1930 armv7_pmnc_disable_intens(idx);
1932 spin_unlock_irqrestore(&pmu_lock, flags);
1935 static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1938 struct perf_sample_data data;
1939 struct cpu_hw_events *cpuc;
1940 struct pt_regs *regs;
1944 * Get and reset the IRQ flags
1946 pmnc = armv7_pmnc_getreset_flags();
1949 * Did an overflow occur?
1951 if (!armv7_pmnc_has_overflowed(pmnc))
1955 * Handle the counter(s) overflow(s)
1957 regs = get_irq_regs();
1959 perf_sample_data_init(&data, 0);
1961 cpuc = &__get_cpu_var(cpu_hw_events);
1962 for (idx = 0; idx <= armpmu->num_events; ++idx) {
1963 struct perf_event *event = cpuc->events[idx];
1964 struct hw_perf_event *hwc;
1966 if (!test_bit(idx, cpuc->active_mask))
1970 * We have a single interrupt for all counters. Check that
1971 * each counter has overflowed before we process it.
1973 if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
1977 armpmu_event_update(event, hwc, idx);
1978 data.period = event->hw.last_period;
1979 if (!armpmu_event_set_period(event, hwc, idx))
1982 if (perf_event_overflow(event, 0, &data, regs))
1983 armpmu->disable(hwc, idx);
1987 * Handle the pending perf events.
1989 * Note: this call *must* be run with interrupts disabled. For
1990 * platforms that can have the PMU interrupts raised as an NMI, this will not work.
1993 perf_event_do_pending();
1998 static void armv7pmu_start(void)
2000 unsigned long flags;
2002 spin_lock_irqsave(&pmu_lock, flags);
2003 /* Enable all counters */
2004 armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
2005 spin_unlock_irqrestore(&pmu_lock, flags);
2008 static void armv7pmu_stop(void)
2010 unsigned long flags;
2012 spin_lock_irqsave(&pmu_lock, flags);
2013 /* Disable all counters */
2014 armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
2015 spin_unlock_irqrestore(&pmu_lock, flags);
2018 static inline int armv7_a8_pmu_event_map(int config)
2020 int mapping = armv7_a8_perf_map[config];
2021 if (HW_OP_UNSUPPORTED == mapping)
2022 mapping = -EOPNOTSUPP;
2026 static inline int armv7_a9_pmu_event_map(int config)
2028 int mapping = armv7_a9_perf_map[config];
2029 if (HW_OP_UNSUPPORTED == mapping)
2030 mapping = -EOPNOTSUPP;
2034 static u64 armv7pmu_raw_event(u64 config)
2036 return config & 0xff;
2039 static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
2040 struct hw_perf_event *event)
2044 /* Always place a cycle counter into the cycle counter. */
2045 if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
2046 if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
2049 return ARMV7_CYCLE_COUNTER;
2052 * For anything other than a cycle counter, try and use
2053 * the event counters.
2055 for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
2056 if (!test_and_set_bit(idx, cpuc->used_mask))
2060 /* The counters are all in use. */
2065 static struct arm_pmu armv7pmu = {
2066 .handle_irq = armv7pmu_handle_irq,
2067 .enable = armv7pmu_enable_event,
2068 .disable = armv7pmu_disable_event,
2069 .raw_event = armv7pmu_raw_event,
2070 .read_counter = armv7pmu_read_counter,
2071 .write_counter = armv7pmu_write_counter,
2072 .get_event_idx = armv7pmu_get_event_idx,
2073 .start = armv7pmu_start,
2074 .stop = armv7pmu_stop,
2075 .max_period = (1LLU << 32) - 1,
2078 static u32 __init armv7_reset_read_pmnc(void)
2082 /* Initialize & Reset PMNC: C and P bits */
2083 armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
2085 /* Read the number of CNTx counters supported from the PMNC */
2086 nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
2088 /* Add the CPU cycles counter and return */
2093 init_hw_perf_events(void)
2095 unsigned long cpuid = read_cpuid_id();
2096 unsigned long implementor = (cpuid & 0xFF000000) >> 24;
2097 unsigned long part_number = (cpuid & 0xFFF0);
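/*
 * For instance, a Cortex-A8 r1p3 reports a MIDR of 0x411fc083: implementor
 * 0x41 (ARM) and part_number 0xc080 after the masking above.
 */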
2099 /* We only support ARM CPUs implemented by ARM at the moment. */
2100 if (0x41 == implementor) {
2101 switch (part_number) {
2102 case 0xB360: /* ARM1136 */
2103 case 0xB560: /* ARM1156 */
2104 case 0xB760: /* ARM1176 */
2106 memcpy(armpmu_perf_cache_map, armv6_perf_cache_map,
2107 sizeof(armv6_perf_cache_map));
2108 perf_max_events = armv6pmu.num_events;
2110 case 0xB020: /* ARM11mpcore */
2111 armpmu = &armv6mpcore_pmu;
2112 memcpy(armpmu_perf_cache_map,
2113 armv6mpcore_perf_cache_map,
2114 sizeof(armv6mpcore_perf_cache_map));
2115 perf_max_events = armv6mpcore_pmu.num_events;
2117 case 0xC080: /* Cortex-A8 */
2118 armv7pmu.name = ARMV7_PMU_CORTEX_A8_NAME;
2119 memcpy(armpmu_perf_cache_map, armv7_a8_perf_cache_map,
2120 sizeof(armv7_a8_perf_cache_map));
2121 armv7pmu.event_map = armv7_a8_pmu_event_map;
2124 /* Reset PMNC and read the number of CNTx counters supported. */
2126 armv7pmu.num_events = armv7_reset_read_pmnc();
2127 perf_max_events = armv7pmu.num_events;
2129 case 0xC090: /* Cortex-A9 */
2130 armv7pmu.name = ARMV7_PMU_CORTEX_A9_NAME;
2131 memcpy(armpmu_perf_cache_map, armv7_a9_perf_cache_map,
2132 sizeof(armv7_a9_perf_cache_map));
2133 armv7pmu.event_map = armv7_a9_pmu_event_map;
2136 /* Reset PMNC and read the number of CNTx counters supported. */
2138 armv7pmu.num_events = armv7_reset_read_pmnc();
2139 perf_max_events = armv7pmu.num_events;
2142 pr_info("no hardware support available\n");
2143 perf_max_events = -1;
2148 pr_info("enabled with %s PMU driver, %d counters available\n",
2149 armpmu->name, armpmu->num_events);
2153 arch_initcall(init_hw_perf_events);
2156 * Callchain handling code.
2159 callchain_store(struct perf_callchain_entry *entry,
2162 if (entry->nr < PERF_MAX_STACK_DEPTH)
2163 entry->ip[entry->nr++] = ip;
2167 * The registers we're interested in are at the end of the variable
2168 * length saved register structure. The fp points at the end of this
2169 * structure so the address of this struct is:
2170 * (struct frame_tail *)(xxx->fp)-1
2172 * This code has been adapted from the ARM OProfile support.
2175 struct frame_tail *fp;
2178 } __attribute__((packed));
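/*
 * This layout assumes the traditional APCS frame record pushed by the
 * function prologue (saved fp, sp and lr below the saved pc), which is what
 * lets user_backtrace() follow tail->fp from one frame to the next.
 */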
2181 * Get the return address for a single stackframe and return a pointer to the
2184 static struct frame_tail *
2185 user_backtrace(struct frame_tail *tail,
2186 struct perf_callchain_entry *entry)
2188 struct frame_tail buftail;
2190 /* Also check accessibility of one struct frame_tail beyond */
2191 if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
2193 if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
2196 callchain_store(entry, buftail.lr);
2199 * Frame pointers should strictly progress back up the stack
2200 * (towards higher addresses).
2202 if (tail >= buftail.fp)
2205 return buftail.fp - 1;
2209 perf_callchain_user(struct pt_regs *regs,
2210 struct perf_callchain_entry *entry)
2212 struct frame_tail *tail;
2214 callchain_store(entry, PERF_CONTEXT_USER);
2216 if (!user_mode(regs))
2217 regs = task_pt_regs(current);
2219 tail = (struct frame_tail *)regs->ARM_fp - 1;
2221 while (tail && !((unsigned long)tail & 0x3))
2222 tail = user_backtrace(tail, entry);
2226 * Gets called by walk_stackframe() for every stackframe. This will be called
2227 * whilst unwinding the stackframe and is like a subroutine return so we use the PC.
2231 callchain_trace(struct stackframe *fr,
2234 struct perf_callchain_entry *entry = data;
2235 callchain_store(entry, fr->pc);
2240 perf_callchain_kernel(struct pt_regs *regs,
2241 struct perf_callchain_entry *entry)
2243 struct stackframe fr;
2245 callchain_store(entry, PERF_CONTEXT_KERNEL);
2246 fr.fp = regs->ARM_fp;
2247 fr.sp = regs->ARM_sp;
2248 fr.lr = regs->ARM_lr;
2249 fr.pc = regs->ARM_pc;
2250 walk_stackframe(&fr, callchain_trace, entry);
2254 perf_do_callchain(struct pt_regs *regs,
2255 struct perf_callchain_entry *entry)
2262 is_user = user_mode(regs);
2264 if (!current || !current->pid)
2267 if (is_user && current->state != TASK_RUNNING)
2271 perf_callchain_kernel(regs, entry);
2274 perf_callchain_user(regs, entry);
2277 static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
2279 struct perf_callchain_entry *
2280 perf_callchain(struct pt_regs *regs)
2282 struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
2285 perf_do_callchain(regs, entry);