/*
 * ARM: 6064/1: pmu: register IRQs at runtime
 * arch/arm/kernel/perf_event.c
 */
1 #undef DEBUG
2
3 /*
4  * ARM performance counter support.
5  *
6  * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
7  *
8  * ARMv7 support: Jean Pihet <jpihet@mvista.com>
9  * 2010 (c) MontaVista Software, LLC.
10  *
11  * This code is based on the sparc64 perf event code, which is in turn based
12  * on the x86 code. Callchain code is based on the ARM OProfile backtrace
13  * code.
14  */
15 #define pr_fmt(fmt) "hw perfevents: " fmt
16
17 #include <linux/interrupt.h>
18 #include <linux/kernel.h>
19 #include <linux/perf_event.h>
20 #include <linux/platform_device.h>
21 #include <linux/spinlock.h>
22 #include <linux/uaccess.h>
23
24 #include <asm/cputype.h>
25 #include <asm/irq.h>
26 #include <asm/irq_regs.h>
27 #include <asm/pmu.h>
28 #include <asm/stacktrace.h>
29
/* Platform device backing the CPU PMU; non-NULL while the hardware is reserved. */
static struct platform_device *pmu_device;

/*
 * Hardware lock to serialize accesses to PMU registers. Needed for the
 * read/modify/write sequences.
 */
DEFINE_SPINLOCK(pmu_lock);

/*
 * ARMv6 supports a maximum of 3 events, starting from index 1. If we add
 * another platform that supports more, we need to increase this to be the
 * largest of all platforms.
 *
 * ARMv7 supports up to 32 events:
 *  cycle counter CCNT + 31 events counters CNT0..30.
 *  Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
 */
#define ARMPMU_MAX_HWEVENTS		33
49 /* The events for a given CPU. */
/* The events for a given CPU. */
struct cpu_hw_events {
	/*
	 * The events that are active on the CPU for the given index. Index 0
	 * is reserved.
	 */
	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	unsigned long		used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];

	/*
	 * A 1 bit for an index indicates that the counter is actively being
	 * used.
	 */
	unsigned long		active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
};
/* One instance per CPU: each core has its own PMU and counter bookkeeping. */
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
70
/*
 * Backend operations for one CPU type's PMU. The function pointers are
 * filled in by the CPU-specific code (e.g. the ARMv6 support below).
 */
struct arm_pmu {
	char		*name;	/* human-readable PMU name */
	/* counter-overflow interrupt handler, registered per PMU IRQ */
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
	/* enable/disable the counter at index idx for the given event */
	void		(*enable)(struct hw_perf_event *evt, int idx);
	void		(*disable)(struct hw_perf_event *evt, int idx);
	/* map a PERF_TYPE_HARDWARE config to a hardware event number */
	int		(*event_map)(int evt);
	/* translate/filter a PERF_TYPE_RAW config value */
	u64		(*raw_event)(u64);
	/* claim a free counter index in cpuc for hwc; negative if none free */
	int		(*get_event_idx)(struct cpu_hw_events *cpuc,
					 struct hw_perf_event *hwc);
	u32		(*read_counter)(int idx);
	void		(*write_counter)(int idx, u32 val);
	void		(*start)(void);		/* start the whole PMU */
	void		(*stop)(void);		/* stop the whole PMU */
	int		num_events;	/* number of usable counters */
	u64		max_period;	/* largest supported sample period */
};

/* Set at runtime when we know what CPU type we are. */
static const struct arm_pmu *armpmu;
90
/* Sentinel: generic hardware event not countable on this PMU. */
#define HW_OP_UNSUPPORTED		0xFFFF

/* Shorthand for the PERF_COUNT_HW_CACHE_* enum constants. */
#define C(_x) \
	PERF_COUNT_HW_CACHE_##_x

/* Sentinel: (cache, op, result) combination not countable on this PMU. */
#define CACHE_OP_UNSUPPORTED		0xFFFF

/*
 * Cache-event map for the current CPU; presumably populated from one of
 * the per-CPU-type tables below at init time — init code not in this
 * chunk, verify against the probe path.
 */
static unsigned armpmu_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
				     [PERF_COUNT_HW_CACHE_OP_MAX]
				     [PERF_COUNT_HW_CACHE_RESULT_MAX];
101
102 static int
103 armpmu_map_cache_event(u64 config)
104 {
105         unsigned int cache_type, cache_op, cache_result, ret;
106
107         cache_type = (config >>  0) & 0xff;
108         if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
109                 return -EINVAL;
110
111         cache_op = (config >>  8) & 0xff;
112         if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
113                 return -EINVAL;
114
115         cache_result = (config >> 16) & 0xff;
116         if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
117                 return -EINVAL;
118
119         ret = (int)armpmu_perf_cache_map[cache_type][cache_op][cache_result];
120
121         if (ret == CACHE_OP_UNSUPPORTED)
122                 return -ENOENT;
123
124         return ret;
125 }
126
/*
 * Program the hardware counter at @idx so that it overflows after at most
 * another sample period's worth of events, carrying over any overshoot
 * recorded in hwc->period_left. Returns 1 if a new sampling period was
 * started, 0 if the current one is still in progress.
 */
static int
armpmu_event_set_period(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	/* Fell a whole period (or more) behind: restart the period. */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/* Period expired: fold the overshoot into the next period. */
	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	/* Clamp to what the hardware can count in one go. */
	if (left > (s64)armpmu->max_period)
		left = armpmu->max_period;

	atomic64_set(&hwc->prev_count, (u64)-left);

	/* Write -left so the counter overflows after 'left' more events. */
	armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
161
/*
 * Read the counter at @idx and fold the delta since the last read into
 * event->count and hwc->period_left. The cmpxchg retry loop guards
 * against a concurrent update of prev_count (e.g. from the overflow
 * interrupt). Returns the raw counter value read.
 */
static u64
armpmu_event_update(struct perf_event *event,
		    struct hw_perf_event *hwc,
		    int idx)
{
	/* Counters are 32 bit wide; shift up then back down so the delta
	 * is computed modulo 2^32 and counter wrap-around is handled. */
	int shift = 64 - 32;
	s64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(idx);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
187
188 static void
189 armpmu_disable(struct perf_event *event)
190 {
191         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
192         struct hw_perf_event *hwc = &event->hw;
193         int idx = hwc->idx;
194
195         WARN_ON(idx < 0);
196
197         clear_bit(idx, cpuc->active_mask);
198         armpmu->disable(hwc, idx);
199
200         barrier();
201
202         armpmu_event_update(event, hwc, idx);
203         cpuc->events[idx] = NULL;
204         clear_bit(idx, cpuc->used_mask);
205
206         perf_event_update_userpage(event);
207 }
208
209 static void
210 armpmu_read(struct perf_event *event)
211 {
212         struct hw_perf_event *hwc = &event->hw;
213
214         /* Don't read disabled counters! */
215         if (hwc->idx < 0)
216                 return;
217
218         armpmu_event_update(event, hwc, hwc->idx);
219 }
220
221 static void
222 armpmu_unthrottle(struct perf_event *event)
223 {
224         struct hw_perf_event *hwc = &event->hw;
225
226         /*
227          * Set the period again. Some counters can't be stopped, so when we
228          * were throttled we simply disabled the IRQ source and the counter
229          * may have been left counting. If we don't do this step then we may
230          * get an interrupt too soon or *way* too late if the overflow has
231          * happened since disabling.
232          */
233         armpmu_event_set_period(event, hwc, hwc->idx);
234         armpmu->enable(hwc, hwc->idx);
235 }
236
237 static int
238 armpmu_enable(struct perf_event *event)
239 {
240         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
241         struct hw_perf_event *hwc = &event->hw;
242         int idx;
243         int err = 0;
244
245         /* If we don't have a space for the counter then finish early. */
246         idx = armpmu->get_event_idx(cpuc, hwc);
247         if (idx < 0) {
248                 err = idx;
249                 goto out;
250         }
251
252         /*
253          * If there is an event in the counter we are going to use then make
254          * sure it is disabled.
255          */
256         event->hw.idx = idx;
257         armpmu->disable(hwc, idx);
258         cpuc->events[idx] = event;
259         set_bit(idx, cpuc->active_mask);
260
261         /* Set the period for the event. */
262         armpmu_event_set_period(event, hwc, idx);
263
264         /* Enable the event. */
265         armpmu->enable(hwc, idx);
266
267         /* Propagate our changes to the userspace mapping. */
268         perf_event_update_userpage(event);
269
270 out:
271         return err;
272 }
273
/* Operations handed back to the perf core for events we accept. */
static struct pmu pmu = {
	.enable	    = armpmu_enable,
	.disable    = armpmu_disable,
	.unthrottle = armpmu_unthrottle,
	.read	    = armpmu_read,
};
280
281 static int
282 validate_event(struct cpu_hw_events *cpuc,
283                struct perf_event *event)
284 {
285         struct hw_perf_event fake_event = event->hw;
286
287         if (event->pmu && event->pmu != &pmu)
288                 return 0;
289
290         return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
291 }
292
293 static int
294 validate_group(struct perf_event *event)
295 {
296         struct perf_event *sibling, *leader = event->group_leader;
297         struct cpu_hw_events fake_pmu;
298
299         memset(&fake_pmu, 0, sizeof(fake_pmu));
300
301         if (!validate_event(&fake_pmu, leader))
302                 return -ENOSPC;
303
304         list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
305                 if (!validate_event(&fake_pmu, sibling))
306                         return -ENOSPC;
307         }
308
309         if (!validate_event(&fake_pmu, event))
310                 return -ENOSPC;
311
312         return 0;
313 }
314
315 static int
316 armpmu_reserve_hardware(void)
317 {
318         int i, err = -ENODEV, irq;
319
320         pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU);
321         if (IS_ERR(pmu_device)) {
322                 pr_warning("unable to reserve pmu\n");
323                 return PTR_ERR(pmu_device);
324         }
325
326         init_pmu(ARM_PMU_DEVICE_CPU);
327
328         if (pmu_device->num_resources < 1) {
329                 pr_err("no irqs for PMUs defined\n");
330                 return -ENODEV;
331         }
332
333         for (i = 0; i < pmu_device->num_resources; ++i) {
334                 irq = platform_get_irq(pmu_device, i);
335                 if (irq < 0)
336                         continue;
337
338                 err = request_irq(irq, armpmu->handle_irq,
339                                   IRQF_DISABLED | IRQF_NOBALANCING,
340                                   "armpmu", NULL);
341                 if (err) {
342                         pr_warning("unable to request IRQ%d for ARM perf "
343                                 "counters\n", irq);
344                         break;
345                 }
346         }
347
348         if (err) {
349                 for (i = i - 1; i >= 0; --i) {
350                         irq = platform_get_irq(pmu_device, i);
351                         if (irq >= 0)
352                                 free_irq(irq, NULL);
353                 }
354                 release_pmu(pmu_device);
355                 pmu_device = NULL;
356         }
357
358         return err;
359 }
360
361 static void
362 armpmu_release_hardware(void)
363 {
364         int i, irq;
365
366         for (i = pmu_device->num_resources - 1; i >= 0; --i) {
367                 irq = platform_get_irq(pmu_device, i);
368                 if (irq >= 0)
369                         free_irq(irq, NULL);
370         }
371         armpmu->stop();
372
373         release_pmu(pmu_device);
374         pmu_device = NULL;
375 }
376
/* Number of perf events currently using the PMU hardware. */
static atomic_t active_events = ATOMIC_INIT(0);
/* Serializes reserving/releasing the PMU hardware. */
static DEFINE_MUTEX(pmu_reserve_mutex);

/*
 * Event destructor: when the last active event goes away, release the
 * PMU hardware (IRQs and platform-device reservation).
 */
static void
hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
		armpmu_release_hardware();
		mutex_unlock(&pmu_reserve_mutex);
	}
}
388
/*
 * Translate @event's generic attributes into an ARM hardware event and
 * initialise its hw_perf_event state. Returns 0 on success, -EOPNOTSUPP
 * for an unknown event type, -EPERM when mode exclusion is requested,
 * a negative mapping error, or -EINVAL when the event's group cannot be
 * scheduled onto the counters together.
 */
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int mapping, err;

	/* Decode the generic type into an ARM event identifier. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		mapping = armpmu->event_map(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		mapping = armpmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		mapping = armpmu->raw_event(event->attr.config);
	} else {
		pr_debug("event type %x not supported\n", event->attr.type);
		return -EOPNOTSUPP;
	}

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 * The ARM performance counters are on all of the time so if someone
	 * has asked us for some excludes then we have to fail.
	 */
	if (event->attr.exclude_kernel || event->attr.exclude_user ||
	    event->attr.exclude_hv || event->attr.exclude_idle) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EPERM;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;

	/*
	 * Store the event encoding into the config_base field. config and
	 * event_base are unused as the only 2 things we need to know are
	 * the event mapping and the counter to use. The counter to use is
	 * also the index and the config_base is the event type.
	 */
	hwc->config_base	    = (unsigned long)mapping;
	hwc->config		    = 0;
	hwc->event_base		    = 0;

	/* No period requested: sample as rarely as the hardware allows. */
	if (!hwc->sample_period) {
		hwc->sample_period  = armpmu->max_period;
		hwc->last_period    = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	/* Grouped events must all fit on the PMU at the same time. */
	if (event->group_leader != event) {
		err = validate_group(event);
		if (err)
			return -EINVAL;
	}

	return err;
}
458
/*
 * perf core entry point: validate @event and, for the first active event,
 * reserve the PMU hardware. Returns &pmu on success or an ERR_PTR.
 */
const struct pmu *
hw_perf_event_init(struct perf_event *event)
{
	int err = 0;

	if (!armpmu)
		return ERR_PTR(-ENODEV);

	event->destroy = hw_perf_event_destroy;

	/* Fast path: some event already holds the hardware reservation. */
	if (!atomic_inc_not_zero(&active_events)) {
		/*
		 * NOTE(review): active_events was observed as 0 just above,
		 * so this overcommit check only triggers on a race with a
		 * concurrent initialiser — confirm intent.
		 */
		if (atomic_read(&active_events) > perf_max_events) {
			atomic_dec(&active_events);
			return ERR_PTR(-ENOSPC);
		}

		/* First user: reserve the hardware under the mutex. */
		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			err = armpmu_reserve_hardware();
		}

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return ERR_PTR(err);

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err ? ERR_PTR(err) : &pmu;
}
494
/*
 * (Re)start the PMU on this CPU: re-enable every counter that has an
 * active event, then start the PMU itself.
 */
void
hw_perf_enable(void)
{
	/* Enable all of the perf events on hardware. */
	int idx;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!armpmu)
		return;

	/* Counter indices run from 1; slot 0 is reserved and stays NULL. */
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];

		if (!event)
			continue;

		armpmu->enable(&event->hw, idx);
	}

	armpmu->start();
}
516
517 void
518 hw_perf_disable(void)
519 {
520         if (armpmu)
521                 armpmu->stop();
522 }
523
524 /*
525  * ARMv6 Performance counter handling code.
526  *
527  * ARMv6 has 2 configurable performance counters and a single cycle counter.
528  * They all share a single reset bit but can be written to zero so we can use
529  * that for a reset.
530  *
531  * The counters can't be individually enabled or disabled so when we remove
532  * one event and replace it with another we could get spurious counts from the
533  * wrong event. However, we can take advantage of the fact that the
534  * performance counters can export events to the event bus, and the event bus
535  * itself can be monitored. This requires that we *don't* export the events to
536  * the event bus. The procedure for disabling a configurable counter is:
537  *      - change the counter to count the ETMEXTOUT[0] signal (0x20). This
538  *        effectively stops the counter from counting.
 *      - disable the counter's interrupt generation (each counter has its
 *        own interrupt enable bit).
541  * Once stopped, the counter value can be written as 0 to reset.
542  *
543  * To enable a counter:
544  *      - enable the counter's interrupt generation.
545  *      - set the new event type.
546  *
547  * Note: the dedicated cycle counter only counts cycles and can't be
548  * enabled/disabled independently of the others. When we want to disable the
549  * cycle counter, we have to just disable the interrupt reporting and start
550  * ignoring that counter. When re-enabling, we have to reset the value and
551  * enable the interrupt.
552  */
553
/*
 * ARMv6 performance counter event encodings, as programmed into the
 * event-select fields of the PMU control register.
 */
enum armv6_perf_types {
	ARMV6_PERFCTR_ICACHE_MISS	    = 0x0,
	ARMV6_PERFCTR_IBUF_STALL	    = 0x1,
	ARMV6_PERFCTR_DDEP_STALL	    = 0x2,
	ARMV6_PERFCTR_ITLB_MISS		    = 0x3,
	ARMV6_PERFCTR_DTLB_MISS		    = 0x4,
	ARMV6_PERFCTR_BR_EXEC		    = 0x5,
	ARMV6_PERFCTR_BR_MISPREDICT	    = 0x6,
	ARMV6_PERFCTR_INSTR_EXEC	    = 0x7,
	ARMV6_PERFCTR_DCACHE_HIT	    = 0x9,
	ARMV6_PERFCTR_DCACHE_ACCESS	    = 0xA,
	ARMV6_PERFCTR_DCACHE_MISS	    = 0xB,
	ARMV6_PERFCTR_DCACHE_WBACK	    = 0xC,
	ARMV6_PERFCTR_SW_PC_CHANGE	    = 0xD,
	ARMV6_PERFCTR_MAIN_TLB_MISS	    = 0xF,
	ARMV6_PERFCTR_EXPL_D_ACCESS	    = 0x10,
	ARMV6_PERFCTR_LSU_FULL_STALL	    = 0x11,
	ARMV6_PERFCTR_WBUF_DRAINED	    = 0x12,
	ARMV6_PERFCTR_CPU_CYCLES	    = 0xFF,
	/* ETMEXTOUT[0]: used to quiesce a counter that can't be stopped. */
	ARMV6_PERFCTR_NOP		    = 0x20,
};
575
/*
 * Counter indices for the ARMv6 PMU: the cycle counter plus the two
 * configurable counters. Index 0 is reserved (see cpu_hw_events).
 */
enum armv6_counters {
	ARMV6_CYCLE_COUNTER = 1,
	ARMV6_COUNTER0,
	ARMV6_COUNTER1,
};
581
/*
 * The hardware events that we support. We do support cache operations but
 * we have harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV6_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV6_PERFCTR_INSTR_EXEC,
	/* HW_OP_UNSUPPORTED entries cannot be counted on ARMv6. */
	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV6_PERFCTR_BR_MISPREDICT,
	[PERF_COUNT_HW_BUS_CYCLES]	    = HW_OP_UNSUPPORTED,
};
596
/*
 * Generic cache events -> ARMv6 event numbers. Entries marked
 * CACHE_OP_UNSUPPORTED cannot be counted on this PMU.
 */
static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV6_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_DCACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV6_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_DCACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_ICACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_ICACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * The ARM performance counters can count micro DTLB misses,
		 * micro ITLB misses and main TLB misses. There isn't an event
		 * for TLB misses, so use the micro misses here and if users
		 * want the main TLB misses they can use a raw counter.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_DTLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_DTLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV6_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};
697
/*
 * ARM11 MPCore performance counter event encodings. Note that the
 * MPCore numbering differs from plain ARMv6 (e.g. D-cache events are
 * split into separate read/write access and miss events).
 */
enum armv6mpcore_perf_types {
	ARMV6MPCORE_PERFCTR_ICACHE_MISS	    = 0x0,
	ARMV6MPCORE_PERFCTR_IBUF_STALL	    = 0x1,
	ARMV6MPCORE_PERFCTR_DDEP_STALL	    = 0x2,
	ARMV6MPCORE_PERFCTR_ITLB_MISS	    = 0x3,
	ARMV6MPCORE_PERFCTR_DTLB_MISS	    = 0x4,
	ARMV6MPCORE_PERFCTR_BR_EXEC	    = 0x5,
	ARMV6MPCORE_PERFCTR_BR_NOTPREDICT   = 0x6,
	ARMV6MPCORE_PERFCTR_BR_MISPREDICT   = 0x7,
	ARMV6MPCORE_PERFCTR_INSTR_EXEC	    = 0x8,
	ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
	ARMV6MPCORE_PERFCTR_DCACHE_RDMISS   = 0xB,
	ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
	ARMV6MPCORE_PERFCTR_DCACHE_WRMISS   = 0xD,
	ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
	ARMV6MPCORE_PERFCTR_SW_PC_CHANGE    = 0xF,
	ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS   = 0x10,
	ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
	ARMV6MPCORE_PERFCTR_LSU_FULL_STALL  = 0x12,
	ARMV6MPCORE_PERFCTR_WBUF_DRAINED    = 0x13,
	ARMV6MPCORE_PERFCTR_CPU_CYCLES	    = 0xFF,
};
720
/*
 * The hardware events that we support. We do support cache operations but
 * we have harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
	/* HW_OP_UNSUPPORTED entries cannot be counted on ARM11 MPCore. */
	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
	[PERF_COUNT_HW_BUS_CYCLES]	    = HW_OP_UNSUPPORTED,
};
735
/*
 * Generic cache events -> ARM11 MPCore event numbers. Unlike plain ARMv6,
 * MPCore has distinct read/write D-cache events, so L1D is mapped
 * per-operation. CACHE_OP_UNSUPPORTED entries cannot be counted.
 */
static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]  =
				ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
			[C(RESULT_MISS)]    =
				ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]  =
				ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
			[C(RESULT_MISS)]    =
				ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * The ARM performance counters can count micro DTLB misses,
		 * micro ITLB misses and main TLB misses. There isn't an event
		 * for TLB misses, so use the micro misses here and if users
		 * want the main TLB misses they can use a raw counter.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_DTLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_DTLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,
		},
	},
};
834
/*
 * Read the ARMv6 PMCR (performance monitor control register) via
 * CP15 c15, c12, opcode2 0.
 */
static inline unsigned long
armv6_pmcr_read(void)
{
        u32 val;
        asm volatile("mrc   p15, 0, %0, c15, c12, 0" : "=r"(val));
        return val;
}
842
/*
 * Write the ARMv6 PMCR. Writing the overflow flag bits back clears the
 * corresponding interrupts (see armv6pmu_handle_irq).
 */
static inline void
armv6_pmcr_write(unsigned long val)
{
        asm volatile("mcr   p15, 0, %0, c15, c12, 0" : : "r"(val));
}
848
/*
 * ARMv6 PMCR bit layout: global enable/reset controls, per-counter
 * interrupt enables and overflow flags, and the two 8-bit event-select
 * fields for the programmable counters.
 */
#define ARMV6_PMCR_ENABLE               (1 << 0)
#define ARMV6_PMCR_CTR01_RESET          (1 << 1)
#define ARMV6_PMCR_CCOUNT_RESET         (1 << 2)
#define ARMV6_PMCR_CCOUNT_DIV           (1 << 3)
#define ARMV6_PMCR_COUNT0_IEN           (1 << 4)
#define ARMV6_PMCR_COUNT1_IEN           (1 << 5)
#define ARMV6_PMCR_CCOUNT_IEN           (1 << 6)
#define ARMV6_PMCR_COUNT0_OVERFLOW      (1 << 8)
#define ARMV6_PMCR_COUNT1_OVERFLOW      (1 << 9)
#define ARMV6_PMCR_CCOUNT_OVERFLOW      (1 << 10)
#define ARMV6_PMCR_EVT_COUNT0_SHIFT     20
#define ARMV6_PMCR_EVT_COUNT0_MASK      (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
#define ARMV6_PMCR_EVT_COUNT1_SHIFT     12
#define ARMV6_PMCR_EVT_COUNT1_MASK      (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)

/* All three overflow flags (CCNT, CNT0, CNT1) in one mask. */
#define ARMV6_PMCR_OVERFLOWED_MASK \
        (ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
         ARMV6_PMCR_CCOUNT_OVERFLOW)
867
/*
 * Return non-zero if any of the three counters has its overflow flag set
 * in the given PMCR value.
 */
static inline int
armv6_pmcr_has_overflowed(unsigned long pmcr)
{
        return (pmcr & ARMV6_PMCR_OVERFLOWED_MASK);
}
873
874 static inline int
875 armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
876                                   enum armv6_counters counter)
877 {
878         int ret = 0;
879
880         if (ARMV6_CYCLE_COUNTER == counter)
881                 ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
882         else if (ARMV6_COUNTER0 == counter)
883                 ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
884         else if (ARMV6_COUNTER1 == counter)
885                 ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
886         else
887                 WARN_ONCE(1, "invalid counter number (%d)\n", counter);
888
889         return ret;
890 }
891
892 static inline u32
893 armv6pmu_read_counter(int counter)
894 {
895         unsigned long value = 0;
896
897         if (ARMV6_CYCLE_COUNTER == counter)
898                 asm volatile("mrc   p15, 0, %0, c15, c12, 1" : "=r"(value));
899         else if (ARMV6_COUNTER0 == counter)
900                 asm volatile("mrc   p15, 0, %0, c15, c12, 2" : "=r"(value));
901         else if (ARMV6_COUNTER1 == counter)
902                 asm volatile("mrc   p15, 0, %0, c15, c12, 3" : "=r"(value));
903         else
904                 WARN_ONCE(1, "invalid counter number (%d)\n", counter);
905
906         return value;
907 }
908
909 static inline void
910 armv6pmu_write_counter(int counter,
911                        u32 value)
912 {
913         if (ARMV6_CYCLE_COUNTER == counter)
914                 asm volatile("mcr   p15, 0, %0, c15, c12, 1" : : "r"(value));
915         else if (ARMV6_COUNTER0 == counter)
916                 asm volatile("mcr   p15, 0, %0, c15, c12, 2" : : "r"(value));
917         else if (ARMV6_COUNTER1 == counter)
918                 asm volatile("mcr   p15, 0, %0, c15, c12, 3" : : "r"(value));
919         else
920                 WARN_ONCE(1, "invalid counter number (%d)\n", counter);
921 }
922
923 void
924 armv6pmu_enable_event(struct hw_perf_event *hwc,
925                       int idx)
926 {
927         unsigned long val, mask, evt, flags;
928
929         if (ARMV6_CYCLE_COUNTER == idx) {
930                 mask    = 0;
931                 evt     = ARMV6_PMCR_CCOUNT_IEN;
932         } else if (ARMV6_COUNTER0 == idx) {
933                 mask    = ARMV6_PMCR_EVT_COUNT0_MASK;
934                 evt     = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
935                           ARMV6_PMCR_COUNT0_IEN;
936         } else if (ARMV6_COUNTER1 == idx) {
937                 mask    = ARMV6_PMCR_EVT_COUNT1_MASK;
938                 evt     = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
939                           ARMV6_PMCR_COUNT1_IEN;
940         } else {
941                 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
942                 return;
943         }
944
945         /*
946          * Mask out the current event and set the counter to count the event
947          * that we're interested in.
948          */
949         spin_lock_irqsave(&pmu_lock, flags);
950         val = armv6_pmcr_read();
951         val &= ~mask;
952         val |= evt;
953         armv6_pmcr_write(val);
954         spin_unlock_irqrestore(&pmu_lock, flags);
955 }
956
/*
 * PMU overflow interrupt handler. All three ARMv6 counters share a single
 * interrupt, so every active counter must be checked for overflow on each
 * invocation. Returns IRQ_NONE if none of our counters overflowed.
 */
static irqreturn_t
armv6pmu_handle_irq(int irq_num,
                    void *dev)
{
        unsigned long pmcr = armv6_pmcr_read();
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        struct pt_regs *regs;
        int idx;

        if (!armv6_pmcr_has_overflowed(pmcr))
                return IRQ_NONE;

        regs = get_irq_regs();

        /*
         * The interrupts are cleared by writing the overflow flags back to
         * the control register. All of the other bits don't have any effect
         * if they are rewritten, so write the whole value back.
         */
        armv6_pmcr_write(pmcr);

        perf_sample_data_init(&data, 0);

        cpuc = &__get_cpu_var(cpu_hw_events);
        /* Counter indices are 1-based (index 0 is reserved), hence <=. */
        for (idx = 0; idx <= armpmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;

                if (!test_bit(idx, cpuc->active_mask))
                        continue;

                /*
                 * We have a single interrupt for all counters. Check that
                 * each counter has overflowed before we process it.
                 */
                if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
                        continue;

                hwc = &event->hw;
                armpmu_event_update(event, hwc, idx);
                data.period = event->hw.last_period;
                if (!armpmu_event_set_period(event, hwc, idx))
                        continue;

                /* A non-zero return asks us to stop this counter. */
                if (perf_event_overflow(event, 0, &data, regs))
                        armpmu->disable(hwc, idx);
        }

        /*
         * Handle the pending perf events.
         *
         * Note: this call *must* be run with interrupts enabled. For
         * platforms that can have the PMU interrupts raised as a PMI, this
         * will not work.
         */
        perf_event_do_pending();

        return IRQ_HANDLED;
}
1017
1018 static void
1019 armv6pmu_start(void)
1020 {
1021         unsigned long flags, val;
1022
1023         spin_lock_irqsave(&pmu_lock, flags);
1024         val = armv6_pmcr_read();
1025         val |= ARMV6_PMCR_ENABLE;
1026         armv6_pmcr_write(val);
1027         spin_unlock_irqrestore(&pmu_lock, flags);
1028 }
1029
1030 void
1031 armv6pmu_stop(void)
1032 {
1033         unsigned long flags, val;
1034
1035         spin_lock_irqsave(&pmu_lock, flags);
1036         val = armv6_pmcr_read();
1037         val &= ~ARMV6_PMCR_ENABLE;
1038         armv6_pmcr_write(val);
1039         spin_unlock_irqrestore(&pmu_lock, flags);
1040 }
1041
1042 static inline int
1043 armv6pmu_event_map(int config)
1044 {
1045         int mapping = armv6_perf_map[config];
1046         if (HW_OP_UNSUPPORTED == mapping)
1047                 mapping = -EOPNOTSUPP;
1048         return mapping;
1049 }
1050
1051 static inline int
1052 armv6mpcore_pmu_event_map(int config)
1053 {
1054         int mapping = armv6mpcore_perf_map[config];
1055         if (HW_OP_UNSUPPORTED == mapping)
1056                 mapping = -EOPNOTSUPP;
1057         return mapping;
1058 }
1059
1060 static u64
1061 armv6pmu_raw_event(u64 config)
1062 {
1063         return config & 0xff;
1064 }
1065
1066 static int
1067 armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
1068                        struct hw_perf_event *event)
1069 {
1070         /* Always place a cycle counter into the cycle counter. */
1071         if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
1072                 if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
1073                         return -EAGAIN;
1074
1075                 return ARMV6_CYCLE_COUNTER;
1076         } else {
1077                 /*
1078                  * For anything other than a cycle counter, try and use
1079                  * counter0 and counter1.
1080                  */
1081                 if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask)) {
1082                         return ARMV6_COUNTER1;
1083                 }
1084
1085                 if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask)) {
1086                         return ARMV6_COUNTER0;
1087                 }
1088
1089                 /* The counters are all in use. */
1090                 return -EAGAIN;
1091         }
1092 }
1093
1094 static void
1095 armv6pmu_disable_event(struct hw_perf_event *hwc,
1096                        int idx)
1097 {
1098         unsigned long val, mask, evt, flags;
1099
1100         if (ARMV6_CYCLE_COUNTER == idx) {
1101                 mask    = ARMV6_PMCR_CCOUNT_IEN;
1102                 evt     = 0;
1103         } else if (ARMV6_COUNTER0 == idx) {
1104                 mask    = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
1105                 evt     = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
1106         } else if (ARMV6_COUNTER1 == idx) {
1107                 mask    = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
1108                 evt     = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
1109         } else {
1110                 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
1111                 return;
1112         }
1113
1114         /*
1115          * Mask out the current event and set the counter to count the number
1116          * of ETM bus signal assertion cycles. The external reporting should
1117          * be disabled and so this should never increment.
1118          */
1119         spin_lock_irqsave(&pmu_lock, flags);
1120         val = armv6_pmcr_read();
1121         val &= ~mask;
1122         val |= evt;
1123         armv6_pmcr_write(val);
1124         spin_unlock_irqrestore(&pmu_lock, flags);
1125 }
1126
1127 static void
1128 armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
1129                               int idx)
1130 {
1131         unsigned long val, mask, flags, evt = 0;
1132
1133         if (ARMV6_CYCLE_COUNTER == idx) {
1134                 mask    = ARMV6_PMCR_CCOUNT_IEN;
1135         } else if (ARMV6_COUNTER0 == idx) {
1136                 mask    = ARMV6_PMCR_COUNT0_IEN;
1137         } else if (ARMV6_COUNTER1 == idx) {
1138                 mask    = ARMV6_PMCR_COUNT1_IEN;
1139         } else {
1140                 WARN_ONCE(1, "invalid counter number (%d)\n", idx);
1141                 return;
1142         }
1143
1144         /*
1145          * Unlike UP ARMv6, we don't have a way of stopping the counters. We
1146          * simply disable the interrupt reporting.
1147          */
1148         spin_lock_irqsave(&pmu_lock, flags);
1149         val = armv6_pmcr_read();
1150         val &= ~mask;
1151         val |= evt;
1152         armv6_pmcr_write(val);
1153         spin_unlock_irqrestore(&pmu_lock, flags);
1154 }
1155
/*
 * PMU description for single-core ARMv6: a cycle counter plus two
 * programmable event counters (num_events = 3), all 32 bits wide.
 */
static const struct arm_pmu armv6pmu = {
        .name                   = "v6",
        .handle_irq             = armv6pmu_handle_irq,
        .enable                 = armv6pmu_enable_event,
        .disable                = armv6pmu_disable_event,
        .event_map              = armv6pmu_event_map,
        .raw_event              = armv6pmu_raw_event,
        .read_counter           = armv6pmu_read_counter,
        .write_counter          = armv6pmu_write_counter,
        .get_event_idx          = armv6pmu_get_event_idx,
        .start                  = armv6pmu_start,
        .stop                   = armv6pmu_stop,
        .num_events             = 3,
        .max_period             = (1LLU << 32) - 1,
};
1171
1172 /*
1173  * ARMv6mpcore is almost identical to single core ARMv6 with the exception
1174  * that some of the events have different enumerations and that there is no
1175  * *hack* to stop the programmable counters. To stop the counters we simply
1176  * disable the interrupt reporting and update the event. When unthrottling we
1177  * reset the period and enable the interrupt reporting.
1178  */
1179 static const struct arm_pmu armv6mpcore_pmu = {
1180         .name                   = "v6mpcore",
1181         .handle_irq             = armv6pmu_handle_irq,
1182         .enable                 = armv6pmu_enable_event,
1183         .disable                = armv6mpcore_pmu_disable_event,
1184         .event_map              = armv6mpcore_pmu_event_map,
1185         .raw_event              = armv6pmu_raw_event,
1186         .read_counter           = armv6pmu_read_counter,
1187         .write_counter          = armv6pmu_write_counter,
1188         .get_event_idx          = armv6pmu_get_event_idx,
1189         .start                  = armv6pmu_start,
1190         .stop                   = armv6pmu_stop,
1191         .num_events             = 3,
1192         .max_period             = (1LLU << 32) - 1,
1193 };
1194
1195 /*
1196  * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
1197  *
1198  * Copied from ARMv6 code, with the low level code inspired
1199  *  by the ARMv7 Oprofile code.
1200  *
1201  * Cortex-A8 has up to 4 configurable performance counters and
1202  *  a single cycle counter.
1203  * Cortex-A9 has up to 31 configurable performance counters and
1204  *  a single cycle counter.
1205  *
1206  * All counters can be enabled/disabled and IRQ masked separately. The cycle
1207  *  counter and all 4 performance counters together can be reset separately.
1208  */
1209
/* Human-readable names for the supported ARMv7 PMU implementations. */
#define ARMV7_PMU_CORTEX_A8_NAME                "ARMv7 Cortex-A8"

#define ARMV7_PMU_CORTEX_A9_NAME                "ARMv7 Cortex-A9"
1213
/*
 * Common ARMv7 event types, valid on both Cortex-A8 and Cortex-A9.
 * Implementation-specific events live in the per-core enums below.
 */
enum armv7_perf_types {
        ARMV7_PERFCTR_PMNC_SW_INCR              = 0x00,
        ARMV7_PERFCTR_IFETCH_MISS               = 0x01,
        ARMV7_PERFCTR_ITLB_MISS                 = 0x02,
        ARMV7_PERFCTR_DCACHE_REFILL             = 0x03,
        ARMV7_PERFCTR_DCACHE_ACCESS             = 0x04,
        ARMV7_PERFCTR_DTLB_REFILL               = 0x05,
        ARMV7_PERFCTR_DREAD                     = 0x06,
        ARMV7_PERFCTR_DWRITE                    = 0x07,

        ARMV7_PERFCTR_EXC_TAKEN                 = 0x09,
        ARMV7_PERFCTR_EXC_EXECUTED              = 0x0A,
        ARMV7_PERFCTR_CID_WRITE                 = 0x0B,
        /* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
         * It counts:
         *  - all branch instructions,
         *  - instructions that explicitly write the PC,
         *  - exception generating instructions.
         */
        ARMV7_PERFCTR_PC_WRITE                  = 0x0C,
        ARMV7_PERFCTR_PC_IMM_BRANCH             = 0x0D,
        ARMV7_PERFCTR_UNALIGNED_ACCESS          = 0x0F,
        ARMV7_PERFCTR_PC_BRANCH_MIS_PRED        = 0x10,
        ARMV7_PERFCTR_CLOCK_CYCLES              = 0x11,

        ARMV7_PERFCTR_PC_BRANCH_MIS_USED        = 0x12,

        /* Pseudo-event used to select the dedicated cycle counter. */
        ARMV7_PERFCTR_CPU_CYCLES                = 0xFF
};
1244
/*
 * ARMv7 Cortex-A8 specific event types. These overlap numerically with
 * the Cortex-A9 events (0x40 and up) and must not be mixed between cores.
 */
enum armv7_a8_perf_types {
        ARMV7_PERFCTR_INSTR_EXECUTED            = 0x08,

        ARMV7_PERFCTR_PC_PROC_RETURN            = 0x0E,

        ARMV7_PERFCTR_WRITE_BUFFER_FULL         = 0x40,
        ARMV7_PERFCTR_L2_STORE_MERGED           = 0x41,
        ARMV7_PERFCTR_L2_STORE_BUFF             = 0x42,
        ARMV7_PERFCTR_L2_ACCESS                 = 0x43,
        ARMV7_PERFCTR_L2_CACH_MISS              = 0x44,
        ARMV7_PERFCTR_AXI_READ_CYCLES           = 0x45,
        ARMV7_PERFCTR_AXI_WRITE_CYCLES          = 0x46,
        ARMV7_PERFCTR_MEMORY_REPLAY             = 0x47,
        ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY   = 0x48,
        ARMV7_PERFCTR_L1_DATA_MISS              = 0x49,
        ARMV7_PERFCTR_L1_INST_MISS              = 0x4A,
        ARMV7_PERFCTR_L1_DATA_COLORING          = 0x4B,
        ARMV7_PERFCTR_L1_NEON_DATA              = 0x4C,
        ARMV7_PERFCTR_L1_NEON_CACH_DATA         = 0x4D,
        ARMV7_PERFCTR_L2_NEON                   = 0x4E,
        ARMV7_PERFCTR_L2_NEON_HIT               = 0x4F,
        ARMV7_PERFCTR_L1_INST                   = 0x50,
        ARMV7_PERFCTR_PC_RETURN_MIS_PRED        = 0x51,
        ARMV7_PERFCTR_PC_BRANCH_FAILED          = 0x52,
        ARMV7_PERFCTR_PC_BRANCH_TAKEN           = 0x53,
        ARMV7_PERFCTR_PC_BRANCH_EXECUTED        = 0x54,
        ARMV7_PERFCTR_OP_EXECUTED               = 0x55,
        ARMV7_PERFCTR_CYCLES_INST_STALL         = 0x56,
        ARMV7_PERFCTR_CYCLES_INST               = 0x57,
        ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL    = 0x58,
        ARMV7_PERFCTR_CYCLES_NEON_INST_STALL    = 0x59,
        ARMV7_PERFCTR_NEON_CYCLES               = 0x5A,

        ARMV7_PERFCTR_PMU0_EVENTS               = 0x70,
        ARMV7_PERFCTR_PMU1_EVENTS               = 0x71,
        ARMV7_PERFCTR_PMU_EVENTS                = 0x72,
};
1283
/*
 * ARMv7 Cortex-A9 specific event types. These overlap numerically with
 * the Cortex-A8 events (0x40 and up) and must not be mixed between cores.
 */
enum armv7_a9_perf_types {
        ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC     = 0x40,
        ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC     = 0x41,
        ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC       = 0x42,

        ARMV7_PERFCTR_COHERENT_LINE_MISS        = 0x50,
        ARMV7_PERFCTR_COHERENT_LINE_HIT         = 0x51,

        ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES   = 0x60,
        ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES   = 0x61,
        ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62,
        ARMV7_PERFCTR_STREX_EXECUTED_PASSED     = 0x63,
        ARMV7_PERFCTR_STREX_EXECUTED_FAILED     = 0x64,
        ARMV7_PERFCTR_DATA_EVICTION             = 0x65,
        ARMV7_PERFCTR_ISSUE_STAGE_NO_INST       = 0x66,
        ARMV7_PERFCTR_ISSUE_STAGE_EMPTY         = 0x67,
        ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE  = 0x68,

        ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E,

        ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST   = 0x70,
        ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71,
        ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST  = 0x72,
        ARMV7_PERFCTR_FP_EXECUTED_INST          = 0x73,
        ARMV7_PERFCTR_NEON_EXECUTED_INST        = 0x74,

        ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80,
        ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES  = 0x81,
        ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES        = 0x82,
        ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES        = 0x83,
        ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES  = 0x84,
        ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES  = 0x85,
        ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES      = 0x86,

        ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES  = 0x8A,
        ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B,

        ARMV7_PERFCTR_ISB_INST                  = 0x90,
        ARMV7_PERFCTR_DSB_INST                  = 0x91,
        ARMV7_PERFCTR_DMB_INST                  = 0x92,
        ARMV7_PERFCTR_EXT_INTERRUPTS            = 0x93,

        ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED     = 0xA0,
        ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED       = 0xA1,
        ARMV7_PERFCTR_PLE_FIFO_FLUSH            = 0xA2,
        ARMV7_PERFCTR_PLE_RQST_COMPLETED        = 0xA3,
        ARMV7_PERFCTR_PLE_FIFO_OVERFLOW         = 0xA4,
        ARMV7_PERFCTR_PLE_RQST_PROG             = 0xA5
};
1334
1335 /*
1336  * Cortex-A8 HW events mapping
1337  *
1338  * The hardware events that we support. We do support cache operations but
1339  * we have harvard caches and no way to combine instruction and data
1340  * accesses/misses in hardware.
1341  */
1342 static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
1343         [PERF_COUNT_HW_CPU_CYCLES]          = ARMV7_PERFCTR_CPU_CYCLES,
1344         [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV7_PERFCTR_INSTR_EXECUTED,
1345         [PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
1346         [PERF_COUNT_HW_CACHE_MISSES]        = HW_OP_UNSUPPORTED,
1347         [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
1348         [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1349         [PERF_COUNT_HW_BUS_CYCLES]          = ARMV7_PERFCTR_CLOCK_CYCLES,
1350 };
1351
/*
 * Cortex-A8 cache events mapping: generic perf cache (op, result) pairs
 * to A8 event numbers; CACHE_OP_UNSUPPORTED marks uncountable combinations.
 */
static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                          [PERF_COUNT_HW_CACHE_OP_MAX]
                                          [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        [C(L1D)] = {
                /*
                 * The performance counters don't differentiate between read
                 * and write accesses/misses so this isn't strictly correct,
                 * but it's the best we can do. Writes and reads get
                 * combined.
                 */
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_DCACHE_ACCESS,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_DCACHE_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_DCACHE_ACCESS,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_DCACHE_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(L1I)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_INST,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_INST_MISS,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_INST,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_L1_INST_MISS,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(LL)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L2_ACCESS,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_L2_CACH_MISS,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L2_ACCESS,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_L2_CACH_MISS,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(DTLB)] = {
                /*
                 * Only ITLB misses and DTLB refills are supported; there is
                 * no event for TLB accesses. Anything else must be counted
                 * with a raw event.
                 */
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_DTLB_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_DTLB_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(ITLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_MISS,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_MISS,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(BPU)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_WRITE,
                        [C(RESULT_MISS)]
                                        = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_WRITE,
                        [C(RESULT_MISS)]
                                        = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
};
1453
1454 /*
1455  * Cortex-A9 HW events mapping
1456  */
1457 static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
1458         [PERF_COUNT_HW_CPU_CYCLES]          = ARMV7_PERFCTR_CPU_CYCLES,
1459         [PERF_COUNT_HW_INSTRUCTIONS]        =
1460                                         ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
1461         [PERF_COUNT_HW_CACHE_REFERENCES]    = ARMV7_PERFCTR_COHERENT_LINE_HIT,
1462         [PERF_COUNT_HW_CACHE_MISSES]        = ARMV7_PERFCTR_COHERENT_LINE_MISS,
1463         [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
1464         [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
1465         [PERF_COUNT_HW_BUS_CYCLES]          = ARMV7_PERFCTR_CLOCK_CYCLES,
1466 };
1467
/*
 * Cortex-A9 cache events mapping: generic perf cache (op, result) pairs
 * to A9 event numbers; CACHE_OP_UNSUPPORTED marks uncountable combinations.
 */
static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                          [PERF_COUNT_HW_CACHE_OP_MAX]
                                          [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        [C(L1D)] = {
                /*
                 * The performance counters don't differentiate between read
                 * and write accesses/misses so this isn't strictly correct,
                 * but it's the best we can do. Writes and reads get
                 * combined.
                 */
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_DCACHE_ACCESS,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_DCACHE_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_DCACHE_ACCESS,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_DCACHE_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(L1I)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_IFETCH_MISS,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_IFETCH_MISS,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(LL)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(DTLB)] = {
                /*
                 * Only ITLB misses and DTLB refills are supported; there is
                 * no event for TLB accesses. Anything else must be counted
                 * with a raw event.
                 */
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_DTLB_REFILL,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_DTLB_REFILL,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(ITLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_MISS,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_MISS,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(BPU)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_WRITE,
                        [C(RESULT_MISS)]
                                        = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_WRITE,
                        [C(RESULT_MISS)]
                                        = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
};
1569
/*
 * Perf Events counters
 *
 * Index 0 is reserved (see cpu_hw_events); the cycle counter and the
 * event counters are mapped to indices 1 and 2.. respectively.
 */
enum armv7_counters {
	ARMV7_CYCLE_COUNTER		= 1,	/* Cycle counter */
	ARMV7_COUNTER0			= 2,	/* First event counter */
};

/*
 * The cycle counter is ARMV7_CYCLE_COUNTER.
 * The first event counter is ARMV7_COUNTER0.
 * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
 */
#define ARMV7_COUNTER_LAST	(ARMV7_COUNTER0 + armpmu->num_events - 1)

/*
 * ARMv7 low level PMNC access
 */

/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug*/
#define ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
#define ARMV7_PMNC_N_MASK	0x1f
#define ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */

/*
 * Available counters
 */
#define ARMV7_CNT0		0	/* First event counter */
#define ARMV7_CCNT		31	/* Cycle counter */

/* Perf Event to low level counters mapping */
#define ARMV7_EVENT_CNT_TO_CNTx	(ARMV7_COUNTER0 - ARMV7_CNT0)

/*
 * CNTENS: counters enable reg
 *
 * Note: the (idx) arguments below are fully parenthesized so that
 * expression arguments expand safely inside the subtraction.
 */
#define ARMV7_CNTENS_P(idx)	(1 << ((idx) - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENS_C		(1 << ARMV7_CCNT)

/*
 * CNTENC: counters disable reg
 */
#define ARMV7_CNTENC_P(idx)	(1 << ((idx) - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENC_C		(1 << ARMV7_CCNT)

/*
 * INTENS: counters overflow interrupt enable reg
 */
#define ARMV7_INTENS_P(idx)	(1 << ((idx) - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENS_C		(1 << ARMV7_CCNT)

/*
 * INTENC: counters overflow interrupt disable reg
 */
#define ARMV7_INTENC_P(idx)	(1 << ((idx) - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENC_C		(1 << ARMV7_CCNT)

/*
 * EVTSEL: Event selection reg
 */
#define ARMV7_EVTSEL_MASK	0xff		/* Mask for writable bits */

/*
 * SELECT: Counter selection reg
 */
#define ARMV7_SELECT_MASK	0x1f		/* Mask for writable bits */

/*
 * FLAG: counters overflow flag status reg
 */
#define ARMV7_FLAG_P(idx)	(1 << ((idx) - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_FLAG_C		(1 << ARMV7_CCNT)
#define ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
#define ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
1652
1653 static inline unsigned long armv7_pmnc_read(void)
1654 {
1655         u32 val;
1656         asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
1657         return val;
1658 }
1659
1660 static inline void armv7_pmnc_write(unsigned long val)
1661 {
1662         val &= ARMV7_PMNC_MASK;
1663         asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
1664 }
1665
/*
 * Return non-zero if any counter overflow flag is set in the given
 * FLAG register snapshot (the raw masked bits are returned, not 0/1).
 */
static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}
1670
1671 static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
1672                                         enum armv7_counters counter)
1673 {
1674         int ret;
1675
1676         if (counter == ARMV7_CYCLE_COUNTER)
1677                 ret = pmnc & ARMV7_FLAG_C;
1678         else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
1679                 ret = pmnc & ARMV7_FLAG_P(counter);
1680         else
1681                 pr_err("CPU%u checking wrong counter %d overflow status\n",
1682                         smp_processor_id(), counter);
1683
1684         return ret;
1685 }
1686
/*
 * Select event counter 'idx' (via the counter SELECT register) so a
 * following PMXEVCNTR/PMXEVTYPER access targets it.
 *
 * Returns idx on success, -1 if idx is not a valid event counter
 * (the cycle counter cannot be selected this way).
 */
static inline int armv7_pmnc_select_counter(unsigned int idx)
{
	u32 val;

	if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
		pr_err("CPU%u selecting wrong PMNC counter"
			" %d\n", smp_processor_id(), idx);
		return -1;
	}

	/* Map the perf index onto the hardware counter number. */
	val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));

	return idx;
}
1702
/*
 * Read the current value of counter 'idx': CCNT for the cycle
 * counter, otherwise select the event counter and read its count.
 * Returns 0 (and logs an error) for an invalid index or a failed
 * counter select.
 */
static inline u32 armv7pmu_read_counter(int idx)
{
	unsigned long value = 0;

	if (idx == ARMV7_CYCLE_COUNTER)
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
		if (armv7_pmnc_select_counter(idx) == idx)
			asm volatile("mrc p15, 0, %0, c9, c13, 2"
				     : "=r" (value));
	} else
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);

	return value;
}
1719
/*
 * Write 'value' into counter 'idx': CCNT for the cycle counter,
 * otherwise select the event counter and write its count register.
 * An invalid index only logs an error.
 */
static inline void armv7pmu_write_counter(int idx, u32 value)
{
	if (idx == ARMV7_CYCLE_COUNTER)
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
		if (armv7_pmnc_select_counter(idx) == idx)
			asm volatile("mcr p15, 0, %0, c9, c13, 2"
				     : : "r" (value));
	} else
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
}
1732
/*
 * Program the event type for counter 'idx' via the EVTSEL register.
 * Silently does nothing if the counter could not be selected.
 */
static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
{
	if (armv7_pmnc_select_counter(idx) == idx) {
		val &= ARMV7_EVTSEL_MASK;
		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
	}
}
1740
1741 static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
1742 {
1743         u32 val;
1744
1745         if ((idx != ARMV7_CYCLE_COUNTER) &&
1746             ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
1747                 pr_err("CPU%u enabling wrong PMNC counter"
1748                         " %d\n", smp_processor_id(), idx);
1749                 return -1;
1750         }
1751
1752         if (idx == ARMV7_CYCLE_COUNTER)
1753                 val = ARMV7_CNTENS_C;
1754         else
1755                 val = ARMV7_CNTENS_P(idx);
1756
1757         asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
1758
1759         return idx;
1760 }
1761
1762 static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
1763 {
1764         u32 val;
1765
1766
1767         if ((idx != ARMV7_CYCLE_COUNTER) &&
1768             ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
1769                 pr_err("CPU%u disabling wrong PMNC counter"
1770                         " %d\n", smp_processor_id(), idx);
1771                 return -1;
1772         }
1773
1774         if (idx == ARMV7_CYCLE_COUNTER)
1775                 val = ARMV7_CNTENC_C;
1776         else
1777                 val = ARMV7_CNTENC_P(idx);
1778
1779         asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
1780
1781         return idx;
1782 }
1783
1784 static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
1785 {
1786         u32 val;
1787
1788         if ((idx != ARMV7_CYCLE_COUNTER) &&
1789             ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
1790                 pr_err("CPU%u enabling wrong PMNC counter"
1791                         " interrupt enable %d\n", smp_processor_id(), idx);
1792                 return -1;
1793         }
1794
1795         if (idx == ARMV7_CYCLE_COUNTER)
1796                 val = ARMV7_INTENS_C;
1797         else
1798                 val = ARMV7_INTENS_P(idx);
1799
1800         asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
1801
1802         return idx;
1803 }
1804
1805 static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
1806 {
1807         u32 val;
1808
1809         if ((idx != ARMV7_CYCLE_COUNTER) &&
1810             ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
1811                 pr_err("CPU%u disabling wrong PMNC counter"
1812                         " interrupt enable %d\n", smp_processor_id(), idx);
1813                 return -1;
1814         }
1815
1816         if (idx == ARMV7_CYCLE_COUNTER)
1817                 val = ARMV7_INTENC_C;
1818         else
1819                 val = ARMV7_INTENC_P(idx);
1820
1821         asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
1822
1823         return idx;
1824 }
1825
/*
 * Read the overflow FLAG status register and clear the flags that
 * were set by writing them back (write-one-to-clear). Returns the
 * flags that were pending.
 */
static inline u32 armv7_pmnc_getreset_flags(void)
{
	u32 val;

	/* Read */
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write to clear flags */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

	return val;
}
1839
#ifdef DEBUG
/*
 * Debug-only dump of the PMU register state: PMNC, CNTENS, INTENS,
 * FLAGS, SELECT, CCNT, and each event counter's count and event
 * select value. Compiled only when DEBUG is defined at the top of
 * this file.
 */
static void armv7_pmnc_dump_regs(void)
{
	u32 val;
	unsigned int cnt;

	printk(KERN_INFO "PMNC registers dump:\n");

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	printk(KERN_INFO "PMNC  =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
	printk(KERN_INFO "CNTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
	printk(KERN_INFO "INTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
	printk(KERN_INFO "FLAGS =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
	printk(KERN_INFO "SELECT=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
	printk(KERN_INFO "CCNT  =0x%08x\n", val);

	/*
	 * NOTE(review): '<' stops one short of ARMV7_COUNTER_LAST;
	 * whether the last index is a live counter depends on how
	 * num_events was derived — confirm against the probe code.
	 */
	for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
			cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
			cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
	}
}
#endif
1877
/*
 * Enable counter 'idx' for the event described by hwc: the counter is
 * first disabled, its event type programmed (unless it is the cycle
 * counter), then its overflow interrupt and finally the counter
 * itself are enabled. The whole read/modify/write sequence is
 * serialized by pmu_lock with interrupts off.
 */
void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	spin_lock_irqsave(&pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We don't need to set the event if it's a cycle count
	 */
	if (idx != ARMV7_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv7_pmnc_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv7_pmnc_enable_counter(idx);

	spin_unlock_irqrestore(&pmu_lock, flags);
}
1912
/*
 * Stop counter 'idx': the counter and its overflow interrupt are
 * both disabled under pmu_lock. The hwc argument is unused here but
 * kept for the arm_pmu ->disable() signature.
 */
static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;

	/*
	 * Disable counter and interrupt
	 */
	spin_lock_irqsave(&pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv7_pmnc_disable_intens(idx);

	spin_unlock_irqrestore(&pmu_lock, flags);
}
1934
/*
 * PMU overflow interrupt handler. All counters share one interrupt
 * line, so the handler snapshots and clears the overflow flags, then
 * walks every active counter, updates the ones that overflowed and
 * re-arms their sample periods.
 */
static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmnc = armv7_pmnc_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv7_pmnc_has_overflowed(pmnc))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	/* Counter indices start at 1 (index 0 is reserved), hence <=. */
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;
		/* Period exhausted without a sample due: nothing to emit. */
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		/* Throttled by perf core: stop this counter. */
		if (perf_event_overflow(event, 0, &data, regs))
			armpmu->disable(hwc, idx);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts enabled. For
	 * platforms that can have the PMU interrupts raised as a PMI, this
	 * will not work.
	 */
	perf_event_do_pending();

	return IRQ_HANDLED;
}
1997
/* Start the PMU: set the master enable bit (PMNC.E) under pmu_lock. */
static void armv7pmu_start(void)
{
	unsigned long flags;

	spin_lock_irqsave(&pmu_lock, flags);
	/* Enable all counters */
	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
2007
/* Stop the PMU: clear the master enable bit (PMNC.E) under pmu_lock. */
static void armv7pmu_stop(void)
{
	unsigned long flags;

	spin_lock_irqsave(&pmu_lock, flags);
	/* Disable all counters */
	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
	spin_unlock_irqrestore(&pmu_lock, flags);
}
2017
2018 static inline int armv7_a8_pmu_event_map(int config)
2019 {
2020         int mapping = armv7_a8_perf_map[config];
2021         if (HW_OP_UNSUPPORTED == mapping)
2022                 mapping = -EOPNOTSUPP;
2023         return mapping;
2024 }
2025
2026 static inline int armv7_a9_pmu_event_map(int config)
2027 {
2028         int mapping = armv7_a9_perf_map[config];
2029         if (HW_OP_UNSUPPORTED == mapping)
2030                 mapping = -EOPNOTSUPP;
2031         return mapping;
2032 }
2033
2034 static u64 armv7pmu_raw_event(u64 config)
2035 {
2036         return config & 0xff;
2037 }
2038
2039 static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
2040                                   struct hw_perf_event *event)
2041 {
2042         int idx;
2043
2044         /* Always place a cycle counter into the cycle counter. */
2045         if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
2046                 if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
2047                         return -EAGAIN;
2048
2049                 return ARMV7_CYCLE_COUNTER;
2050         } else {
2051                 /*
2052                  * For anything other than a cycle counter, try and use
2053                  * the events counters
2054                  */
2055                 for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
2056                         if (!test_and_set_bit(idx, cpuc->used_mask))
2057                                 return idx;
2058                 }
2059
2060                 /* The counters are all in use. */
2061                 return -EAGAIN;
2062         }
2063 }
2064
/*
 * ARMv7 PMU operations. .name, .event_map and .num_events are filled
 * in by init_hw_perf_events() once the exact core (A8/A9) is known.
 */
static struct arm_pmu armv7pmu = {
	.handle_irq		= armv7pmu_handle_irq,
	.enable			= armv7pmu_enable_event,
	.disable		= armv7pmu_disable_event,
	.raw_event		= armv7pmu_raw_event,
	.read_counter		= armv7pmu_read_counter,
	.write_counter		= armv7pmu_write_counter,
	.get_event_idx		= armv7pmu_get_event_idx,
	.start			= armv7pmu_start,
	.stop			= armv7pmu_stop,
	.max_period		= (1LLU << 32) - 1,	/* 32-bit counters */
};
2077
2078 static u32 __init armv7_reset_read_pmnc(void)
2079 {
2080         u32 nb_cnt;
2081
2082         /* Initialize & Reset PMNC: C and P bits */
2083         armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
2084
2085         /* Read the nb of CNTx counters supported from PMNC */
2086         nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
2087
2088         /* Add the CPU cycles counter and return */
2089         return nb_cnt + 1;
2090 }
2091
/*
 * Boot-time PMU probe. Decodes the CPU ID register, selects the
 * matching PMU backend (ARMv6, ARM11 MPCore, or ARMv7 Cortex-A8/A9),
 * installs its cache event map and sets perf_max_events. Registered
 * as an arch_initcall below.
 */
static int __init
init_hw_perf_events(void)
{
	unsigned long cpuid = read_cpuid_id();
	unsigned long implementor = (cpuid & 0xFF000000) >> 24;
	unsigned long part_number = (cpuid & 0xFFF0);

	/* We only support ARM CPUs implemented by ARM at the moment. */
	if (0x41 == implementor) {
		switch (part_number) {
		case 0xB360:	/* ARM1136 */
		case 0xB560:	/* ARM1156 */
		case 0xB760:	/* ARM1176 */
			armpmu = &armv6pmu;
			memcpy(armpmu_perf_cache_map, armv6_perf_cache_map,
					sizeof(armv6_perf_cache_map));
			perf_max_events = armv6pmu.num_events;
			break;
		case 0xB020:	/* ARM11mpcore */
			armpmu = &armv6mpcore_pmu;
			memcpy(armpmu_perf_cache_map,
			       armv6mpcore_perf_cache_map,
			       sizeof(armv6mpcore_perf_cache_map));
			perf_max_events = armv6mpcore_pmu.num_events;
			break;
		case 0xC080:	/* Cortex-A8 */
			armv7pmu.name = ARMV7_PMU_CORTEX_A8_NAME;
			memcpy(armpmu_perf_cache_map, armv7_a8_perf_cache_map,
				sizeof(armv7_a8_perf_cache_map));
			armv7pmu.event_map = armv7_a8_pmu_event_map;
			armpmu = &armv7pmu;

			/* Reset PMNC and read the nb of CNTx counters
			    supported */
			armv7pmu.num_events = armv7_reset_read_pmnc();
			perf_max_events = armv7pmu.num_events;
			break;
		case 0xC090:	/* Cortex-A9 */
			armv7pmu.name = ARMV7_PMU_CORTEX_A9_NAME;
			memcpy(armpmu_perf_cache_map, armv7_a9_perf_cache_map,
				sizeof(armv7_a9_perf_cache_map));
			armv7pmu.event_map = armv7_a9_pmu_event_map;
			armpmu = &armv7pmu;

			/* Reset PMNC and read the nb of CNTx counters
			    supported */
			armv7pmu.num_events = armv7_reset_read_pmnc();
			perf_max_events = armv7pmu.num_events;
			break;
		default:
			/* Presumably -1 marks perf as unsupported here
			 * (armpmu stays NULL) — confirm against core perf. */
			pr_info("no hardware support available\n");
			perf_max_events = -1;
		}
	}

	if (armpmu)
		pr_info("enabled with %s PMU driver, %d counters available\n",
			armpmu->name, armpmu->num_events);

	return 0;
}
arch_initcall(init_hw_perf_events);
2154
2155 /*
2156  * Callchain handling code.
2157  */
2158 static inline void
2159 callchain_store(struct perf_callchain_entry *entry,
2160                 u64 ip)
2161 {
2162         if (entry->nr < PERF_MAX_STACK_DEPTH)
2163                 entry->ip[entry->nr++] = ip;
2164 }
2165
/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
	struct frame_tail   *fp;	/* caller's frame tail */
	unsigned long	    sp;		/* saved stack pointer */
	unsigned long	    lr;		/* return address */
} __attribute__((packed));		/* matches in-memory frame layout */
2179
/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 *
 * Returns NULL when the frame cannot be read from user memory or the
 * frame pointer does not progress up the stack (which would indicate
 * a corrupt or looping chain).
 */
static struct frame_tail *
user_backtrace(struct frame_tail *tail,
	       struct perf_callchain_entry *entry)
{
	struct frame_tail buftail;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;
	/* _inatomic: we may be in IRQ context, so no page faults. */
	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
		return NULL;

	callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp - 1;
}
2207
2208 static void
2209 perf_callchain_user(struct pt_regs *regs,
2210                     struct perf_callchain_entry *entry)
2211 {
2212         struct frame_tail *tail;
2213
2214         callchain_store(entry, PERF_CONTEXT_USER);
2215
2216         if (!user_mode(regs))
2217                 regs = task_pt_regs(current);
2218
2219         tail = (struct frame_tail *)regs->ARM_fp - 1;
2220
2221         while (tail && !((unsigned long)tail & 0x3))
2222                 tail = user_backtrace(tail, entry);
2223 }
2224
2225 /*
2226  * Gets called by walk_stackframe() for every stackframe. This will be called
2227  * whist unwinding the stackframe and is like a subroutine return so we use
2228  * the PC.
2229  */
2230 static int
2231 callchain_trace(struct stackframe *fr,
2232                 void *data)
2233 {
2234         struct perf_callchain_entry *entry = data;
2235         callchain_store(entry, fr->pc);
2236         return 0;
2237 }
2238
2239 static void
2240 perf_callchain_kernel(struct pt_regs *regs,
2241                       struct perf_callchain_entry *entry)
2242 {
2243         struct stackframe fr;
2244
2245         callchain_store(entry, PERF_CONTEXT_KERNEL);
2246         fr.fp = regs->ARM_fp;
2247         fr.sp = regs->ARM_sp;
2248         fr.lr = regs->ARM_lr;
2249         fr.pc = regs->ARM_pc;
2250         walk_stackframe(&fr, callchain_trace, entry);
2251 }
2252
/*
 * Build the callchain for a sample: a kernel backtrace when sampled
 * in kernel mode, plus a user backtrace whenever the current task has
 * a user address space.
 */
static void
perf_do_callchain(struct pt_regs *regs,
		  struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	/* Skip the idle task (pid 0). */
	if (!current || !current->pid)
		return;

	/* NOTE(review): user-mode samples of non-running tasks are
	 * dropped — presumably their user stack can't be trusted. */
	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user)
		perf_callchain_kernel(regs, entry);

	if (current->mm)
		perf_callchain_user(regs, entry);
}
2276
/* Per-CPU scratch buffer for building callchains in interrupt context. */
static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);

/*
 * Entry point used by the perf core to collect a callchain for the
 * current sample; returns the (reused) per-cpu entry.
 */
struct perf_callchain_entry *
perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);

	/* NOTE(review): single per-cpu buffer — assumes callchains are
	 * not built reentrantly on one CPU; confirm against callers. */
	entry->nr = 0;
	perf_do_callchain(regs, entry);
	return entry;
}