#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
 * code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>

static struct platform_device *pmu_device;

/*
 * Hardware lock to serialize accesses to PMU registers. Needed for the
 * read/modify/write sequences.
 */
DEFINE_SPINLOCK(pmu_lock);

/*
 * ARMv6 supports a maximum of 3 events, starting from index 1. If we add
 * another platform that supports more, we need to increase this to be the
 * largest of all platforms.
 *
 * ARMv7 supports up to 32 events:
 * cycle counter CCNT + 31 events counters CNT0..30.
 * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
 */
#define ARMPMU_MAX_HWEVENTS	33

/* The events for a given CPU. */
struct cpu_hw_events {
	/*
	 * The events that are active on the CPU for the given index. Index 0
	 * is reserved.
	 */
	struct perf_event *events[ARMPMU_MAX_HWEVENTS];

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];

	/*
	 * A 1 bit for an index indicates that the counter is actively being
	 * used.
	 */
	unsigned long active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

/* PMU names. */
static const char *arm_pmu_names[] = {
	[ARM_PERF_PMU_ID_XSCALE1] = "xscale1",
	[ARM_PERF_PMU_ID_XSCALE2] = "xscale2",
	[ARM_PERF_PMU_ID_V6] = "v6",
	[ARM_PERF_PMU_ID_V6MP] = "v6mpcore",
	[ARM_PERF_PMU_ID_CA8] = "ARMv7 Cortex-A8",
	[ARM_PERF_PMU_ID_CA9] = "ARMv7 Cortex-A9",
};

struct arm_pmu {
	enum arm_perf_pmu_ids id;
	irqreturn_t (*handle_irq)(int irq_num, void *dev);
	void (*enable)(struct hw_perf_event *evt, int idx);
	void (*disable)(struct hw_perf_event *evt, int idx);
	int (*event_map)(int evt);
	u64 (*raw_event)(u64);
	int (*get_event_idx)(struct cpu_hw_events *cpuc,
			     struct hw_perf_event *hwc);
	u32 (*read_counter)(int idx);
	void (*write_counter)(int idx, u32 val);
	void (*start)(void);
	void (*stop)(void);
	int num_events;
	u64 max_period;
};

/* Set at runtime when we know what CPU type we are. */
static const struct arm_pmu *armpmu;

enum arm_perf_pmu_ids
armpmu_get_pmu_id(void)
{
	int id = -ENODEV;

	if (armpmu != NULL)
		id = armpmu->id;

	return id;
}
EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);

#define HW_OP_UNSUPPORTED	0xFFFF

#define C(_x) \
	PERF_COUNT_HW_CACHE_##_x

#define CACHE_OP_UNSUPPORTED	0xFFFF

static unsigned armpmu_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
				     [PERF_COUNT_HW_CACHE_OP_MAX]
				     [PERF_COUNT_HW_CACHE_RESULT_MAX];

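/*
 * Generic perf cache events are packed into attr.config as
 * (type) | (op << 8) | (result << 16); e.g. an L1D read miss is
 * C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16). The fields are
 * decoded and range-checked below before indexing the map above.
 */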
static int
armpmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)armpmu_perf_cache_map[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

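/*
 * The counter is programmed with the negated remaining period (truncated
 * to 32 bits) so that it overflows, and raises the PMU interrupt, once
 * 'left' more events have been counted.
 */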
static int
armpmu_event_set_period(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)armpmu->max_period)
		left = armpmu->max_period;

	atomic64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

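/*
 * Fold the current hardware count into the event. prev_count is updated
 * with a cmpxchg loop and retried if it changed under us, and the paired
 * 32-bit shifts confine the difference to the counter width so that a
 * counter wrap between two reads still yields the correct delta.
 */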
static u64
armpmu_event_update(struct perf_event *event,
		    struct hw_perf_event *hwc,
		    int idx)
{
	int shift = 64 - 32;
	s64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(idx);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0);

	clear_bit(idx, cpuc->active_mask);
	armpmu->disable(hwc, idx);

	barrier();

	armpmu_event_update(event, hwc, idx);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void
armpmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	armpmu_event_update(event, hwc, hwc->idx);
}

static void
armpmu_unthrottle(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were throttled we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event, hwc, hwc->idx);
	armpmu->enable(hwc, hwc->idx);
}

static int
armpmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(hwc, idx);
	cpuc->events[idx] = event;
	set_bit(idx, cpuc->active_mask);

	/* Set the period for the event. */
	armpmu_event_set_period(event, hwc, idx);

	/* Enable the event. */
	armpmu->enable(hwc, idx);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	return err;
}

static struct pmu pmu = {
	.enable = armpmu_enable,
	.disable = armpmu_disable,
	.unthrottle = armpmu_unthrottle,
	.read = armpmu_read,
};

static int
validate_event(struct cpu_hw_events *cpuc,
	       struct perf_event *event)
{
	struct hw_perf_event fake_event = event->hw;

	if (event->pmu && event->pmu != &pmu)
		return 0;

	return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
}

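/*
 * A group of events can only go on the PMU when every member can have a
 * counter at the same time. Dry-run the counter allocation against an
 * empty, fake cpu_hw_events to check that the whole group could ever be
 * scheduled together.
 */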
static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_pmu;

	memset(&fake_pmu, 0, sizeof(fake_pmu));

	if (!validate_event(&fake_pmu, leader))
		return -ENOSPC;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -ENOSPC;
	}

	if (!validate_event(&fake_pmu, event))
		return -ENOSPC;

	return 0;
}

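/*
 * Claim the PMU platform device and request every interrupt it advertises.
 * If any request fails, the IRQs already taken are freed and the PMU is
 * released again before the error is returned.
 */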
static int
armpmu_reserve_hardware(void)
{
	int i, err = -ENODEV, irq;

	pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU);
	if (IS_ERR(pmu_device)) {
		pr_warning("unable to reserve pmu\n");
		return PTR_ERR(pmu_device);
	}

	init_pmu(ARM_PMU_DEVICE_CPU);

	if (pmu_device->num_resources < 1) {
		pr_err("no irqs for PMUs defined\n");
		return -ENODEV;
	}

	for (i = 0; i < pmu_device->num_resources; ++i) {
		irq = platform_get_irq(pmu_device, i);
		if (irq < 0)
			continue;

		err = request_irq(irq, armpmu->handle_irq,
				  IRQF_DISABLED | IRQF_NOBALANCING,
				  "armpmu", NULL);
		if (err) {
			pr_warning("unable to request IRQ%d for ARM perf "
				   "counters\n", irq);
			break;
		}
	}

	if (err) {
		for (i = i - 1; i >= 0; --i) {
			irq = platform_get_irq(pmu_device, i);
			if (irq >= 0)
				free_irq(irq, NULL);
		}
		release_pmu(pmu_device);
		pmu_device = NULL;
	}

	return err;
}

static void
armpmu_release_hardware(void)
{
	int i, irq;

	for (i = pmu_device->num_resources - 1; i >= 0; --i) {
		irq = platform_get_irq(pmu_device, i);
		if (irq >= 0)
			free_irq(irq, NULL);
	}
	armpmu->stop();

	release_pmu(pmu_device);
	pmu_device = NULL;
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);

static void
hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
		armpmu_release_hardware();
		mutex_unlock(&pmu_reserve_mutex);
	}
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int mapping, err;

	/* Decode the generic type into an ARM event identifier. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		mapping = armpmu->event_map(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		mapping = armpmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		mapping = armpmu->raw_event(event->attr.config);
	} else {
		pr_debug("event type %x not supported\n", event->attr.type);
		return -EOPNOTSUPP;
	}

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 * The ARM performance counters are on all of the time so if someone
	 * has asked us for some excludes then we have to fail.
	 */
	if (event->attr.exclude_kernel || event->attr.exclude_user ||
	    event->attr.exclude_hv || event->attr.exclude_idle) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EPERM;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put
	 * it yet. For SMP systems, each core has its own PMU so we can't do
	 * any clever allocation or constraints checking at this point.
	 */
	hwc->idx = -1;

	/*
	 * Store the event encoding into the config_base field. config and
	 * event_base are unused as the only 2 things we need to know are
	 * the event mapping and the counter to use. The counter to use is
	 * also the index and the config_base is the event type.
	 */
	hwc->config_base = (unsigned long)mapping;
	hwc->config = 0;
	hwc->event_base = 0;

	if (!hwc->sample_period) {
		hwc->sample_period = armpmu->max_period;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event) {
		err = validate_group(event);
		if (err)
			return -EINVAL;
	}

	return err;
}

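/*
 * active_events counts the events that currently exist. The PMU hardware
 * and its interrupts are claimed when the first event is created and are
 * released again by hw_perf_event_destroy() once the last event has gone.
 */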
const struct pmu *
hw_perf_event_init(struct perf_event *event)
{
	int err = 0;

	if (!armpmu)
		return ERR_PTR(-ENODEV);

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(&active_events)) {
		if (atomic_read(&active_events) > perf_max_events) {
			atomic_dec(&active_events);
			return ERR_PTR(-ENOSPC);
		}

		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			err = armpmu_reserve_hardware();
		}

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return ERR_PTR(err);

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err ? ERR_PTR(err) : &pmu;
}

void
hw_perf_enable(void)
{
	/* Enable all of the perf events on hardware. */
	int idx;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!armpmu)
		return;

	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];

		if (!event)
			continue;

		armpmu->enable(&event->hw, idx);
	}

	armpmu->start();
}

void
hw_perf_disable(void)
{
	if (armpmu)
		armpmu->stop();
}

/*
 * ARMv6 Performance counter handling code.
 *
 * ARMv6 has 2 configurable performance counters and a single cycle counter.
 * They all share a single reset bit but can be written to zero so we can use
 * that for a reset.
 *
 * The counters can't be individually enabled or disabled so when we remove
 * one event and replace it with another we could get spurious counts from the
 * wrong event. However, we can take advantage of the fact that the
 * performance counters can export events to the event bus, and the event bus
 * itself can be monitored. This requires that we *don't* export the events to
 * the event bus. The procedure for disabling a configurable counter is:
 *	- change the counter to count the ETMEXTOUT[0] signal (0x20). This
 *	  effectively stops the counter from counting.
 *	- disable the counter's interrupt generation (each counter has its
 *	  own interrupt enable bit).
 * Once stopped, the counter value can be written as 0 to reset.
 *
 * To enable a counter:
 *	- enable the counter's interrupt generation.
 *	- set the new event type.
 *
 * Note: the dedicated cycle counter only counts cycles and can't be
 * enabled/disabled independently of the others. When we want to disable the
 * cycle counter, we have to just disable the interrupt reporting and start
 * ignoring that counter. When re-enabling, we have to reset the value and
 * enable the interrupt.
 */

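/*
 * For example, parking counter 0 amounts to programming ARMV6_PERFCTR_NOP
 * (0x20, the ETMEXTOUT[0] event) into the EVT_COUNT0 field of the PMCR and
 * clearing COUNT0_IEN, which is exactly what armv6pmu_disable_event() below
 * does.
 */
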
enum armv6_perf_types {
	ARMV6_PERFCTR_ICACHE_MISS = 0x0,
	ARMV6_PERFCTR_IBUF_STALL = 0x1,
	ARMV6_PERFCTR_DDEP_STALL = 0x2,
	ARMV6_PERFCTR_ITLB_MISS = 0x3,
	ARMV6_PERFCTR_DTLB_MISS = 0x4,
	ARMV6_PERFCTR_BR_EXEC = 0x5,
	ARMV6_PERFCTR_BR_MISPREDICT = 0x6,
	ARMV6_PERFCTR_INSTR_EXEC = 0x7,
	ARMV6_PERFCTR_DCACHE_HIT = 0x9,
	ARMV6_PERFCTR_DCACHE_ACCESS = 0xA,
	ARMV6_PERFCTR_DCACHE_MISS = 0xB,
	ARMV6_PERFCTR_DCACHE_WBACK = 0xC,
	ARMV6_PERFCTR_SW_PC_CHANGE = 0xD,
	ARMV6_PERFCTR_MAIN_TLB_MISS = 0xF,
	ARMV6_PERFCTR_EXPL_D_ACCESS = 0x10,
	ARMV6_PERFCTR_LSU_FULL_STALL = 0x11,
	ARMV6_PERFCTR_WBUF_DRAINED = 0x12,
	ARMV6_PERFCTR_CPU_CYCLES = 0xFF,
	ARMV6_PERFCTR_NOP = 0x20,
};

enum armv6_counters {
	ARMV6_CYCLE_COUNTER = 1,
	ARMV6_COUNTER0,
	ARMV6_COUNTER1,
};

/*
 * The hardware events that we support. We do support cache operations but
 * we have harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC,
	[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT,
	[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
};

static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * The ARM performance counters can count micro DTLB misses,
		 * micro ITLB misses and main TLB misses. There isn't an event
		 * for TLB misses, so use the micro misses here and if users
		 * want the main TLB misses they can use a raw counter.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

enum armv6mpcore_perf_types {
	ARMV6MPCORE_PERFCTR_ICACHE_MISS = 0x0,
	ARMV6MPCORE_PERFCTR_IBUF_STALL = 0x1,
	ARMV6MPCORE_PERFCTR_DDEP_STALL = 0x2,
	ARMV6MPCORE_PERFCTR_ITLB_MISS = 0x3,
	ARMV6MPCORE_PERFCTR_DTLB_MISS = 0x4,
	ARMV6MPCORE_PERFCTR_BR_EXEC = 0x5,
	ARMV6MPCORE_PERFCTR_BR_NOTPREDICT = 0x6,
	ARMV6MPCORE_PERFCTR_BR_MISPREDICT = 0x7,
	ARMV6MPCORE_PERFCTR_INSTR_EXEC = 0x8,
	ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
	ARMV6MPCORE_PERFCTR_DCACHE_RDMISS = 0xB,
	ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
	ARMV6MPCORE_PERFCTR_DCACHE_WRMISS = 0xD,
	ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
	ARMV6MPCORE_PERFCTR_SW_PC_CHANGE = 0xF,
	ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS = 0x10,
	ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
	ARMV6MPCORE_PERFCTR_LSU_FULL_STALL = 0x12,
	ARMV6MPCORE_PERFCTR_WBUF_DRAINED = 0x13,
	ARMV6MPCORE_PERFCTR_CPU_CYCLES = 0xFF,
};

/*
 * The hardware events that we support. We do support cache operations but
 * we have harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
	[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
	[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
};

static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
			[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
			[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * The ARM performance counters can count micro DTLB misses,
		 * micro ITLB misses and main TLB misses. There isn't an event
		 * for TLB misses, so use the micro misses here and if users
		 * want the main TLB misses they can use a raw counter.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

static inline unsigned long
armv6_pmcr_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val));
	return val;
}

static inline void
armv6_pmcr_write(unsigned long val)
{
	asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val));
}

#define ARMV6_PMCR_ENABLE		(1 << 0)
#define ARMV6_PMCR_CTR01_RESET		(1 << 1)
#define ARMV6_PMCR_CCOUNT_RESET		(1 << 2)
#define ARMV6_PMCR_CCOUNT_DIV		(1 << 3)
#define ARMV6_PMCR_COUNT0_IEN		(1 << 4)
#define ARMV6_PMCR_COUNT1_IEN		(1 << 5)
#define ARMV6_PMCR_CCOUNT_IEN		(1 << 6)
#define ARMV6_PMCR_COUNT0_OVERFLOW	(1 << 8)
#define ARMV6_PMCR_COUNT1_OVERFLOW	(1 << 9)
#define ARMV6_PMCR_CCOUNT_OVERFLOW	(1 << 10)
#define ARMV6_PMCR_EVT_COUNT0_SHIFT	20
#define ARMV6_PMCR_EVT_COUNT0_MASK	(0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
#define ARMV6_PMCR_EVT_COUNT1_SHIFT	12
#define ARMV6_PMCR_EVT_COUNT1_MASK	(0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)

#define ARMV6_PMCR_OVERFLOWED_MASK \
	(ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
	 ARMV6_PMCR_CCOUNT_OVERFLOW)

static inline int
armv6_pmcr_has_overflowed(unsigned long pmcr)
{
	return (pmcr & ARMV6_PMCR_OVERFLOWED_MASK);
}

static inline int
armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
				  enum armv6_counters counter)
{
	int ret = 0;

	if (ARMV6_CYCLE_COUNTER == counter)
		ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
	else if (ARMV6_COUNTER0 == counter)
		ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
	else if (ARMV6_COUNTER1 == counter)
		ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);

	return ret;
}

static inline u32
armv6pmu_read_counter(int counter)
{
	unsigned long value = 0;

	if (ARMV6_CYCLE_COUNTER == counter)
		asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value));
	else if (ARMV6_COUNTER0 == counter)
		asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value));
	else if (ARMV6_COUNTER1 == counter)
		asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value));
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);

	return value;
}

static inline void
armv6pmu_write_counter(int counter,
		       u32 value)
{
	if (ARMV6_CYCLE_COUNTER == counter)
		asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value));
	else if (ARMV6_COUNTER0 == counter)
		asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value));
	else if (ARMV6_COUNTER1 == counter)
		asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r"(value));
	else
		WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}

void
armv6pmu_enable_event(struct hw_perf_event *hwc,
		      int idx)
{
	unsigned long val, mask, evt, flags;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask = 0;
		evt = ARMV6_PMCR_CCOUNT_IEN;
	} else if (ARMV6_COUNTER0 == idx) {
		mask = ARMV6_PMCR_EVT_COUNT0_MASK;
		evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
		      ARMV6_PMCR_COUNT0_IEN;
	} else if (ARMV6_COUNTER1 == idx) {
		mask = ARMV6_PMCR_EVT_COUNT1_MASK;
		evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
		      ARMV6_PMCR_COUNT1_IEN;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Mask out the current event and set the counter to count the event
	 * that we're interested in.
	 */
	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}

static irqreturn_t
armv6pmu_handle_irq(int irq_num,
		    void *dev)
{
	unsigned long pmcr = armv6_pmcr_read();
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	if (!armv6_pmcr_has_overflowed(pmcr))
		return IRQ_NONE;

	regs = get_irq_regs();

	/*
	 * The interrupts are cleared by writing the overflow flags back to
	 * the control register. All of the other bits don't have any effect
	 * if they are rewritten, so write the whole value back.
	 */
	armv6_pmcr_write(pmcr);

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 0, &data, regs))
			armpmu->disable(hwc, idx);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts enabled. For
	 * platforms that can have the PMU interrupts raised as a PMI, this
	 * will not work.
	 */
	perf_event_do_pending();

	return IRQ_HANDLED;
}

static void
armv6pmu_start(void)
{
	unsigned long flags, val;

	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val |= ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}

void
armv6pmu_stop(void)
{
	unsigned long flags, val;

	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~ARMV6_PMCR_ENABLE;
	armv6_pmcr_write(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}

static inline int
armv6pmu_event_map(int config)
{
	int mapping = armv6_perf_map[config];
	if (HW_OP_UNSUPPORTED == mapping)
		mapping = -EOPNOTSUPP;
	return mapping;
}

static inline int
armv6mpcore_pmu_event_map(int config)
{
	int mapping = armv6mpcore_perf_map[config];
	if (HW_OP_UNSUPPORTED == mapping)
		mapping = -EOPNOTSUPP;
	return mapping;
}

static u64
armv6pmu_raw_event(u64 config)
{
	return config & 0xff;
}

static int
armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
		       struct hw_perf_event *event)
{
	/* Always place a cycle counter into the cycle counter. */
	if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
		if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV6_CYCLE_COUNTER;
	} else {
		/*
		 * For anything other than a cycle counter, try and use
		 * counter0 and counter1.
		 */
		if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask)) {
			return ARMV6_COUNTER1;
		}

		if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask)) {
			return ARMV6_COUNTER0;
		}

		/* The counters are all in use. */
		return -EAGAIN;
	}
}

static void
armv6pmu_disable_event(struct hw_perf_event *hwc,
		       int idx)
{
	unsigned long val, mask, evt, flags;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask = ARMV6_PMCR_CCOUNT_IEN;
		evt = 0;
	} else if (ARMV6_COUNTER0 == idx) {
		mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
		evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
	} else if (ARMV6_COUNTER1 == idx) {
		mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
		evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Mask out the current event and set the counter to count the number
	 * of ETM bus signal assertion cycles. The external reporting should
	 * be disabled and so this should never increment.
	 */
	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}

static void
armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
			      int idx)
{
	unsigned long val, mask, flags, evt = 0;

	if (ARMV6_CYCLE_COUNTER == idx) {
		mask = ARMV6_PMCR_CCOUNT_IEN;
	} else if (ARMV6_COUNTER0 == idx) {
		mask = ARMV6_PMCR_COUNT0_IEN;
	} else if (ARMV6_COUNTER1 == idx) {
		mask = ARMV6_PMCR_COUNT1_IEN;
	} else {
		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
		return;
	}

	/*
	 * Unlike UP ARMv6, we don't have a way of stopping the counters. We
	 * simply disable the interrupt reporting.
	 */
	spin_lock_irqsave(&pmu_lock, flags);
	val = armv6_pmcr_read();
	val &= ~mask;
	val |= evt;
	armv6_pmcr_write(val);
	spin_unlock_irqrestore(&pmu_lock, flags);
}

static const struct arm_pmu armv6pmu = {
	.id = ARM_PERF_PMU_ID_V6,
	.handle_irq = armv6pmu_handle_irq,
	.enable = armv6pmu_enable_event,
	.disable = armv6pmu_disable_event,
	.event_map = armv6pmu_event_map,
	.raw_event = armv6pmu_raw_event,
	.read_counter = armv6pmu_read_counter,
	.write_counter = armv6pmu_write_counter,
	.get_event_idx = armv6pmu_get_event_idx,
	.start = armv6pmu_start,
	.stop = armv6pmu_stop,
	.num_events = 3,
	.max_period = (1LLU << 32) - 1,
};

/*
 * ARMv6mpcore is almost identical to single core ARMv6 with the exception
 * that some of the events have different enumerations and that there is no
 * *hack* to stop the programmable counters. To stop the counters we simply
 * disable the interrupt reporting and update the event. When unthrottling we
 * reset the period and enable the interrupt reporting.
 */
static const struct arm_pmu armv6mpcore_pmu = {
	.id = ARM_PERF_PMU_ID_V6MP,
	.handle_irq = armv6pmu_handle_irq,
	.enable = armv6pmu_enable_event,
	.disable = armv6mpcore_pmu_disable_event,
	.event_map = armv6mpcore_pmu_event_map,
	.raw_event = armv6pmu_raw_event,
	.read_counter = armv6pmu_read_counter,
	.write_counter = armv6pmu_write_counter,
	.get_event_idx = armv6pmu_get_event_idx,
	.start = armv6pmu_start,
	.stop = armv6pmu_stop,
	.num_events = 3,
	.max_period = (1LLU << 32) - 1,
};

/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * Copied from ARMv6 code, with the low level code inspired
 * by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 * a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 * a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately. The cycle
 * counter and all 4 performance counters together can be reset separately.
 */

/* Common ARMv7 event types */
enum armv7_perf_types {
	ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
	ARMV7_PERFCTR_IFETCH_MISS = 0x01,
	ARMV7_PERFCTR_ITLB_MISS = 0x02,
	ARMV7_PERFCTR_DCACHE_REFILL = 0x03,
	ARMV7_PERFCTR_DCACHE_ACCESS = 0x04,
	ARMV7_PERFCTR_DTLB_REFILL = 0x05,
	ARMV7_PERFCTR_DREAD = 0x06,
	ARMV7_PERFCTR_DWRITE = 0x07,

	ARMV7_PERFCTR_EXC_TAKEN = 0x09,
	ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
	ARMV7_PERFCTR_CID_WRITE = 0x0B,
	/*
	 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
	 * It counts:
	 *  - all branch instructions,
	 *  - instructions that explicitly write the PC,
	 *  - exception generating instructions.
	 */
	ARMV7_PERFCTR_PC_WRITE = 0x0C,
	ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
	ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F,
	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
	ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,

	ARMV7_PERFCTR_PC_BRANCH_MIS_USED = 0x12,

	ARMV7_PERFCTR_CPU_CYCLES = 0xFF
};

/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
	ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,

	ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,

	ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40,
	ARMV7_PERFCTR_L2_STORE_MERGED = 0x41,
	ARMV7_PERFCTR_L2_STORE_BUFF = 0x42,
	ARMV7_PERFCTR_L2_ACCESS = 0x43,
	ARMV7_PERFCTR_L2_CACH_MISS = 0x44,
	ARMV7_PERFCTR_AXI_READ_CYCLES = 0x45,
	ARMV7_PERFCTR_AXI_WRITE_CYCLES = 0x46,
	ARMV7_PERFCTR_MEMORY_REPLAY = 0x47,
	ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY = 0x48,
	ARMV7_PERFCTR_L1_DATA_MISS = 0x49,
	ARMV7_PERFCTR_L1_INST_MISS = 0x4A,
	ARMV7_PERFCTR_L1_DATA_COLORING = 0x4B,
	ARMV7_PERFCTR_L1_NEON_DATA = 0x4C,
	ARMV7_PERFCTR_L1_NEON_CACH_DATA = 0x4D,
	ARMV7_PERFCTR_L2_NEON = 0x4E,
	ARMV7_PERFCTR_L2_NEON_HIT = 0x4F,
	ARMV7_PERFCTR_L1_INST = 0x50,
	ARMV7_PERFCTR_PC_RETURN_MIS_PRED = 0x51,
	ARMV7_PERFCTR_PC_BRANCH_FAILED = 0x52,
	ARMV7_PERFCTR_PC_BRANCH_TAKEN = 0x53,
	ARMV7_PERFCTR_PC_BRANCH_EXECUTED = 0x54,
	ARMV7_PERFCTR_OP_EXECUTED = 0x55,
	ARMV7_PERFCTR_CYCLES_INST_STALL = 0x56,
	ARMV7_PERFCTR_CYCLES_INST = 0x57,
	ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL = 0x58,
	ARMV7_PERFCTR_CYCLES_NEON_INST_STALL = 0x59,
	ARMV7_PERFCTR_NEON_CYCLES = 0x5A,

	ARMV7_PERFCTR_PMU0_EVENTS = 0x70,
	ARMV7_PERFCTR_PMU1_EVENTS = 0x71,
	ARMV7_PERFCTR_PMU_EVENTS = 0x72,
};

/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
	ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC = 0x40,
	ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC = 0x41,
	ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC = 0x42,

	ARMV7_PERFCTR_COHERENT_LINE_MISS = 0x50,
	ARMV7_PERFCTR_COHERENT_LINE_HIT = 0x51,

	ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES = 0x60,
	ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES = 0x61,
	ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62,
	ARMV7_PERFCTR_STREX_EXECUTED_PASSED = 0x63,
	ARMV7_PERFCTR_STREX_EXECUTED_FAILED = 0x64,
	ARMV7_PERFCTR_DATA_EVICTION = 0x65,
	ARMV7_PERFCTR_ISSUE_STAGE_NO_INST = 0x66,
	ARMV7_PERFCTR_ISSUE_STAGE_EMPTY = 0x67,
	ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE = 0x68,

	ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E,

	ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST = 0x70,
	ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71,
	ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST = 0x72,
	ARMV7_PERFCTR_FP_EXECUTED_INST = 0x73,
	ARMV7_PERFCTR_NEON_EXECUTED_INST = 0x74,

	ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80,
	ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES = 0x81,
	ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES = 0x82,
	ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES = 0x83,
	ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES = 0x84,
	ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES = 0x85,
	ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES = 0x86,

	ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES = 0x8A,
	ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B,

	ARMV7_PERFCTR_ISB_INST = 0x90,
	ARMV7_PERFCTR_DSB_INST = 0x91,
	ARMV7_PERFCTR_DMB_INST = 0x92,
	ARMV7_PERFCTR_EXT_INTERRUPTS = 0x93,

	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED = 0xA0,
	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED = 0xA1,
	ARMV7_PERFCTR_PLE_FIFO_FLUSH = 0xA2,
	ARMV7_PERFCTR_PLE_RQST_COMPLETED = 0xA3,
	ARMV7_PERFCTR_PLE_FIFO_OVERFLOW = 0xA4,
	ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5
};

/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * Only ITLB misses and DTLB refills are supported.
		 * If users want the DTLB refill misses, a raw counter
		 * must be used.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT,
	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * Only ITLB misses and DTLB refills are supported.
		 * If users want the DTLB refill misses, a raw counter
		 * must be used.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Perf Events counters
 */
enum armv7_counters {
	ARMV7_CYCLE_COUNTER = 1,	/* Cycle counter */
	ARMV7_COUNTER0 = 2,		/* First event counter */
};

/*
 * The cycle counter is ARMV7_CYCLE_COUNTER.
 * The first event counter is ARMV7_COUNTER0.
 * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
 */
#define ARMV7_COUNTER_LAST	(ARMV7_COUNTER0 + armpmu->num_events - 1)

/*
 * ARMv7 low level PMNC access
 */

/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
#define ARMV7_PMNC_N_MASK	0x1f
#define ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */

/*
 * Available counters
 */
#define ARMV7_CNT0		0	/* First event counter */
#define ARMV7_CCNT		31	/* Cycle counter */

/* Perf Event to low level counters mapping */
#define ARMV7_EVENT_CNT_TO_CNTx	(ARMV7_COUNTER0 - ARMV7_CNT0)

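/*
 * For example, event index 2 (ARMV7_COUNTER0) maps to hardware counter 0
 * (ARMV7_CNT0), i.e. bit 0 in the per-counter enable/interrupt/overflow
 * registers defined below.
 */
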
/*
 * CNTENS: counters enable reg
 */
#define ARMV7_CNTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENS_C		(1 << ARMV7_CCNT)

/*
 * CNTENC: counters disable reg
 */
#define ARMV7_CNTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENC_C		(1 << ARMV7_CCNT)

/*
 * INTENS: counters overflow interrupt enable reg
 */
#define ARMV7_INTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENS_C		(1 << ARMV7_CCNT)

/*
 * INTENC: counters overflow interrupt disable reg
 */
#define ARMV7_INTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENC_C		(1 << ARMV7_CCNT)

/*
 * EVTSEL: Event selection reg
 */
#define ARMV7_EVTSEL_MASK	0xff	/* Mask for writable bits */

/*
 * SELECT: Counter selection reg
 */
#define ARMV7_SELECT_MASK	0x1f	/* Mask for writable bits */

/*
 * FLAG: counters overflow flag status reg
 */
#define ARMV7_FLAG_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_FLAG_C		(1 << ARMV7_CCNT)
#define ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
#define ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK

static inline unsigned long armv7_pmnc_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
	return val;
}

static inline void armv7_pmnc_write(unsigned long val)
{
	val &= ARMV7_PMNC_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}

static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}

static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
					enum armv7_counters counter)
{
	int ret = 0;

	if (counter == ARMV7_CYCLE_COUNTER)
		ret = pmnc & ARMV7_FLAG_C;
	else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
		ret = pmnc & ARMV7_FLAG_P(counter);
	else
		pr_err("CPU%u checking wrong counter %d overflow status\n",
			smp_processor_id(), counter);

	return ret;
}

1706static inline int armv7_pmnc_select_counter(unsigned int idx)
1707{
1708 u32 val;
1709
1710 if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
1711 pr_err("CPU%u selecting wrong PMNC counter"
1712 " %d\n", smp_processor_id(), idx);
1713 return -1;
1714 }
1715
1716 val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
1717 asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
1718
1719 return idx;
1720}
1721
1722static inline u32 armv7pmu_read_counter(int idx)
1723{
1724 unsigned long value = 0;
1725
1726 if (idx == ARMV7_CYCLE_COUNTER)
1727 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
1728 else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
1729 if (armv7_pmnc_select_counter(idx) == idx)
1730 asm volatile("mrc p15, 0, %0, c9, c13, 2"
1731 : "=r" (value));
1732 } else
1733 pr_err("CPU%u reading wrong counter %d\n",
1734 smp_processor_id(), idx);
1735
1736 return value;
1737}
1738
1739static inline void armv7pmu_write_counter(int idx, u32 value)
1740{
1741 if (idx == ARMV7_CYCLE_COUNTER)
1742 asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
1743 else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
1744 if (armv7_pmnc_select_counter(idx) == idx)
1745 asm volatile("mcr p15, 0, %0, c9, c13, 2"
1746 : : "r" (value));
1747 } else
1748 pr_err("CPU%u writing wrong counter %d\n",
1749 smp_processor_id(), idx);
1750}
1751
1752static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
1753{
1754 if (armv7_pmnc_select_counter(idx) == idx) {
1755 val &= ARMV7_EVTSEL_MASK;
1756 asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
1757 }
1758}
1759
1760static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
1761{
1762 u32 val;
1763
1764 if ((idx != ARMV7_CYCLE_COUNTER) &&
1765 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
1766 pr_err("CPU%u enabling wrong PMNC counter"
1767 " %d\n", smp_processor_id(), idx);
1768 return -1;
1769 }
1770
1771 if (idx == ARMV7_CYCLE_COUNTER)
1772 val = ARMV7_CNTENS_C;
1773 else
1774 val = ARMV7_CNTENS_P(idx);
1775
1776 asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
1777
1778 return idx;
1779}
1780
1781static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
1782{
1783 u32 val;
1784
1785
1786 if ((idx != ARMV7_CYCLE_COUNTER) &&
1787 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
1788 pr_err("CPU%u disabling wrong PMNC counter"
1789 " %d\n", smp_processor_id(), idx);
1790 return -1;
1791 }
1792
1793 if (idx == ARMV7_CYCLE_COUNTER)
1794 val = ARMV7_CNTENC_C;
1795 else
1796 val = ARMV7_CNTENC_P(idx);
1797
1798 asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
1799
1800 return idx;
1801}
1802
1803static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
1804{
1805 u32 val;
1806
1807 if ((idx != ARMV7_CYCLE_COUNTER) &&
1808 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
1809 pr_err("CPU%u enabling wrong PMNC counter"
1810 " interrupt enable %d\n", smp_processor_id(), idx);
1811 return -1;
1812 }
1813
1814 if (idx == ARMV7_CYCLE_COUNTER)
1815 val = ARMV7_INTENS_C;
1816 else
1817 val = ARMV7_INTENS_P(idx);
1818
1819 asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
1820
1821 return idx;
1822}
1823
1824static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
1825{
1826 u32 val;
1827
1828 if ((idx != ARMV7_CYCLE_COUNTER) &&
1829 ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
1830 pr_err("CPU%u disabling wrong PMNC counter"
1831 " interrupt enable %d\n", smp_processor_id(), idx);
1832 return -1;
1833 }
1834
1835 if (idx == ARMV7_CYCLE_COUNTER)
1836 val = ARMV7_INTENC_C;
1837 else
1838 val = ARMV7_INTENC_P(idx);
1839
1840 asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
1841
1842 return idx;
1843}
1844
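/*
 * Read the overflow flag status register (PMOVSR, c9, c12, 3) and
 * acknowledge the flags that were set by writing them back; the
 * register is write-one-to-clear.
 */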
1845static inline u32 armv7_pmnc_getreset_flags(void)
1846{
1847 u32 val;
1848
1849 /* Read */
1850 asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
1851
1852 /* Write to clear flags */
1853 val &= ARMV7_FLAG_MASK;
1854 asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
1855
1856 return val;
1857}
1858
1859#ifdef DEBUG
1860static void armv7_pmnc_dump_regs(void)
1861{
1862 u32 val;
1863 unsigned int cnt;
1864
1865 printk(KERN_INFO "PMNC registers dump:\n");
1866
1867 asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
1868 printk(KERN_INFO "PMNC =0x%08x\n", val);
1869
1870 asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
1871 printk(KERN_INFO "CNTENS=0x%08x\n", val);
1872
1873 asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
1874 printk(KERN_INFO "INTENS=0x%08x\n", val);
1875
1876 asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
1877 printk(KERN_INFO "FLAGS =0x%08x\n", val);
1878
1879 asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
1880 printk(KERN_INFO "SELECT=0x%08x\n", val);
1881
1882 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
1883 printk(KERN_INFO "CCNT =0x%08x\n", val);
1884
1885 for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
1886 armv7_pmnc_select_counter(cnt);
1887 asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
1888 printk(KERN_INFO "CNT[%d] count =0x%08x\n",
1889 cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
1890 asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
1891 printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
1892 cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
1893 }
1894}
1895#endif
1896
1897void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
1898{
1899 unsigned long flags;
1900
1901 /*
1902 * Enable counter and interrupt, and set the counter to count
1903 * the event that we're interested in.
1904 */
1905 spin_lock_irqsave(&pmu_lock, flags);
1906
1907 /*
1908 * Disable counter
1909 */
1910 armv7_pmnc_disable_counter(idx);
1911
1912 /*
1913 * Set event (if destined for PMNx counters)
1914 * We don't need to set the event if it's a cycle count
1915 */
1916 if (idx != ARMV7_CYCLE_COUNTER)
1917 armv7_pmnc_write_evtsel(idx, hwc->config_base);
1918
1919 /*
1920 * Enable interrupt for this counter
1921 */
1922 armv7_pmnc_enable_intens(idx);
1923
1924 /*
1925 * Enable counter
1926 */
1927 armv7_pmnc_enable_counter(idx);
1928
1929 spin_unlock_irqrestore(&pmu_lock, flags);
1930}
1931
1932static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
1933{
1934 unsigned long flags;
1935
1936 /*
1937 * Disable counter and interrupt
1938 */
1939 spin_lock_irqsave(&pmu_lock, flags);
1940
1941 /*
1942 * Disable counter
1943 */
1944 armv7_pmnc_disable_counter(idx);
1945
1946 /*
1947 * Disable interrupt for this counter
1948 */
1949 armv7_pmnc_disable_intens(idx);
1950
1951 spin_unlock_irqrestore(&pmu_lock, flags);
1952}
1953
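/*
 * PMU overflow interrupt handler. All of the counters share a single
 * interrupt line, so the handler snapshots and clears the overflow
 * flags and then walks every active counter: each counter that has
 * overflowed gets its event count updated and a fresh sample period
 * programmed, and the sample is passed to perf_event_overflow(). If
 * the core asks for the event to be stopped, the counter is disabled.
 */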
1954static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
1955{
1956 unsigned long pmnc;
1957 struct perf_sample_data data;
1958 struct cpu_hw_events *cpuc;
1959 struct pt_regs *regs;
1960 int idx;
1961
1962 /*
1963 * Get and reset the IRQ flags
1964 */
1965 pmnc = armv7_pmnc_getreset_flags();
1966
1967 /*
1968 * Did an overflow occur?
1969 */
1970 if (!armv7_pmnc_has_overflowed(pmnc))
1971 return IRQ_NONE;
1972
1973 /*
1974 * Handle the counter(s) overflow(s)
1975 */
1976 regs = get_irq_regs();
1977
Peter Zijlstradc1d6282010-03-03 15:55:04 +01001978 perf_sample_data_init(&data, 0);
Jean PIHET796d1292010-01-26 18:51:05 +01001979
1980 cpuc = &__get_cpu_var(cpu_hw_events);
1981 for (idx = 0; idx <= armpmu->num_events; ++idx) {
1982 struct perf_event *event = cpuc->events[idx];
1983 struct hw_perf_event *hwc;
1984
1985 if (!test_bit(idx, cpuc->active_mask))
1986 continue;
1987
1988 /*
1989 * We have a single interrupt for all counters. Check that
1990 * each counter has overflowed before we process it.
1991 */
1992 if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
1993 continue;
1994
1995 hwc = &event->hw;
1996 armpmu_event_update(event, hwc, idx);
1997 data.period = event->hw.last_period;
1998 if (!armpmu_event_set_period(event, hwc, idx))
1999 continue;
2000
2001 if (perf_event_overflow(event, 0, &data, regs))
2002 armpmu->disable(hwc, idx);
2003 }
2004
2005 /*
2006 * Handle the pending perf events.
2007 *
2008	 * Note: this call *must* be run with interrupts disabled. For
2009	 * platforms that can have the PMU interrupts raised as an NMI, this
2010	 * will not work.
2011 */
2012 perf_event_do_pending();
2013
2014 return IRQ_HANDLED;
2015}
2016
2017static void armv7pmu_start(void)
2018{
2019 unsigned long flags;
2020
2021 spin_lock_irqsave(&pmu_lock, flags);
2022 /* Enable all counters */
2023 armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
2024 spin_unlock_irqrestore(&pmu_lock, flags);
2025}
2026
2027static void armv7pmu_stop(void)
2028{
2029 unsigned long flags;
2030
2031 spin_lock_irqsave(&pmu_lock, flags);
2032 /* Disable all counters */
2033 armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
2034 spin_unlock_irqrestore(&pmu_lock, flags);
2035}
2036
2037static inline int armv7_a8_pmu_event_map(int config)
2038{
2039 int mapping = armv7_a8_perf_map[config];
2040 if (HW_OP_UNSUPPORTED == mapping)
2041 mapping = -EOPNOTSUPP;
2042 return mapping;
2043}
2044
2045static inline int armv7_a9_pmu_event_map(int config)
2046{
2047 int mapping = armv7_a9_perf_map[config];
2048 if (HW_OP_UNSUPPORTED == mapping)
2049 mapping = -EOPNOTSUPP;
2050 return mapping;
2051}
2052
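/*
 * Only the low 8 bits of a raw config are meaningful here: they are
 * the ARMv7 event number that ends up in the event selection register.
 * As a usage sketch (exact event numbers are CPU specific, see the TRM
 * for the core in question), something like
 *
 *	perf stat -e r8 <cmd>
 *
 * requests raw event 0x08 through this path.
 */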
2053static u64 armv7pmu_raw_event(u64 config)
2054{
2055 return config & 0xff;
2056}
2057
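/*
 * Counter allocation policy: a cycle counting event always claims the
 * dedicated cycle counter; anything else takes the first free CNTx
 * slot. -EAGAIN tells the core that the counters are busy rather than
 * that the event itself is unsupported.
 */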
2058static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
2059 struct hw_perf_event *event)
2060{
2061 int idx;
2062
2063	/* Always place a cycle count event into the cycle counter. */
2064 if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
2065 if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
2066 return -EAGAIN;
2067
2068 return ARMV7_CYCLE_COUNTER;
2069 } else {
2070 /*
2071 * For anything other than a cycle counter, try and use
2072 * the events counters
2073 */
2074 for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
2075 if (!test_and_set_bit(idx, cpuc->used_mask))
2076 return idx;
2077 }
2078
2079 /* The counters are all in use. */
2080 return -EAGAIN;
2081 }
2082}
2083
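/*
 * Template arm_pmu for ARMv7. The CPU-specific fields (id, event_map
 * and num_events) are filled in by init_hw_perf_events() below once
 * the exact core (Cortex-A8 or Cortex-A9) has been identified.
 */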
2084static struct arm_pmu armv7pmu = {
2085 .handle_irq = armv7pmu_handle_irq,
2086 .enable = armv7pmu_enable_event,
2087 .disable = armv7pmu_disable_event,
2088 .raw_event = armv7pmu_raw_event,
2089 .read_counter = armv7pmu_read_counter,
2090 .write_counter = armv7pmu_write_counter,
2091 .get_event_idx = armv7pmu_get_event_idx,
2092 .start = armv7pmu_start,
2093 .stop = armv7pmu_stop,
2094 .max_period = (1LLU << 32) - 1,
2095};
2096
2097static u32 __init armv7_reset_read_pmnc(void)
2098{
2099 u32 nb_cnt;
2100
2101 /* Initialize & Reset PMNC: C and P bits */
2102 armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
2103
2104	/* Read the number of CNTx counters supported from PMNC */
2105 nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
2106
2107 /* Add the CPU cycles counter and return */
2108 return nb_cnt + 1;
2109}
2110
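/*
 * Probe the CPU at boot: the implementer field of the CPU ID register
 * (bits [31:24], 0x41 being ARM Ltd.) and the primary part number
 * (bits [15:4]) select the armpmu backend and its event/cache maps.
 */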
Jamie Iles1b8873a2010-02-02 20:25:44 +01002111static int __init
2112init_hw_perf_events(void)
2113{
2114 unsigned long cpuid = read_cpuid_id();
2115 unsigned long implementor = (cpuid & 0xFF000000) >> 24;
2116 unsigned long part_number = (cpuid & 0xFFF0);
2117
2118 /* We only support ARM CPUs implemented by ARM at the moment. */
2119 if (0x41 == implementor) {
2120 switch (part_number) {
2121 case 0xB360: /* ARM1136 */
2122 case 0xB560: /* ARM1156 */
2123 case 0xB760: /* ARM1176 */
2124 armpmu = &armv6pmu;
2125 memcpy(armpmu_perf_cache_map, armv6_perf_cache_map,
2126 sizeof(armv6_perf_cache_map));
2127 perf_max_events = armv6pmu.num_events;
2128 break;
2129 case 0xB020: /* ARM11mpcore */
2130 armpmu = &armv6mpcore_pmu;
2131 memcpy(armpmu_perf_cache_map,
2132 armv6mpcore_perf_cache_map,
2133 sizeof(armv6mpcore_perf_cache_map));
2134 perf_max_events = armv6mpcore_pmu.num_events;
2135 break;
Jean PIHET796d1292010-01-26 18:51:05 +01002136 case 0xC080: /* Cortex-A8 */
Will Deacon181193f2010-04-30 11:32:44 +01002137 armv7pmu.id = ARM_PERF_PMU_ID_CA8;
Jean PIHET796d1292010-01-26 18:51:05 +01002138 memcpy(armpmu_perf_cache_map, armv7_a8_perf_cache_map,
2139 sizeof(armv7_a8_perf_cache_map));
2140 armv7pmu.event_map = armv7_a8_pmu_event_map;
2141 armpmu = &armv7pmu;
2142
2143			/* Reset PMNC and read the number of CNTx
2144			   counters supported */
2145 armv7pmu.num_events = armv7_reset_read_pmnc();
2146 perf_max_events = armv7pmu.num_events;
2147 break;
2148 case 0xC090: /* Cortex-A9 */
Will Deacon181193f2010-04-30 11:32:44 +01002149 armv7pmu.id = ARM_PERF_PMU_ID_CA9;
Jean PIHET796d1292010-01-26 18:51:05 +01002150 memcpy(armpmu_perf_cache_map, armv7_a9_perf_cache_map,
2151 sizeof(armv7_a9_perf_cache_map));
2152 armv7pmu.event_map = armv7_a9_pmu_event_map;
2153 armpmu = &armv7pmu;
2154
2155			/* Reset PMNC and read the number of CNTx
2156			   counters supported */
2157 armv7pmu.num_events = armv7_reset_read_pmnc();
2158 perf_max_events = armv7pmu.num_events;
2159 break;
Jamie Iles1b8873a2010-02-02 20:25:44 +01002160 default:
2161 pr_info("no hardware support available\n");
2162 perf_max_events = -1;
2163 }
2164 }
2165
2166 if (armpmu)
Jean PIHET796d1292010-01-26 18:51:05 +01002167 pr_info("enabled with %s PMU driver, %d counters available\n",
Will Deacon181193f2010-04-30 11:32:44 +01002168 arm_pmu_names[armpmu->id], armpmu->num_events);
Jamie Iles1b8873a2010-02-02 20:25:44 +01002169
2170 return 0;
2171}
2172arch_initcall(init_hw_perf_events);
2173
2174/*
2175 * Callchain handling code.
2176 */
2177static inline void
2178callchain_store(struct perf_callchain_entry *entry,
2179 u64 ip)
2180{
2181 if (entry->nr < PERF_MAX_STACK_DEPTH)
2182 entry->ip[entry->nr++] = ip;
2183}
2184
2185/*
2186 * The registers we're interested in are at the end of the variable
2187 * length saved register structure. The fp points at the end of this
2188 * structure so the address of this struct is:
2189 * (struct frame_tail *)(xxx->fp)-1
2190 *
2191 * This code has been adapted from the ARM OProfile support.
2192 */
2193struct frame_tail {
2194 struct frame_tail *fp;
2195 unsigned long sp;
2196 unsigned long lr;
2197} __attribute__((packed));
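
/*
 * Concretely: if fp holds address A, this record occupies the three
 * words at A - 12, A - 8 and A - 4, which is why the walkers below use
 * (struct frame_tail *)fp - 1 to reach it. lr holds the frame's return
 * address and fp links to the caller's record.
 */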
2198
2199/*
2200 * Get the return address for a single stackframe and return a pointer to the
2201 * next frame tail.
2202 */
2203static struct frame_tail *
2204user_backtrace(struct frame_tail *tail,
2205 struct perf_callchain_entry *entry)
2206{
2207 struct frame_tail buftail;
2208
2209 /* Also check accessibility of one struct frame_tail beyond */
2210 if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
2211 return NULL;
2212 if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
2213 return NULL;
2214
2215 callchain_store(entry, buftail.lr);
2216
2217 /*
2218 * Frame pointers should strictly progress back up the stack
2219 * (towards higher addresses).
2220 */
2221 if (tail >= buftail.fp)
2222 return NULL;
2223
2224 return buftail.fp - 1;
2225}
2226
2227static void
2228perf_callchain_user(struct pt_regs *regs,
2229 struct perf_callchain_entry *entry)
2230{
2231 struct frame_tail *tail;
2232
2233 callchain_store(entry, PERF_CONTEXT_USER);
2234
2235 if (!user_mode(regs))
2236 regs = task_pt_regs(current);
2237
2238 tail = (struct frame_tail *)regs->ARM_fp - 1;
2239
2240 while (tail && !((unsigned long)tail & 0x3))
2241 tail = user_backtrace(tail, entry);
2242}
2243
2244/*
2245 * Gets called by walk_stackframe() for every stackframe. This will be called
2246 * whilst unwinding the stackframe and is like a subroutine return so we use
2247 * the PC.
2248 */
2249static int
2250callchain_trace(struct stackframe *fr,
2251 void *data)
2252{
2253 struct perf_callchain_entry *entry = data;
2254 callchain_store(entry, fr->pc);
2255 return 0;
2256}
2257
2258static void
2259perf_callchain_kernel(struct pt_regs *regs,
2260 struct perf_callchain_entry *entry)
2261{
2262 struct stackframe fr;
2263
2264 callchain_store(entry, PERF_CONTEXT_KERNEL);
2265 fr.fp = regs->ARM_fp;
2266 fr.sp = regs->ARM_sp;
2267 fr.lr = regs->ARM_lr;
2268 fr.pc = regs->ARM_pc;
2269 walk_stackframe(&fr, callchain_trace, entry);
2270}
2271
2272static void
2273perf_do_callchain(struct pt_regs *regs,
2274 struct perf_callchain_entry *entry)
2275{
2276 int is_user;
2277
2278 if (!regs)
2279 return;
2280
2281 is_user = user_mode(regs);
2282
2283 if (!current || !current->pid)
2284 return;
2285
2286 if (is_user && current->state != TASK_RUNNING)
2287 return;
2288
2289 if (!is_user)
2290 perf_callchain_kernel(regs, entry);
2291
2292 if (current->mm)
2293 perf_callchain_user(regs, entry);
2294}
2295
2296static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
2297
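/*
 * Called by the perf core when a sample requests a callchain. The
 * per-CPU buffer is reused for every sample; that is expected to be
 * safe because the callers run with preemption disabled, as the PMU
 * interrupt path above does.
 */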
2298struct perf_callchain_entry *
2299perf_callchain(struct pt_regs *regs)
2300{
2301 struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
2302
2303 entry->nr = 0;
2304 perf_do_callchain(regs, entry);
2305 return entry;
2306}