#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
 * code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>

static struct platform_device *pmu_device;

/*
 * Hardware lock to serialize accesses to PMU registers. Needed for the
 * read/modify/write sequences.
 */
static DEFINE_RAW_SPINLOCK(pmu_lock);

/*
 * ARMv6 supports a maximum of 3 events, starting from index 0. If we add
 * another platform that supports more, we need to increase this to be the
 * largest of all platforms.
 *
 * ARMv7 supports up to 32 events:
 * cycle counter CCNT + 31 event counters CNT0..30.
 * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
 */
#define ARMPMU_MAX_HWEVENTS		32

/* The events for a given CPU. */
struct cpu_hw_events {
	/*
	 * The events that are active on the CPU for the given index.
	 */
	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];

	/*
	 * A 1 bit for an index indicates that the counter is being used for
	 * an event. A 0 means that the counter can be used.
	 */
	unsigned long		used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];

	/*
	 * A 1 bit for an index indicates that the counter is actively being
	 * used.
	 */
	unsigned long		active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

struct arm_pmu {
	enum arm_perf_pmu_ids id;
	cpumask_t	active_irqs;
	const char	*name;
	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
	void		(*enable)(struct hw_perf_event *evt, int idx);
	void		(*disable)(struct hw_perf_event *evt, int idx);
	int		(*get_event_idx)(struct cpu_hw_events *cpuc,
					 struct hw_perf_event *hwc);
	int		(*set_event_filter)(struct hw_perf_event *evt,
					    struct perf_event_attr *attr);
	u32		(*read_counter)(int idx);
	void		(*write_counter)(int idx, u32 val);
	void		(*start)(void);
	void		(*stop)(void);
	void		(*reset)(void *);
	const unsigned	(*cache_map)[PERF_COUNT_HW_CACHE_MAX]
				    [PERF_COUNT_HW_CACHE_OP_MAX]
				    [PERF_COUNT_HW_CACHE_RESULT_MAX];
	const unsigned	(*event_map)[PERF_COUNT_HW_MAX];
	u32		raw_event_mask;
	int		num_events;
	u64		max_period;
};

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *armpmu;
enum arm_perf_pmu_ids
armpmu_get_pmu_id(void)
{
	int id = -ENODEV;

	if (armpmu != NULL)
		id = armpmu->id;

	return id;
}
EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);

int
armpmu_get_max_events(void)
{
	int max_events = 0;

	if (armpmu != NULL)
		max_events = armpmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(armpmu_get_max_events);

int perf_num_counters(void)
{
	return armpmu_get_max_events();
}
EXPORT_SYMBOL_GPL(perf_num_counters);

#define HW_OP_UNSUPPORTED		0xFFFF

#define C(_x) \
	PERF_COUNT_HW_CACHE_##_x

#define CACHE_OP_UNSUPPORTED		0xFFFF

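/*
 * Cache events are packed into attr->config as three bytes: the cache
 * type in bits 0-7, the operation in bits 8-15 and the result
 * (access/miss) in bits 16-23, mirroring the generic perf cache tuple.
 */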
static int
armpmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*armpmu->cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_event(u64 config)
{
	int mapping = (*armpmu->event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : mapping;
}

static int
armpmu_map_raw_event(u64 config)
{
	return (int)(config & armpmu->raw_event_mask);
}

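/*
 * The hardware counters count up and interrupt on overflow, so a
 * sampling period of "left" events is programmed by writing the two's
 * complement, -left, into the counter.
 */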
static int
armpmu_event_set_period(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)armpmu->max_period)
		left = armpmu->max_period;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

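/*
 * Fold the delta since the last read into event->count. The cmpxchg
 * loop protects prev_count against a concurrent update from the
 * overflow interrupt handler.
 */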
static u64
armpmu_event_update(struct perf_event *event,
		    struct hw_perf_event *hwc,
		    int idx, int overflow)
{
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	new_raw_count &= armpmu->max_period;
	prev_raw_count &= armpmu->max_period;

	if (overflow)
		delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
	else
		delta = new_raw_count - prev_raw_count;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	armpmu_event_update(event, hwc, hwc->idx, 0);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(hwc, hwc->idx);
		barrier(); /* why? */
		armpmu_event_update(event, hwc, hwc->idx, 0);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void
armpmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event, hwc, hwc->idx);
	armpmu->enable(hwc, hwc->idx);
}

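/*
 * Take an event off the PMU: mark its counter inactive, stop it with a
 * final count update and then release the counter index.
 */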
static void
armpmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0);

	clear_bit(idx, cpuc->active_mask);
	armpmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

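/*
 * Schedule an event onto the PMU: claim a free counter index, install
 * the event on it in the stopped state and start it if asked to.
 */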
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(hwc, idx);
	cpuc->events[idx] = event;
	set_bit(idx, cpuc->active_mask);

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static struct pmu pmu;

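/*
 * Group validation works by scheduling the entire group into a scratch
 * cpu_hw_events structure, checking that the hardware has enough
 * counters to hold every member simultaneously.
 */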
static int
validate_event(struct cpu_hw_events *cpuc,
	       struct perf_event *event)
{
	struct hw_perf_event fake_event = event->hw;

	if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
		return 1;

	return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_pmu;

	memset(&fake_pmu, 0, sizeof(fake_pmu));

	if (!validate_event(&fake_pmu, leader))
		return -ENOSPC;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -ENOSPC;
	}

	if (!validate_event(&fake_pmu, event))
		return -ENOSPC;

	return 0;
}

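/*
 * Machines with platform-specific interrupt wiring can supply a
 * dispatcher via platform data; it is handed the PMU's own handler to
 * call once the interrupt has been demultiplexed.
 */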
static irqreturn_t armpmu_platform_irq(int irq, void *dev)
{
	struct arm_pmu_platdata *plat = dev_get_platdata(&pmu_device->dev);

	return plat->handle_irq(irq, dev, armpmu->handle_irq);
}

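/*
 * Free only the interrupts we actually claimed, as recorded in
 * active_irqs, then stop the PMU and drop the hardware reservation.
 */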
static void
armpmu_release_hardware(void)
{
	int i, irq, irqs;

	irqs = min(pmu_device->num_resources, num_possible_cpus());

	for (i = 0; i < irqs; ++i) {
		if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
			continue;
		irq = platform_get_irq(pmu_device, i);
		if (irq >= 0)
			free_irq(irq, NULL);
	}

	armpmu->stop();
	release_pmu(ARM_PMU_DEVICE_CPU);
}

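/*
 * Reserve the PMU and request one overflow interrupt per CPU, pinning
 * each interrupt to its CPU so that overflows are handled locally.
 */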
static int
armpmu_reserve_hardware(void)
{
	struct arm_pmu_platdata *plat;
	irq_handler_t handle_irq;
	int i, err, irq, irqs;

	err = reserve_pmu(ARM_PMU_DEVICE_CPU);
	if (err) {
		pr_warning("unable to reserve pmu\n");
		return err;
	}

	plat = dev_get_platdata(&pmu_device->dev);
	if (plat && plat->handle_irq)
		handle_irq = armpmu_platform_irq;
	else
		handle_irq = armpmu->handle_irq;

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (irqs < 1) {
		pr_err("no irqs for PMUs defined\n");
		return -ENODEV;
	}

	for (i = 0; i < irqs; ++i) {
		err = 0;
		irq = platform_get_irq(pmu_device, i);
		if (irq < 0)
			continue;

		/*
		 * If we have a single PMU interrupt that we can't shift,
		 * assume that we're running on a uniprocessor machine and
		 * continue. Otherwise, continue without this interrupt.
		 */
		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
				   irq, i);
			continue;
		}

		err = request_irq(irq, handle_irq,
				  IRQF_DISABLED | IRQF_NOBALANCING,
				  "arm-pmu", NULL);
		if (err) {
			pr_err("unable to request IRQ%d for ARM PMU counters\n",
			       irq);
			armpmu_release_hardware();
			return err;
		}

		cpumask_set_cpu(i, &armpmu->active_irqs);
	}

	return 0;
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);

static void
hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
		armpmu_release_hardware();
		mutex_unlock(&pmu_reserve_mutex);
	}
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

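/*
 * Translate the generic perf event into its hardware encoding, apply
 * any mode filtering, pick a default sample period and check that the
 * event's group is schedulable as a whole.
 */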
static int
__hw_perf_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int mapping, err;

	/* Decode the generic type into an ARM event identifier. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		mapping = armpmu_map_event(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		mapping = armpmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		mapping = armpmu_map_raw_event(event->attr.config);
	} else {
		pr_debug("event type %x not supported\n", event->attr.type);
		return -EOPNOTSUPP;
	}

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;
	hwc->config		= 0;
	hwc->event_base		= 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EPERM;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!hwc->sample_period) {
		hwc->sample_period  = armpmu->max_period;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event) {
		err = validate_group(event);
		if (err)
			return -EINVAL;
	}

	return err;
}

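/*
 * Entry point from the perf core. The PMU hardware and its interrupts
 * are reserved when the first event is created and released when the
 * last one is destroyed, refcounted through active_events.
 */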
static int armpmu_event_init(struct perf_event *event)
{
	int err = 0;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0)
			err = armpmu_reserve_hardware();

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

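/*
 * The perf core brackets batches of state changes with pmu_disable and
 * pmu_enable. On re-enable, re-enable every installed counter and only
 * restart the PMU if at least one event is active.
 */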
static void armpmu_enable(struct pmu *pmu)
{
	/* Enable all of the perf events on hardware. */
	int idx, enabled = 0;
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];

		if (!event)
			continue;

		armpmu->enable(&event->hw, idx);
		enabled = 1;
	}

	if (enabled)
		armpmu->start();
}

static void armpmu_disable(struct pmu *pmu)
{
	armpmu->stop();
}

static struct pmu pmu = {
	.pmu_enable	= armpmu_enable,
	.pmu_disable	= armpmu_disable,
	.event_init	= armpmu_event_init,
	.add		= armpmu_add,
	.del		= armpmu_del,
	.start		= armpmu_start,
	.stop		= armpmu_stop,
	.read		= armpmu_read,
};

/* Include the PMU-specific implementations. */
#include "perf_event_xscale.c"
#include "perf_event_v6.c"
#include "perf_event_v7.c"

/*
 * Ensure the PMU has sane values out of reset.
 * This requires SMP to be available, so exists as a separate initcall.
 */
static int __init
armpmu_reset(void)
{
	if (armpmu && armpmu->reset)
		return on_each_cpu(armpmu->reset, NULL, 1);
	return 0;
}
arch_initcall(armpmu_reset);

/*
 * PMU platform driver and devicetree bindings.
 */
static struct of_device_id armpmu_of_device_ids[] = {
	{.compatible = "arm,cortex-a9-pmu"},
	{.compatible = "arm,cortex-a8-pmu"},
	{.compatible = "arm,arm1136-pmu"},
	{.compatible = "arm,arm1176-pmu"},
	{},
};

static struct platform_device_id armpmu_plat_device_ids[] = {
	{.name = "arm-pmu"},
	{},
};

static int __devinit armpmu_device_probe(struct platform_device *pdev)
{
	pmu_device = pdev;
	return 0;
}

static struct platform_driver armpmu_driver = {
	.driver		= {
		.name	= "arm-pmu",
		.of_match_table = armpmu_of_device_ids,
	},
	.probe		= armpmu_device_probe,
	.id_table	= armpmu_plat_device_ids,
};

static int __init register_pmu_driver(void)
{
	return platform_driver_register(&armpmu_driver);
}
device_initcall(register_pmu_driver);

/*
 * CPU PMU identification and registration.
 */
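/*
 * The CPU is identified from its main ID register: the implementor
 * code lives in bits [31:24] and the primary part number in bits
 * [15:4] (kept in place below by masking with 0xFFF0 rather than
 * shifting).
 */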
static int __init
init_hw_perf_events(void)
{
	unsigned long cpuid = read_cpuid_id();
	unsigned long implementor = (cpuid & 0xFF000000) >> 24;
	unsigned long part_number = (cpuid & 0xFFF0);

	/* ARM Ltd CPUs. */
	if (0x41 == implementor) {
		switch (part_number) {
		case 0xB360:	/* ARM1136 */
		case 0xB560:	/* ARM1156 */
		case 0xB760:	/* ARM1176 */
			armpmu = armv6pmu_init();
			break;
		case 0xB020:	/* ARM11mpcore */
			armpmu = armv6mpcore_pmu_init();
			break;
		case 0xC080:	/* Cortex-A8 */
			armpmu = armv7_a8_pmu_init();
			break;
		case 0xC090:	/* Cortex-A9 */
			armpmu = armv7_a9_pmu_init();
			break;
		case 0xC050:	/* Cortex-A5 */
			armpmu = armv7_a5_pmu_init();
			break;
		case 0xC0F0:	/* Cortex-A15 */
			armpmu = armv7_a15_pmu_init();
			break;
		}
	/* Intel CPUs [xscale]. */
	} else if (0x69 == implementor) {
		part_number = (cpuid >> 13) & 0x7;
		switch (part_number) {
		case 1:
			armpmu = xscale1pmu_init();
			break;
		case 2:
			armpmu = xscale2pmu_init();
			break;
		}
	}

	if (armpmu) {
		pr_info("enabled with %s PMU driver, %d counters available\n",
			armpmu->name, armpmu->num_events);
		perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
	} else {
		pr_info("no hardware support available\n");
	}

	return 0;
}
early_initcall(init_hw_perf_events);

/*
 * Callchain handling code.
 */

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long sp;
	unsigned long lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry *entry)
{
	struct frame_tail buftail;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;
	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= buftail.fp)
		return NULL;

	return buftail.fp - 1;
}

void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct frame_tail __user *tail;

	tail = (struct frame_tail __user *)regs->ARM_fp - 1;

	while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
	       tail && !((unsigned long)tail & 0x3))
		tail = user_backtrace(tail, entry);
}

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int
callchain_trace(struct stackframe *fr,
		void *data)
{
	struct perf_callchain_entry *entry = data;
	perf_callchain_store(entry, fr->pc);
	return 0;
}

void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct stackframe fr;

	fr.fp = regs->ARM_fp;
	fr.sp = regs->ARM_sp;
	fr.lr = regs->ARM_lr;
	fr.pc = regs->ARM_pc;
	walk_stackframe(&fr, callchain_trace, entry);
}