#undef DEBUG

/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
 * code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/bitmap.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/irq.h>

#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>

#include <linux/cpu_pm.h>

/*
 * ARMv6 supports a maximum of 3 events, starting from index 0. If we add
 * another platform that supports more, we need to increase this to be the
 * largest of all platforms.
 *
 * ARMv7 supports up to 32 events:
 *  cycle counter CCNT + 31 events counters CNT0..30.
 *  Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
 */
#define ARMPMU_MAX_HWEVENTS		32

static DEFINE_PER_CPU(u32, from_idle);
static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);

#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *cpu_pmu;

enum arm_perf_pmu_ids
armpmu_get_pmu_id(void)
{
	int id = -ENODEV;

	if (cpu_pmu != NULL)
		id = cpu_pmu->id;

	return id;
}
EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);

int perf_num_counters(void)
{
	int max_events = 0;

	if (cpu_pmu != NULL)
		max_events = cpu_pmu->num_events;

	return max_events;
}
EXPORT_SYMBOL_GPL(perf_num_counters);

#define HW_OP_UNSUPPORTED		0xFFFF

#define C(_x) \
	PERF_COUNT_HW_CACHE_##_x

#define CACHE_OP_UNSUPPORTED		0xFFFF

static int
armpmu_map_cache_event(unsigned (*cache_map)
		       [PERF_COUNT_HW_CACHE_MAX]
		       [PERF_COUNT_HW_CACHE_OP_MAX]
		       [PERF_COUNT_HW_CACHE_RESULT_MAX],
		       u64 config)
{
	unsigned int cache_type, cache_op, cache_result, ret;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];

	if (ret == CACHE_OP_UNSUPPORTED)
		return -ENOENT;

	return ret;
}

static int
armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
	int mapping;

	if (config >= PERF_COUNT_HW_MAX)
		return -EINVAL;

	mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

static int
armpmu_map_raw_event(u32 raw_event_mask, u64 config)
{
	return (int)(config & raw_event_mask);
}

static int map_cpu_event(struct perf_event *event,
			 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
			 unsigned (*cache_map)
					[PERF_COUNT_HW_CACHE_MAX]
					[PERF_COUNT_HW_CACHE_OP_MAX]
					[PERF_COUNT_HW_CACHE_RESULT_MAX],
			 u32 raw_event_mask)
{
	u64 config = event->attr.config;

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		return armpmu_map_event(event_map, config);
	case PERF_TYPE_HW_CACHE:
		return armpmu_map_cache_event(cache_map, config);
	case PERF_TYPE_RAW:
		return armpmu_map_raw_event(raw_event_mask, config);
	}

	return -ENOENT;
}

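/*
 * (Re)program the hardware counter for 'event' so that it overflows after
 * another sample_period counts: the period is clamped to armpmu->max_period
 * and the counter is preloaded with -left, so the overflow interrupt fires
 * once the requested number of events has been counted.
 */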
int
armpmu_event_set_period(struct perf_event *event,
			struct hw_perf_event *hwc,
			int idx)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > (s64)armpmu->max_period)
		left = armpmu->max_period;

	local64_set(&hwc->prev_count, (u64)-left);

	armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

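/*
 * Fold the delta since the last read into event->count. The cmpxchg loop
 * guards against a concurrent update of prev_count (e.g. from the overflow
 * IRQ), and the delta is masked with max_period to cope with counter wrap.
 */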
u64
armpmu_event_update(struct perf_event *event,
		    struct hw_perf_event *hwc,
		    int idx)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	u64 delta, prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = armpmu->read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count - prev_raw_count) & armpmu->max_period;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void
armpmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	armpmu_event_update(event, hwc, hwc->idx);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to update the counter, so ignore
	 * PERF_EF_UPDATE, see comments in armpmu_start().
	 */
	if (!(hwc->state & PERF_HES_STOPPED)) {
		armpmu->disable(hwc, hwc->idx);
		barrier(); /* why? */
		armpmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static void
armpmu_start(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * ARM pmu always has to reprogram the period, so ignore
	 * PERF_EF_RELOAD, see the comment below.
	 */
	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;
	/*
	 * Set the period again. Some counters can't be stopped, so when we
	 * were stopped we simply disabled the IRQ source and the counter
	 * may have been left counting. If we don't do this step then we may
	 * get an interrupt too soon or *way* too late if the overflow has
	 * happened since disabling.
	 */
	armpmu_event_set_period(event, hwc, hwc->idx);
	armpmu->enable(hwc, hwc->idx, event->cpu);
}

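/*
 * Remove an event from the PMU: stop its counter, release its index in
 * used_mask and drop any platform-specific event constraints.
 */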
static void
armpmu_del(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0);

	armpmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);

	/* Clear event constraints. */
	if (armpmu->clear_event_constraints)
		armpmu->clear_event_constraints(event);

	perf_event_update_userpage(event);
}

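/*
 * Add an event to the PMU: run the optional constraint check, claim a free
 * counter index and, if PERF_EF_START is set, start counting immediately.
 */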
static int
armpmu_add(struct perf_event *event, int flags)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);
	/*
	 * Test whether the event is constrained. If it is not, mark it so
	 * that the next collision can be detected.
	 */
	if (armpmu->test_set_event_constraints)
		if (armpmu->test_set_event_constraints(event) < 0) {
			pr_err("Event: %llx failed constraint check.\n",
					event->attr.config);
			event->state = PERF_EVENT_STATE_OFF;
			goto out;
		}

	/* If we don't have a space for the counter then finish early. */
	idx = armpmu->get_event_idx(hw_events, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then make
	 * sure it is disabled.
	 */
	event->hw.idx = idx;
	armpmu->disable(hwc, idx);
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		armpmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

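/*
 * Group validation: simulate scheduling the leader and all of its siblings
 * onto a fake PMU to check that the whole group can fit on the hardware
 * counters at the same time.
 */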
static int
validate_event(struct pmu_hw_events *hw_events,
	       struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event fake_event = event->hw;
	struct pmu *leader_pmu = event->group_leader->pmu;

	if (is_software_event(event))
		return 1;

	if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
		return 1;

	return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct pmu_hw_events fake_pmu;
	DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);

	/*
	 * Initialise the fake PMU. We only need to populate the
	 * used_mask for the purposes of validation.
	 */
	memset(fake_used_mask, 0, sizeof(fake_used_mask));
	fake_pmu.used_mask = fake_used_mask;

	if (!validate_event(&fake_pmu, leader))
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (!validate_event(&fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(&fake_pmu, event))
		return -EINVAL;

	return 0;
}

static irqreturn_t armpmu_platform_irq(int irq, void *dev)
{
	struct arm_pmu *armpmu = (struct arm_pmu *) dev;
	struct platform_device *plat_device = armpmu->plat_device;
	struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev);

	return plat->handle_irq(irq, dev, armpmu->handle_irq);
}

int
armpmu_generic_request_irq(int irq, irq_handler_t *handle_irq)
{
	return request_irq(irq, *handle_irq,
			IRQF_DISABLED | IRQF_NOBALANCING,
			"armpmu", NULL);
}

void
armpmu_generic_free_irq(int irq)
{
	if (irq >= 0)
		free_irq(irq, NULL);
}

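/*
 * Free every PMU IRQ we previously claimed and release the PMU hardware
 * back to the reservation layer.
 */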
static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
	int i, irq, irqs;
	struct platform_device *pmu_device = armpmu->plat_device;

	irqs = min(pmu_device->num_resources, num_possible_cpus());

	for (i = 0; i < irqs; ++i) {
		if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
			continue;
		irq = platform_get_irq(pmu_device, i);
		armpmu->free_pmu_irq(irq);
	}

	release_pmu(armpmu->type);
}

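/*
 * Reserve the PMU hardware and request one IRQ per CPU (or a single IRQ on
 * uniprocessor systems), honouring any request/free hooks supplied via
 * platform data.
 */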
static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
	struct arm_pmu_platdata *plat;
	irq_handler_t handle_irq;
	int i, err, irq, irqs;
	struct platform_device *pmu_device = armpmu->plat_device;

	if (!pmu_device)
		return -ENODEV;

	err = reserve_pmu(armpmu->type);
	if (err) {
		pr_warning("unable to reserve pmu\n");
		return err;
	}

	plat = dev_get_platdata(&pmu_device->dev);
	if (plat && plat->handle_irq)
		handle_irq = armpmu_platform_irq;
	else
		handle_irq = armpmu->handle_irq;

	if (plat && plat->request_pmu_irq)
		armpmu->request_pmu_irq = plat->request_pmu_irq;
	else if (!armpmu->request_pmu_irq)
		armpmu->request_pmu_irq = armpmu_generic_request_irq;

	if (plat && plat->free_pmu_irq)
		armpmu->free_pmu_irq = plat->free_pmu_irq;
	else if (!armpmu->free_pmu_irq)
		armpmu->free_pmu_irq = armpmu_generic_free_irq;

	irqs = min(pmu_device->num_resources, num_possible_cpus());
	if (irqs < 1) {
		pr_err("no irqs for PMUs defined\n");
		return -ENODEV;
	}

	for (i = 0; i < irqs; ++i) {
		err = 0;
		irq = platform_get_irq(pmu_device, i);
		if (irq < 0)
			continue;

		/*
		 * If we have a single PMU interrupt that we can't shift,
		 * assume that we're running on a uniprocessor machine and
		 * continue. Otherwise, continue without this interrupt.
		 */
		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
				   irq, i);
			continue;
		}

		err = armpmu->request_pmu_irq(irq, &handle_irq);

		if (err) {
			pr_warning("unable to request IRQ%d for %s perf "
				   "counters\n", irq, armpmu->name);

			armpmu_release_hardware(cpu_pmu);
			return err;
		}

		cpumask_set_cpu(i, &armpmu->active_irqs);
	}

	return 0;
}

static void
hw_perf_event_destroy(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	atomic_t *active_events = &armpmu->active_events;
	struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) {
		armpmu_release_hardware(armpmu);
		mutex_unlock(pmu_reserve_mutex);
	}
}

static int
event_requires_mode_exclusion(struct perf_event_attr *attr)
{
	return attr->exclude_idle || attr->exclude_user ||
	       attr->exclude_kernel || attr->exclude_hv;
}

static int
__hw_perf_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int mapping, err;

	mapping = armpmu->map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet. For SMP systems, each core has its own PMU so we can't do any
	 * clever allocation or constraints checking at this point.
	 */
	hwc->idx		= -1;
	hwc->config_base	= 0;
	hwc->config		= 0;
	hwc->event_base		= 0;

	/*
	 * Check whether we need to exclude the counter from certain modes.
	 */
	if ((!armpmu->set_event_filter ||
	     armpmu->set_event_filter(hwc, &event->attr)) &&
	     event_requires_mode_exclusion(&event->attr)) {
		pr_debug("ARM performance counters do not support "
			 "mode exclusion\n");
		return -EPERM;
	}

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (!hwc->sample_period) {
		/*
		 * For non-sampling runs, limit the sample_period to half
		 * of the counter width. That way, the new counter value
		 * is far less likely to overtake the previous one unless
		 * you have some serious IRQ latency issues.
		 */
		hwc->sample_period  = armpmu->max_period >> 1;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event) {
		err = validate_group(event);
		if (err)
			return -EINVAL;
	}

	return err;
}

static int armpmu_event_init(struct perf_event *event)
{
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
	int err = 0;
	atomic_t *active_events = &armpmu->active_events;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	if (armpmu->map_event(event) == -ENOENT)
		return -ENOENT;

	event->destroy = hw_perf_event_destroy;

	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&armpmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = armpmu_reserve_hardware(armpmu);

		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&armpmu->reserve_mutex);
	}

	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

static void armpmu_enable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	struct pmu_hw_events *hw_events = armpmu->get_hw_events();
	int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
	int idx;

	if (__get_cpu_var(from_idle)) {
		for (idx = 0; idx <= cpu_pmu->num_events; ++idx) {
			struct perf_event *event = hw_events->events[idx];

			if (!event)
				continue;

			armpmu->enable(&event->hw, idx, event->cpu);
		}

		/* Reset bit so we don't needlessly re-enable counters. */
		__get_cpu_var(from_idle) = 0;
	}

	/* So we don't start the PMU before enabling counters after idle. */
	barrier();

	if (enabled)
		armpmu->start();
}

static void armpmu_disable(struct pmu *pmu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);
	armpmu->stop();
}

static void armpmu_init(struct arm_pmu *armpmu)
{
	atomic_set(&armpmu->active_events, 0);
	mutex_init(&armpmu->reserve_mutex);

	armpmu->pmu.pmu_enable = armpmu_enable;
	armpmu->pmu.pmu_disable = armpmu_disable;
	armpmu->pmu.event_init = armpmu_event_init;
	armpmu->pmu.add = armpmu_add;
	armpmu->pmu.del = armpmu_del;
	armpmu->pmu.start = armpmu_start;
	armpmu->pmu.stop = armpmu_stop;
	armpmu->pmu.read = armpmu_read;
}

int armpmu_register(struct arm_pmu *armpmu, char *name, int type)
{
	armpmu_init(armpmu);
	return perf_pmu_register(&armpmu->pmu, name, type);
}

/* Include the PMU-specific implementations. */
#include "perf_event_xscale.c"
#include "perf_event_v6.c"
#include "perf_event_v7.c"
#include "perf_event_msm_krait.c"
#include "perf_event_msm.c"

/*
 * Ensure the PMU has sane values out of reset.
 * This requires SMP to be available, so exists as a separate initcall.
 */
static int __init
cpu_pmu_reset(void)
{
	if (cpu_pmu && cpu_pmu->reset)
		return on_each_cpu(cpu_pmu->reset, NULL, 1);
	return 0;
}
arch_initcall(cpu_pmu_reset);

/*
 * PMU platform driver and devicetree bindings.
 */
static struct of_device_id armpmu_of_device_ids[] = {
	{.compatible = "arm,cortex-a9-pmu"},
	{.compatible = "arm,cortex-a8-pmu"},
	{.compatible = "arm,arm1136-pmu"},
	{.compatible = "arm,arm1176-pmu"},
	{},
};

static struct platform_device_id armpmu_plat_device_ids[] = {
	{.name = "cpu-arm-pmu"},
	{},
};

static int __devinit armpmu_device_probe(struct platform_device *pdev)
{
	if (!cpu_pmu)
		return -ENODEV;

	cpu_pmu->plat_device = pdev;
	return 0;
}

static struct platform_driver armpmu_driver = {
	.driver		= {
		.name	= "cpu-arm-pmu",
		.of_match_table = armpmu_of_device_ids,
	},
	.probe		= armpmu_device_probe,
	.id_table	= armpmu_plat_device_ids,
};

static int __init register_pmu_driver(void)
{
	return platform_driver_register(&armpmu_driver);
}
device_initcall(register_pmu_driver);

static struct pmu_hw_events *armpmu_get_cpu_events(void)
{
	return &__get_cpu_var(cpu_hw_events);
}

static void __init cpu_pmu_init(struct arm_pmu *armpmu)
{
	int cpu;
	for_each_possible_cpu(cpu) {
		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
		events->events = per_cpu(hw_events, cpu);
		events->used_mask = per_cpu(used_mask, cpu);
		raw_spin_lock_init(&events->pmu_lock);
	}
	armpmu->get_hw_events = armpmu_get_cpu_events;
	armpmu->type = ARM_PMU_DEVICE_CPU;
}

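/* Return 1 if the given CPU currently has at least one active event. */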
static int cpu_has_active_perf(int cpu)
{
	struct pmu_hw_events *hw_events;
	int enabled;

	if (!cpu_pmu)
		return 0;
	hw_events = &per_cpu(cpu_hw_events, cpu);
	enabled = bitmap_weight(hw_events->used_mask, cpu_pmu->num_events);

	if (enabled)
		/* Even one event's existence is good enough. */
		return 1;

	return 0;
}

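/*
 * Helpers run via smp_call_function_single() from the hotplug notifier to
 * arm/disarm the per-CPU PMU interrupt on the CPU that is going up or down.
 */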
void enable_irq_callback(void *info)
{
	int irq = *(unsigned int *)info;
	enable_percpu_irq(irq, IRQ_TYPE_EDGE_RISING);
}

void disable_irq_callback(void *info)
{
	int irq = *(unsigned int *)info;
	disable_percpu_irq(irq);
}

/*
 * PMU hardware loses all context when a CPU goes offline.
 * When a CPU is hotplugged back in, since some hardware registers are
 * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
 * junk values out of them.
 */
static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
				    unsigned long action, void *hcpu)
{
	int irq;

	if (cpu_has_active_perf((int)hcpu)) {
		switch ((action & ~CPU_TASKS_FROZEN)) {

		case CPU_DOWN_PREPARE:
			/*
			 * If this is on a multicore CPU, we need
			 * to disarm the PMU IRQ before disappearing.
			 */
			if (cpu_pmu &&
			    cpu_pmu->plat_device->dev.platform_data) {
				irq = platform_get_irq(cpu_pmu->plat_device, 0);
				smp_call_function_single((int)hcpu,
						disable_irq_callback, &irq, 1);
			}
			return NOTIFY_DONE;

		case CPU_UP_PREPARE:
			/*
			 * If this is on a multicore CPU, we need
			 * to arm the PMU IRQ before appearing.
			 */
			if (cpu_pmu &&
			    cpu_pmu->plat_device->dev.platform_data) {
				irq = platform_get_irq(cpu_pmu->plat_device, 0);
				smp_call_function_single((int)hcpu,
						enable_irq_callback, &irq, 1);
			}
			return NOTIFY_DONE;

		case CPU_STARTING:
			if (cpu_pmu && cpu_pmu->reset) {
				cpu_pmu->reset(NULL);
				return NOTIFY_OK;
			}
		default:
			return NOTIFY_DONE;
		}
	}

	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
		return NOTIFY_DONE;

	return NOTIFY_OK;
}

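/*
 * Read back every active counter so that no counts are lost when the PMU
 * context is thrown away (e.g. before the CPU power-collapses).
 */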
static void armpmu_update_counters(void)
{
	struct pmu_hw_events *hw_events;
	int idx;

	if (!cpu_pmu)
		return;

	hw_events = cpu_pmu->get_hw_events();

	for (idx = 0; idx <= cpu_pmu->num_events; ++idx) {
		struct perf_event *event = hw_events->events[idx];

		if (!event)
			continue;

		armpmu_read(event);
	}
}

static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
	.notifier_call = pmu_cpu_notify,
};

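/*
 * CPU PM notifier: save counter values before PMU state is lost on
 * idle/power-collapse entry, and reset and re-enable the PMU on exit.
 */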
/* TODO: Unify with pending patch from ARM */
static int perf_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
				void *v)
{
	struct pmu *pmu;

	switch (cmd) {
	case CPU_PM_ENTER:
		if (cpu_has_active_perf((int)v)) {
			armpmu_update_counters();
			pmu = &cpu_pmu->pmu;
			pmu->pmu_disable(pmu);
		}
		break;

	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		if (cpu_has_active_perf((int)v) && cpu_pmu->reset) {
			/*
			 * Flip this bit so armpmu_enable knows it needs
			 * to re-enable active counters.
			 */
			__get_cpu_var(from_idle) = 1;
			cpu_pmu->reset(NULL);
			pmu = &cpu_pmu->pmu;
			pmu->pmu_enable(pmu);
		}
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block perf_cpu_pm_notifier_block = {
	.notifier_call = perf_cpu_pm_notifier,
};

/*
 * CPU PMU identification and registration.
 */
static int __init
init_hw_perf_events(void)
{
	unsigned long cpuid = read_cpuid_id();
	unsigned long implementor = (cpuid & 0xFF000000) >> 24;
	unsigned long part_number = (cpuid & 0xFFF0);

	/* ARM Ltd CPUs. */
	if (0x41 == implementor) {
		switch (part_number) {
		case 0xB360:	/* ARM1136 */
		case 0xB560:	/* ARM1156 */
		case 0xB760:	/* ARM1176 */
			cpu_pmu = armv6pmu_init();
			break;
		case 0xB020:	/* ARM11mpcore */
			cpu_pmu = armv6mpcore_pmu_init();
			break;
		case 0xC080:	/* Cortex-A8 */
			cpu_pmu = armv7_a8_pmu_init();
			break;
		case 0xC090:	/* Cortex-A9 */
			cpu_pmu = armv7_a9_pmu_init();
			break;
		case 0xC050:	/* Cortex-A5 */
			cpu_pmu = armv7_a5_pmu_init();
			break;
		case 0xC0F0:	/* Cortex-A15 */
			cpu_pmu = armv7_a15_pmu_init();
			break;
		case 0xC070:	/* Cortex-A7 */
			cpu_pmu = armv7_a7_pmu_init();
			break;
		}
	/* Intel CPUs [xscale]. */
	} else if (0x69 == implementor) {
		part_number = (cpuid >> 13) & 0x7;
		switch (part_number) {
		case 1:
			cpu_pmu = xscale1pmu_init();
			break;
		case 2:
			cpu_pmu = xscale2pmu_init();
			break;
		}
	/* Qualcomm CPUs */
	} else if (0x51 == implementor) {
		switch (part_number) {
		case 0x00F0:	/* 8x50 & 7x30 */
			cpu_pmu = armv7_scorpion_pmu_init();
			break;
		case 0x02D0:	/* 8x60 */
//			fabricmon_pmu_init();
			cpu_pmu = armv7_scorpionmp_pmu_init();
			break;
		case 0x0490:	/* 8960 sim */
		case 0x04D0:	/* 8960 */
		case 0x06F0:	/* 8064 */
//			fabricmon_pmu_init();
			cpu_pmu = armv7_krait_pmu_init();
			break;
		}
	}

	if (cpu_pmu) {
		pr_info("enabled with %s PMU driver, %d counters available\n",
			cpu_pmu->name, cpu_pmu->num_events);
		cpu_pmu_init(cpu_pmu);
		register_cpu_notifier(&pmu_cpu_notifier);
		armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
		cpu_pm_register_notifier(&perf_cpu_pm_notifier_block);
	} else {
		pr_info("no hardware support available\n");
	}

	return 0;
}
early_initcall(init_hw_perf_events);

/*
 * Callchain handling code.
 */

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long sp;
	unsigned long lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry *entry)
{
	struct frame_tail buftail;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;
	if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= buftail.fp)
		return NULL;

	return buftail.fp - 1;
}

void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct frame_tail __user *tail;

	tail = (struct frame_tail __user *)regs->ARM_fp - 1;

	while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
	       tail && !((unsigned long)tail & 0x3))
		tail = user_backtrace(tail, entry);
}

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int
callchain_trace(struct stackframe *fr,
		void *data)
{
	struct perf_callchain_entry *entry = data;
	perf_callchain_store(entry, fr->pc);
	return 0;
}

void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct stackframe fr;

	fr.fp = regs->ARM_fp;
	fr.sp = regs->ARM_sp;
	fr.lr = regs->ARM_lr;
	fr.pc = regs->ARM_pc;
	walk_stackframe(&fr, callchain_trace, entry);
}