/* Performance event support for sparc64.
 *
 * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net>
 *
 * This code is based almost entirely upon the x86 perf event
 * code, which is:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>

#include <asm/stacktrace.h>
#include <asm/cpudata.h>
#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>
#include <asm/cacheflush.h>

#include "kernel.h"
#include "kstack.h"

/* Sparc64 chips have two performance counters, 32-bits each, with
 * overflow interrupts generated on transition from 0xffffffff to 0.
 * The counters are accessed in one go using a 64-bit register.
 *
 * Both counters are controlled using a single control register.  The
 * only way to stop all sampling is to clear all of the context (user,
 * supervisor, hypervisor) sampling enable bits.  But these bits apply
 * to both counters, thus the two counters can't be enabled/disabled
 * individually.
 *
 * The control register has two event fields, one for each of the two
 * counters.  It's thus nearly impossible to have one counter going
 * while keeping the other one stopped.  Therefore it is possible to
 * get overflow interrupts for counters not currently "in use" and
 * that condition must be checked in the overflow interrupt handler.
 *
 * So we use a hack, in that we program inactive counters with the
 * "sw_count0" and "sw_count1" events.  These count how many times
 * the instruction "sethi %hi(0xfc000), %g0" is executed.  It's an
 * unusual way to encode a NOP and therefore will not trigger in
 * normal code.
 */

#define MAX_HWEVENTS			2
#define MAX_PCRS			1
#define MAX_PERIOD			((1UL << 32) - 1)

#define PIC_UPPER_INDEX			0
#define PIC_LOWER_INDEX			1
#define PIC_NO_INDEX			-1

struct cpu_hw_events {
	/* Number of events currently scheduled onto this cpu.
	 * This tells how many entries in the arrays below
	 * are valid.
	 */
	int			n_events;

	/* Number of new events added since the last hw_perf_disable().
	 * This works because the perf event layer always adds new
	 * events inside of a perf_{disable,enable}() sequence.
	 */
	int			n_added;

	/* Array of events currently scheduled on this cpu. */
	struct perf_event	*event[MAX_HWEVENTS];

	/* Array of encoded longs, specifying the %pcr register
	 * encoding and the mask of PIC counters this event can
	 * be scheduled on.  See perf_event_encode() et al.
	 */
	unsigned long		events[MAX_HWEVENTS];

	/* The current counter index assigned to an event.  When the
	 * event hasn't been programmed into the cpu yet, this will
	 * hold PIC_NO_INDEX.  The event->hw.idx value tells us where
	 * we ought to schedule the event.
	 */
	int			current_idx[MAX_HWEVENTS];

	/* Software copy of %pcr register(s) on this cpu. */
	u64			pcr[MAX_HWEVENTS];

	/* Enable/disable state. */
	int			enabled;

	unsigned int		group_flag;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };

/* An event map describes the characteristics of a performance
 * counter event.  In particular it gives the encoding as well as
 * a mask telling which counters the event can be measured on.
 */
struct perf_event_map {
	u16 encoding;
	u8 pic_mask;
#define PIC_NONE	0x00
#define PIC_UPPER	0x01
#define PIC_LOWER	0x02
};

/* Encode a perf_event_map entry into a long. */
static unsigned long perf_event_encode(const struct perf_event_map *pmap)
{
	return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
}

static u8 perf_event_get_msk(unsigned long val)
{
	return val & 0xff;
}

static u64 perf_event_get_enc(unsigned long val)
{
	return val >> 16;
}
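
/* Worked example (illustrative values only): a map entry
 * { .encoding = 0x0009, .pic_mask = PIC_LOWER } encodes to
 * (0x0009 << 16) | 0x02 == 0x00090002; perf_event_get_enc()
 * recovers 0x0009 and perf_event_get_msk() recovers PIC_LOWER.
 */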

#define C(x) PERF_COUNT_HW_CACHE_##x

#define CACHE_OP_UNSUPPORTED	0xfffe
#define CACHE_OP_NONSENSE	0xffff

typedef struct perf_event_map cache_map_t
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

struct sparc_pmu {
	const struct perf_event_map	*(*event_map)(int);
	const cache_map_t		*cache_map;
	int				max_events;
	u32				(*read_pmc)(int);
	void				(*write_pmc)(int, u64);
	int				upper_shift;
	int				lower_shift;
	int				event_mask;
	int				user_bit;
	int				priv_bit;
	int				hv_bit;
	int				irq_bit;
	int				upper_nop;
	int				lower_nop;
	unsigned int			flags;
#define SPARC_PMU_ALL_EXCLUDES_SAME	0x00000001
#define SPARC_PMU_HAS_CONFLICTS		0x00000002
	int				max_hw_events;
	int				num_pcrs;
	int				num_pic_regs;
};

static u32 sparc_default_read_pmc(int idx)
{
	u64 val;

	val = pcr_ops->read_pic(0);
	if (idx == PIC_UPPER_INDEX)
		val >>= 32;

	return val & 0xffffffff;
}

static void sparc_default_write_pmc(int idx, u64 val)
{
	u64 shift, mask, pic;

	shift = 0;
	if (idx == PIC_UPPER_INDEX)
		shift = 32;

	mask = ((u64) 0xffffffff) << shift;
	val <<= shift;

	pic = pcr_ops->read_pic(0);
	pic &= ~mask;
	pic |= val;
	pcr_ops->write_pic(0, pic);
}
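
/* Note: both 32-bit counters live in the single 64-bit PIC register,
 * so writing one counter is a read-modify-write of the shared register.
 * E.g. sparc_default_write_pmc(PIC_UPPER_INDEX, 0x12345678) replaces
 * only bits 63:32 of the PIC, preserving the lower counter.
 * (Illustrative value, not a real event count.)
 */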

static const struct perf_event_map ultra3_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};

static const struct perf_event_map *ultra3_event_map(int event_id)
{
	return &ultra3_perfmon_event_map[event_id];
}

static const cache_map_t ultra3_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
		[C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu ultra3_pmu = {
	.event_map	= ultra3_event_map,
	.cache_map	= &ultra3_cache_map,
	.max_events	= ARRAY_SIZE(ultra3_perfmon_event_map),
	.read_pmc	= sparc_default_read_pmc,
	.write_pmc	= sparc_default_write_pmc,
	.upper_shift	= 11,
	.lower_shift	= 4,
	.event_mask	= 0x3f,
	.user_bit	= PCR_UTRACE,
	.priv_bit	= PCR_STRACE,
	.upper_nop	= 0x1c,
	.lower_nop	= 0x14,
	.flags		= (SPARC_PMU_ALL_EXCLUDES_SAME |
			   SPARC_PMU_HAS_CONFLICTS),
	.max_hw_events	= 2,
	.num_pcrs	= 1,
	.num_pic_regs	= 1,
};

/* Niagara1 is very limited.  The upper PIC is hard-locked to count
 * only instructions, so it is free-running, which creates all kinds of
 * problems.  Some hardware designs make one wonder if the creator
 * even looked at how this stuff gets used by software.
 */
static const struct perf_event_map niagara1_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
};

static const struct perf_event_map *niagara1_event_map(int event_id)
{
	return &niagara1_perfmon_event_map[event_id];
}

static const cache_map_t niagara1_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
		[C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
		[C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu niagara1_pmu = {
	.event_map	= niagara1_event_map,
	.cache_map	= &niagara1_cache_map,
	.max_events	= ARRAY_SIZE(niagara1_perfmon_event_map),
	.read_pmc	= sparc_default_read_pmc,
	.write_pmc	= sparc_default_write_pmc,
	.upper_shift	= 0,
	.lower_shift	= 4,
	.event_mask	= 0x7,
	.user_bit	= PCR_UTRACE,
	.priv_bit	= PCR_STRACE,
	.upper_nop	= 0x0,
	.lower_nop	= 0x0,
	.flags		= (SPARC_PMU_ALL_EXCLUDES_SAME |
			   SPARC_PMU_HAS_CONFLICTS),
	.max_hw_events	= 2,
	.num_pcrs	= 1,
	.num_pic_regs	= 1,
};

static const struct perf_event_map niagara2_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};

static const struct perf_event_map *niagara2_event_map(int event_id)
{
	return &niagara2_perfmon_event_map[event_id];
}

static const cache_map_t niagara2_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
		[C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu niagara2_pmu = {
	.event_map	= niagara2_event_map,
	.cache_map	= &niagara2_cache_map,
	.max_events	= ARRAY_SIZE(niagara2_perfmon_event_map),
	.read_pmc	= sparc_default_read_pmc,
	.write_pmc	= sparc_default_write_pmc,
	.upper_shift	= 19,
	.lower_shift	= 6,
	.event_mask	= 0xfff,
	.user_bit	= PCR_UTRACE,
	.priv_bit	= PCR_STRACE,
	.hv_bit		= PCR_N2_HTRACE,
	.irq_bit	= 0x30,
	.upper_nop	= 0x220,
	.lower_nop	= 0x220,
	.flags		= (SPARC_PMU_ALL_EXCLUDES_SAME |
			   SPARC_PMU_HAS_CONFLICTS),
	.max_hw_events	= 2,
	.num_pcrs	= 1,
	.num_pic_regs	= 1,
};

static const struct sparc_pmu *sparc_pmu __read_mostly;

static u64 event_encoding(u64 event_id, int idx)
{
	if (idx == PIC_UPPER_INDEX)
		event_id <<= sparc_pmu->upper_shift;
	else
		event_id <<= sparc_pmu->lower_shift;
	return event_id;
}

static u64 mask_for_index(int idx)
{
	return event_encoding(sparc_pmu->event_mask, idx);
}

static u64 nop_for_index(int idx)
{
	return event_encoding(idx == PIC_UPPER_INDEX ?
			      sparc_pmu->upper_nop :
			      sparc_pmu->lower_nop, idx);
}
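
/* For example, with the ultra3 PMU (event_mask 0x3f, upper_shift 11),
 * mask_for_index(PIC_UPPER_INDEX) yields 0x3f << 11, i.e. the event
 * select field of the upper counter within the %pcr, and
 * nop_for_index() places the sw_count NOP encoding in that same field.
 * (Illustrative, derived from the table values above.)
 */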

static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
	u64 val, mask = mask_for_index(idx);
	int pcr_index = 0;

	if (sparc_pmu->num_pcrs > 1)
		pcr_index = idx;

	val = cpuc->pcr[pcr_index];
	val &= ~mask;
	val |= hwc->config;
	cpuc->pcr[pcr_index] = val;

	pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]);
}

static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
	u64 mask = mask_for_index(idx);
	u64 nop = nop_for_index(idx);
	int pcr_index = 0;
	u64 val;

	if (sparc_pmu->num_pcrs > 1)
		pcr_index = idx;

	val = cpuc->pcr[pcr_index];
	val &= ~mask;
	val |= nop;
	cpuc->pcr[pcr_index] = val;

	pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]);
}

static u64 sparc_perf_event_update(struct perf_event *event,
				   struct hw_perf_event *hwc, int idx)
{
	int shift = 64 - 32;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = sparc_pmu->read_pmc(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static int sparc_perf_event_set_period(struct perf_event *event,
				       struct hw_perf_event *hwc, int idx)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	if (left > MAX_PERIOD)
		left = MAX_PERIOD;

	local64_set(&hwc->prev_count, (u64)-left);

	sparc_pmu->write_pmc(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}
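
/* The counter is programmed to -left so that it overflows (wraps from
 * 0xffffffff to 0) after exactly 'left' more events.  E.g. left = 1000
 * programs 0xfffffc18, which reaches zero after 1000 increments.
 * (Illustrative value.)
 */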

/* If performance event entries have been added, move existing
 * events around (if necessary) and then assign new entries to
 * counters.
 */
static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
{
	int i;

	if (!cpuc->n_added)
		goto out;

	/* Read in the counters which are moving. */
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];

		if (cpuc->current_idx[i] != PIC_NO_INDEX &&
		    cpuc->current_idx[i] != cp->hw.idx) {
			sparc_perf_event_update(cp, &cp->hw,
						cpuc->current_idx[i]);
			cpuc->current_idx[i] = PIC_NO_INDEX;
		}
	}

	/* Assign to counters all unassigned events. */
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];
		struct hw_perf_event *hwc = &cp->hw;
		int idx = hwc->idx;
		u64 enc;

		if (cpuc->current_idx[i] != PIC_NO_INDEX)
			continue;

		sparc_perf_event_set_period(cp, hwc, idx);
		cpuc->current_idx[i] = idx;

		enc = perf_event_get_enc(cpuc->events[i]);
		pcr &= ~mask_for_index(idx);
		if (hwc->state & PERF_HES_STOPPED)
			pcr |= nop_for_index(idx);
		else
			pcr |= event_encoding(enc, idx);
	}
out:
	return pcr;
}

static void sparc_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	if (cpuc->n_events) {
		u64 pcr = maybe_change_configuration(cpuc, cpuc->pcr[0]);

		/* We require that all of the events have the same
		 * configuration, so just fetch the settings from the
		 * first entry.
		 */
		cpuc->pcr[0] = pcr | cpuc->event[0]->hw.config_base;
	}

	pcr_ops->write_pcr(0, cpuc->pcr[0]);
}

static void sparc_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int i;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	cpuc->n_added = 0;

	for (i = 0; i < sparc_pmu->num_pcrs; i++) {
		u64 val = cpuc->pcr[i];

		val &= ~(sparc_pmu->user_bit | sparc_pmu->priv_bit |
			 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
		cpuc->pcr[i] = val;
		pcr_ops->write_pcr(i, cpuc->pcr[i]);
	}
}

static int active_event_index(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	int i;

	for (i = 0; i < cpuc->n_events; i++) {
		if (cpuc->event[i] == event)
			break;
	}
	BUG_ON(i == cpuc->n_events);
	return cpuc->current_idx[i];
}

static void sparc_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
		sparc_perf_event_set_period(event, &event->hw, idx);
	}

	event->hw.state = 0;

	sparc_pmu_enable_event(cpuc, &event->hw, idx);
}

static void sparc_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);

	if (!(event->hw.state & PERF_HES_STOPPED)) {
		sparc_pmu_disable_event(cpuc, &event->hw, idx);
		event->hw.state |= PERF_HES_STOPPED;
	}

	if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
		sparc_perf_event_update(event, &event->hw, idx);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

static void sparc_pmu_del(struct perf_event *event, int _flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;
	int i;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event[i]) {
			/* Absorb the final count and turn off the
			 * event.
			 */
			sparc_pmu_stop(event, PERF_EF_UPDATE);

			/* Shift remaining entries down into
			 * the existing slot.
			 */
			while (++i < cpuc->n_events) {
				cpuc->event[i - 1] = cpuc->event[i];
				cpuc->events[i - 1] = cpuc->events[i];
				cpuc->current_idx[i - 1] =
					cpuc->current_idx[i];
			}

			perf_event_update_userpage(event);

			cpuc->n_events--;
			break;
		}
	}

	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}

static void sparc_pmu_read(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);
	struct hw_perf_event *hwc = &event->hw;

	sparc_perf_event_update(event, hwc, idx);
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);

static void perf_stop_nmi_watchdog(void *unused)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int i;

	stop_nmi_watchdog(NULL);
	for (i = 0; i < sparc_pmu->num_pcrs; i++)
		cpuc->pcr[i] = pcr_ops->read_pcr(i);
}
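/* The perf counters and the NMI watchdog share the %pcr, so the
 * watchdog is stopped (and the live %pcr values snapshotted above)
 * while any perf events exist, and restarted when the last event
 * is released.
 */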
void perf_event_grab_pmc(void)
{
	if (atomic_inc_not_zero(&active_events))
		return;

	mutex_lock(&pmc_grab_mutex);
	if (atomic_read(&active_events) == 0) {
		if (atomic_read(&nmi_active) > 0) {
			on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
			BUG_ON(atomic_read(&nmi_active) != 0);
		}
		atomic_inc(&active_events);
	}
	mutex_unlock(&pmc_grab_mutex);
}

void perf_event_release_pmc(void)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
		if (atomic_read(&nmi_active) == 0)
			on_each_cpu(start_nmi_watchdog, NULL, 1);
		mutex_unlock(&pmc_grab_mutex);
	}
}

static const struct perf_event_map *sparc_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct perf_event_map *pmap;

	if (!sparc_pmu->cache_map)
		return ERR_PTR(-ENOENT);

	cache_type = (config >>  0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);

	if (pmap->encoding == CACHE_OP_UNSUPPORTED)
		return ERR_PTR(-ENOENT);

	if (pmap->encoding == CACHE_OP_NONSENSE)
		return ERR_PTR(-EINVAL);

	return pmap;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	perf_event_release_pmc();
}

/* Make sure all events can be scheduled into the hardware at
 * the same time.  This is simplified by the fact that we only
 * need to support 2 simultaneous HW events.
 *
 * As a side effect, the evts[]->hw.idx values will be assigned
 * on success.  These are pending indexes.  When the events are
 * actually programmed into the chip, these values will propagate
 * to the per-cpu cpuc->current_idx[] slots, see the code in
 * maybe_change_configuration() for details.
 */
static int sparc_check_constraints(struct perf_event **evts,
				   unsigned long *events, int n_ev)
{
	u8 msk0 = 0, msk1 = 0;
	int idx0 = 0;

	/* This case is possible when we are invoked from
	 * hw_perf_group_sched_in().
	 */
	if (!n_ev)
		return 0;

	if (n_ev > sparc_pmu->max_hw_events)
		return -1;

	if (!(sparc_pmu->flags & SPARC_PMU_HAS_CONFLICTS)) {
		int i;

		for (i = 0; i < n_ev; i++)
			evts[i]->hw.idx = i;
		return 0;
	}

	msk0 = perf_event_get_msk(events[0]);
	if (n_ev == 1) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}
	BUG_ON(n_ev != 2);
	msk1 = perf_event_get_msk(events[1]);

	/* If both events can go on any counter, OK. */
	if (msk0 == (PIC_UPPER | PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER))
		goto success;

	/* If one event is limited to a specific counter,
	 * and the other can go on both, OK.
	 */
	if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER)) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}

	if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
	    msk0 == (PIC_UPPER | PIC_LOWER)) {
		if (msk1 & PIC_UPPER)
			idx0 = 1;
		goto success;
	}

	/* If the events are fixed to different counters, OK. */
	if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
	    (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}

	/* Otherwise, there is a conflict. */
	return -1;

success:
	evts[0]->hw.idx = idx0;
	if (n_ev == 2)
		evts[1]->hw.idx = idx0 ^ 1;
	return 0;
}

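/* All co-scheduled events must agree on the user/kernel/hv exclusion
 * bits, because a single %pcr enable field governs every counter on
 * these chips; see SPARC_PMU_ALL_EXCLUDES_SAME and the comment at the
 * top of this file.
 */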
static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	struct perf_event *event;
	int i, n, first;

	if (!(sparc_pmu->flags & SPARC_PMU_ALL_EXCLUDES_SAME))
		return 0;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; i++) {
		event = evts[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	return 0;
}

static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *evts[], unsigned long *events,
			  int *current_idx)
{
	struct perf_event *event;
	int n = 0;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		evts[n] = group;
		events[n] = group->hw.event_base;
		current_idx[n++] = PIC_NO_INDEX;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			evts[n] = event;
			events[n] = event->hw.event_base;
			current_idx[n++] = PIC_NO_INDEX;
		}
	}
	return n;
}

static int sparc_pmu_add(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int n0, ret = -EAGAIN;
	unsigned long flags;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	n0 = cpuc->n_events;
	if (n0 >= sparc_pmu->max_hw_events)
		goto out;

	cpuc->event[n0] = event;
	cpuc->events[n0] = event->hw.event_base;
	cpuc->current_idx[n0] = PIC_NO_INDEX;

	event->hw.state = PERF_HES_UPTODATE;
	if (!(ef_flags & PERF_EF_START))
		event->hw.state |= PERF_HES_STOPPED;

	/*
	 * If group events scheduling transaction was started,
	 * skip the schedulability test here, it will be performed
	 * at commit time (->commit_txn) as a whole.
	 */
	if (cpuc->group_flag & PERF_EVENT_TXN)
		goto nocheck;

	if (check_excludes(cpuc->event, n0, 1))
		goto out;
	if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
		goto out;

nocheck:
	cpuc->n_events++;
	cpuc->n_added++;

	ret = 0;
out:
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
	return ret;
}

static int sparc_pmu_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct perf_event *evts[MAX_HWEVENTS];
	struct hw_perf_event *hwc = &event->hw;
	unsigned long events[MAX_HWEVENTS];
	int current_idx_dmy[MAX_HWEVENTS];
	const struct perf_event_map *pmap;
	int n;

	if (atomic_read(&nmi_active) < 0)
		return -ENODEV;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (attr->type) {
	case PERF_TYPE_HARDWARE:
		if (attr->config >= sparc_pmu->max_events)
			return -EINVAL;
		pmap = sparc_pmu->event_map(attr->config);
		break;

	case PERF_TYPE_HW_CACHE:
		pmap = sparc_map_cache_event(attr->config);
		if (IS_ERR(pmap))
			return PTR_ERR(pmap);
		break;

	case PERF_TYPE_RAW:
		pmap = NULL;
		break;

	default:
		return -ENOENT;
	}

	if (pmap) {
		hwc->event_base = perf_event_encode(pmap);
	} else {
		/*
		 * User gives us "(encoding << 16) | pic_mask" for
		 * PERF_TYPE_RAW events.
		 */
		hwc->event_base = attr->config;
	}

	/* We save the enable bits in the config_base. */
	hwc->config_base = sparc_pmu->irq_bit;
	if (!attr->exclude_user)
		hwc->config_base |= sparc_pmu->user_bit;
	if (!attr->exclude_kernel)
		hwc->config_base |= sparc_pmu->priv_bit;
	if (!attr->exclude_hv)
		hwc->config_base |= sparc_pmu->hv_bit;

	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
				   sparc_pmu->max_hw_events - 1,
				   evts, events, current_idx_dmy);
		if (n < 0)
			return -EINVAL;
	}
	events[n] = hwc->event_base;
	evts[n] = event;

	if (check_excludes(evts, n, 1))
		return -EINVAL;

	if (sparc_check_constraints(evts, events, n + 1))
		return -EINVAL;

	hwc->idx = PIC_NO_INDEX;

	/* Try to do all error checking before this point, as unwinding
	 * state after grabbing the PMC is difficult.
	 */
	perf_event_grab_pmc();
	event->destroy = hw_perf_event_destroy;

	if (!hwc->sample_period) {
		hwc->sample_period = MAX_PERIOD;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	return 0;
}

/*
 * Start group events scheduling transaction
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time
 */
static void sparc_pmu_start_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	perf_pmu_disable(pmu);
	cpuhw->group_flag |= PERF_EVENT_TXN;
}

/*
 * Stop group events scheduling transaction
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
static void sparc_pmu_cancel_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
}

/*
 * Commit group events scheduling transaction
 * Perform the group schedulability test as a whole
 * Return 0 if success
 */
static int sparc_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int n;

	if (!sparc_pmu)
		return -EINVAL;

	cpuc = &__get_cpu_var(cpu_hw_events);
	n = cpuc->n_events;
	if (check_excludes(cpuc->event, 0, n))
		return -EINVAL;
	if (sparc_check_constraints(cpuc->event, cpuc->events, n))
		return -EAGAIN;

	cpuc->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
	return 0;
}

static struct pmu pmu = {
	.pmu_enable	= sparc_pmu_enable,
	.pmu_disable	= sparc_pmu_disable,
	.event_init	= sparc_pmu_event_init,
	.add		= sparc_pmu_add,
	.del		= sparc_pmu_del,
	.start		= sparc_pmu_start,
	.stop		= sparc_pmu_stop,
	.read		= sparc_pmu_read,
	.start_txn	= sparc_pmu_start_txn,
	.cancel_txn	= sparc_pmu_cancel_txn,
	.commit_txn	= sparc_pmu_commit_txn,
};

void perf_event_print_debug(void)
{
	unsigned long flags;
	int cpu, i;

	if (!sparc_pmu)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();

	pr_info("\n");
	for (i = 0; i < sparc_pmu->num_pcrs; i++)
		pr_info("CPU#%d: PCR%d[%016llx]\n",
			cpu, i, pcr_ops->read_pcr(i));
	for (i = 0; i < sparc_pmu->num_pic_regs; i++)
		pr_info("CPU#%d: PIC%d[%016llx]\n",
			cpu, i, pcr_ops->read_pic(i));

	local_irq_restore(flags);
}

static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
					    unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int i;

	if (!atomic_read(&active_events))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	cpuc = &__get_cpu_var(cpu_hw_events);

	/* If the PMU has the TOE IRQ enable bits, we need to do a
	 * dummy write to the %pcr to clear the overflow bits and thus
	 * the interrupt.
	 *
	 * Do this before we peek at the counters to determine
	 * overflow so we don't lose any events.
	 */
	if (sparc_pmu->irq_bit &&
	    sparc_pmu->num_pcrs == 1)
		pcr_ops->write_pcr(0, cpuc->pcr[0]);

	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *event = cpuc->event[i];
		int idx = cpuc->current_idx[i];
		struct hw_perf_event *hwc;
		u64 val;

		if (sparc_pmu->irq_bit &&
		    sparc_pmu->num_pcrs > 1)
			pcr_ops->write_pcr(idx, cpuc->pcr[idx]);

		hwc = &event->hw;
		val = sparc_perf_event_update(event, hwc, idx);
		if (val & (1ULL << 31))
			continue;

		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!sparc_perf_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, &data, regs))
			sparc_pmu_stop(event, 0);
	}

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call		= perf_event_nmi_handler,
};

static bool __init supported_pmu(void)
{
	if (!strcmp(sparc_pmu_type, "ultra3") ||
	    !strcmp(sparc_pmu_type, "ultra3+") ||
	    !strcmp(sparc_pmu_type, "ultra3i") ||
	    !strcmp(sparc_pmu_type, "ultra4+")) {
		sparc_pmu = &ultra3_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara")) {
		sparc_pmu = &niagara1_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara2") ||
	    !strcmp(sparc_pmu_type, "niagara3")) {
		sparc_pmu = &niagara2_pmu;
		return true;
	}
	return false;
}

int __init init_hw_perf_events(void)
{
	pr_info("Performance events: ");

	if (!supported_pmu()) {
		pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
		return 0;
	}

	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
	register_die_notifier(&perf_event_nmi_notifier);

	return 0;
}
early_initcall(init_hw_perf_events);

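/* Background assumed for the frame walks below: the sparc64 ABI keeps
 * stack/frame pointers biased by STACK_BIAS (2047 bytes), so the real
 * frame address is %fp + STACK_BIAS; 32-bit compat tasks use plain
 * unbiased 32-bit frame pointers, which is why perf_callchain_user_32()
 * masks with 0xffffffffUL and adds no bias.
 */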
void perf_callchain_kernel(struct perf_callchain_entry *entry,
			   struct pt_regs *regs)
{
	unsigned long ksp, fp;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int graph = 0;
#endif

	stack_trace_flush();

	perf_callchain_store(entry, regs->tpc);

	ksp = regs->u_regs[UREG_I6];
	fp = ksp + STACK_BIAS;
	do {
		struct sparc_stackf *sf;
		struct pt_regs *regs;
		unsigned long pc;

		if (!kstack_valid(current_thread_info(), fp))
			break;

		sf = (struct sparc_stackf *) fp;
		regs = (struct pt_regs *) (sf + 1);

		if (kstack_is_trap_frame(current_thread_info(), regs)) {
			if (user_mode(regs))
				break;
			pc = regs->tpc;
			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
		} else {
			pc = sf->callers_pc;
			fp = (unsigned long)sf->fp + STACK_BIAS;
		}
		perf_callchain_store(entry, pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
			int index = current->curr_ret_stack;
			if (current->ret_stack && index >= graph) {
				pc = current->ret_stack[index - graph].ret;
				perf_callchain_store(entry, pc);
				graph++;
			}
		}
#endif
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}

static void perf_callchain_user_64(struct perf_callchain_entry *entry,
				   struct pt_regs *regs)
{
	unsigned long ufp;

	perf_callchain_store(entry, regs->tpc);

	ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
	do {
		struct sparc_stackf *usf, sf;
		unsigned long pc;

		usf = (struct sparc_stackf *) ufp;
		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
			break;

		pc = sf.callers_pc;
		ufp = (unsigned long)sf.fp + STACK_BIAS;
		perf_callchain_store(entry, pc);
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}

static void perf_callchain_user_32(struct perf_callchain_entry *entry,
				   struct pt_regs *regs)
{
	unsigned long ufp;

	perf_callchain_store(entry, regs->tpc);

	ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
	do {
		struct sparc_stackf32 *usf, sf;
		unsigned long pc;

		usf = (struct sparc_stackf32 *) ufp;
		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
			break;

		pc = sf.callers_pc;
		ufp = (unsigned long)sf.fp;
		perf_callchain_store(entry, pc);
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}

void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	flushw_user();
	if (test_thread_flag(TIF_32BIT))
		perf_callchain_user_32(entry, regs);
	else
		perf_callchain_user_64(entry, regs);
}