/* Performance event support for sparc64.
 *
 * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net>
 *
 * This code is based almost entirely upon the x86 perf event
 * code, which is:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>

#include <asm/stacktrace.h>
#include <asm/cpudata.h>
#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>
#include <asm/cacheflush.h>

#include "kernel.h"
#include "kstack.h"

/* Sparc64 chips have two performance counters, 32-bits each, with
 * overflow interrupts generated on transition from 0xffffffff to 0.
 * The counters are accessed in one go using a single 64-bit register.
 *
 * Both counters are controlled using a single control register.  The
 * only way to stop all sampling is to clear all of the context (user,
 * supervisor, hypervisor) sampling enable bits.  But these bits apply
 * to both counters, thus the two counters can't be enabled/disabled
 * individually.
 *
 * The control register has two event fields, one for each of the two
 * counters.  It's thus nearly impossible to have one counter going
 * while keeping the other one stopped.  Therefore it is possible to
 * get overflow interrupts for counters not currently "in use" and
 * that condition must be checked in the overflow interrupt handler.
 *
 * So we use a hack, in that we program inactive counters with the
 * "sw_count0" and "sw_count1" events.  These count how many times
 * the instruction "sethi %hi(0xfc000), %g0" is executed.  It's an
 * unusual way to encode a NOP and therefore will not trigger in
 * normal code.
 */

#define MAX_HWEVENTS			4
#define MAX_PCRS			4
#define MAX_PERIOD			((1UL << 32) - 1)

#define PIC_UPPER_INDEX			0
#define PIC_LOWER_INDEX			1
#define PIC_NO_INDEX			-1

struct cpu_hw_events {
	/* Number of events currently scheduled onto this cpu.
	 * This tells how many entries in the arrays below
	 * are valid.
	 */
	int			n_events;

	/* Number of new events added since the last hw_perf_disable().
	 * This works because the perf event layer always adds new
	 * events inside of a perf_{disable,enable}() sequence.
	 */
	int			n_added;

	/* Array of events currently scheduled on this cpu. */
	struct perf_event	*event[MAX_HWEVENTS];

	/* Array of encoded longs, specifying the %pcr register
	 * encoding and the mask of PIC counters this event can
	 * be scheduled on.  See perf_event_encode() et al.
	 */
	unsigned long		events[MAX_HWEVENTS];

	/* The current counter index assigned to an event.  When the
	 * event hasn't been programmed into the cpu yet, this will
	 * hold PIC_NO_INDEX.  The event->hw.idx value tells us where
	 * we ought to schedule the event.
	 */
	int			current_idx[MAX_HWEVENTS];

	/* Software copy of %pcr register(s) on this cpu. */
	u64			pcr[MAX_HWEVENTS];

	/* Enabled/disabled state. */
	int			enabled;

	unsigned int		group_flag;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };

/* An event map describes the characteristics of a performance
 * counter event.  In particular it gives the encoding as well as
 * a mask telling which counters the event can be measured on.
 */
struct perf_event_map {
	u16	encoding;
	u8	pic_mask;
#define PIC_NONE	0x00
#define PIC_UPPER	0x01
#define PIC_LOWER	0x02
};

/* Encode a perf_event_map entry into a long. */
static unsigned long perf_event_encode(const struct perf_event_map *pmap)
{
	return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
}

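/* The helpers below unpack that value: the PIC counter mask lives in
 * the low byte and the raw event encoding occupies bits 16 and up.
 */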
static u8 perf_event_get_msk(unsigned long val)
{
	return val & 0xff;
}

static u64 perf_event_get_enc(unsigned long val)
{
	return val >> 16;
}

#define C(x) PERF_COUNT_HW_CACHE_##x

#define CACHE_OP_UNSUPPORTED	0xfffe
#define CACHE_OP_NONSENSE	0xffff

typedef struct perf_event_map cache_map_t
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

struct sparc_pmu {
	const struct perf_event_map	*(*event_map)(int);
	const cache_map_t		*cache_map;
	int				max_events;
	u32				(*read_pmc)(int);
	void				(*write_pmc)(int, u64);
	int				upper_shift;
	int				lower_shift;
	int				event_mask;
	int				user_bit;
	int				priv_bit;
	int				hv_bit;
	int				irq_bit;
	int				upper_nop;
	int				lower_nop;
	unsigned int			flags;
#define SPARC_PMU_ALL_EXCLUDES_SAME	0x00000001
#define SPARC_PMU_HAS_CONFLICTS		0x00000002
	int				max_hw_events;
	int				num_pcrs;
	int				num_pic_regs;
};

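/* On chips older than SPARC-T4 both 32-bit counters live in a single
 * 64-bit %pic register, with the "upper" counter in bits 63:32.  These
 * default accessors slice the requested half in and out of that
 * register.
 */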
static u32 sparc_default_read_pmc(int idx)
{
	u64 val;

	val = pcr_ops->read_pic(0);
	if (idx == PIC_UPPER_INDEX)
		val >>= 32;

	return val & 0xffffffff;
}

static void sparc_default_write_pmc(int idx, u64 val)
{
	u64 shift, mask, pic;

	shift = 0;
	if (idx == PIC_UPPER_INDEX)
		shift = 32;

	mask = ((u64) 0xffffffff) << shift;
	val <<= shift;

	pic = pcr_ops->read_pic(0);
	pic &= ~mask;
	pic |= val;
	pcr_ops->write_pic(0, pic);
}

static const struct perf_event_map ultra3_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};

static const struct perf_event_map *ultra3_event_map(int event_id)
{
	return &ultra3_perfmon_event_map[event_id];
}

static const cache_map_t ultra3_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
		[C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu ultra3_pmu = {
	.event_map	= ultra3_event_map,
	.cache_map	= &ultra3_cache_map,
	.max_events	= ARRAY_SIZE(ultra3_perfmon_event_map),
	.read_pmc	= sparc_default_read_pmc,
	.write_pmc	= sparc_default_write_pmc,
	.upper_shift	= 11,
	.lower_shift	= 4,
	.event_mask	= 0x3f,
	.user_bit	= PCR_UTRACE,
	.priv_bit	= PCR_STRACE,
	.upper_nop	= 0x1c,
	.lower_nop	= 0x14,
	.flags		= (SPARC_PMU_ALL_EXCLUDES_SAME |
			   SPARC_PMU_HAS_CONFLICTS),
	.max_hw_events	= 2,
	.num_pcrs	= 1,
	.num_pic_regs	= 1,
};

/* Niagara1 is very limited.  The upper PIC is hard-locked to count
 * only instructions, so it is free running which creates all kinds of
 * problems.  Some hardware designs make one wonder if the creator
 * even looked at how this stuff gets used by software.
 */
static const struct perf_event_map niagara1_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
};

static const struct perf_event_map *niagara1_event_map(int event_id)
{
	return &niagara1_perfmon_event_map[event_id];
}

static const cache_map_t niagara1_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
		[C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
		[C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu niagara1_pmu = {
	.event_map	= niagara1_event_map,
	.cache_map	= &niagara1_cache_map,
	.max_events	= ARRAY_SIZE(niagara1_perfmon_event_map),
	.read_pmc	= sparc_default_read_pmc,
	.write_pmc	= sparc_default_write_pmc,
	.upper_shift	= 0,
	.lower_shift	= 4,
	.event_mask	= 0x7,
	.user_bit	= PCR_UTRACE,
	.priv_bit	= PCR_STRACE,
	.upper_nop	= 0x0,
	.lower_nop	= 0x0,
	.flags		= (SPARC_PMU_ALL_EXCLUDES_SAME |
			   SPARC_PMU_HAS_CONFLICTS),
	.max_hw_events	= 2,
	.num_pcrs	= 1,
	.num_pic_regs	= 1,
};

static const struct perf_event_map niagara2_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};

static const struct perf_event_map *niagara2_event_map(int event_id)
{
	return &niagara2_perfmon_event_map[event_id];
}

static const cache_map_t niagara2_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
		[C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu niagara2_pmu = {
	.event_map	= niagara2_event_map,
	.cache_map	= &niagara2_cache_map,
	.max_events	= ARRAY_SIZE(niagara2_perfmon_event_map),
	.read_pmc	= sparc_default_read_pmc,
	.write_pmc	= sparc_default_write_pmc,
	.upper_shift	= 19,
	.lower_shift	= 6,
	.event_mask	= 0xfff,
	.user_bit	= PCR_UTRACE,
	.priv_bit	= PCR_STRACE,
	.hv_bit		= PCR_N2_HTRACE,
	.irq_bit	= 0x30,
	.upper_nop	= 0x220,
	.lower_nop	= 0x220,
	.flags		= (SPARC_PMU_ALL_EXCLUDES_SAME |
			   SPARC_PMU_HAS_CONFLICTS),
	.max_hw_events	= 2,
	.num_pcrs	= 1,
	.num_pic_regs	= 1,
};

static const struct perf_event_map niagara4_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { (26 << 6) },
	[PERF_COUNT_HW_INSTRUCTIONS] = { (3 << 6) | 0x3f },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { (3 << 6) | 0x04 },
	[PERF_COUNT_HW_CACHE_MISSES] = { (16 << 6) | 0x07 },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { (4 << 6) | 0x01 },
	[PERF_COUNT_HW_BRANCH_MISSES] = { (25 << 6) | 0x0f },
};

static const struct perf_event_map *niagara4_event_map(int event_id)
{
	return &niagara4_perfmon_event_map[event_id];
}

static const cache_map_t niagara4_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { (3 << 6) | 0x04 },
		[C(RESULT_MISS)] = { (16 << 6) | 0x07 },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { (3 << 6) | 0x08 },
		[C(RESULT_MISS)] = { (16 << 6) | 0x07 },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { (3 << 6) | 0x3f },
		[C(RESULT_MISS)] = { (11 << 6) | 0x03 },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
		[C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { (3 << 6) | 0x04 },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { (3 << 6) | 0x08 },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { (17 << 6) | 0x3f },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { (6 << 6) | 0x3f },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static u32 sparc_vt_read_pmc(int idx)
{
	u64 val = pcr_ops->read_pic(idx);

	return val & 0xffffffff;
}

static void sparc_vt_write_pmc(int idx, u64 val)
{
	u64 pcr;

	/* There seems to be an internal latch on the overflow event
	 * on SPARC-T4 that prevents it from triggering unless you
	 * update the PIC exactly as we do here.  The requirement
	 * seems to be that you have to turn off event counting in the
	 * PCR around the PIC update.
	 *
	 * For example, after the following sequence:
	 *
	 * 1) set PIC to -1
	 * 2) enable event counting and overflow reporting in PCR
	 * 3) overflow triggers, softint 15 handler invoked
	 * 4) clear OV bit in PCR
	 * 5) write PIC to -1
	 *
	 * a subsequent overflow event will not trigger.  This
	 * sequence works on SPARC-T3 and previous chips.
	 */
	pcr = pcr_ops->read_pcr(idx);
	pcr_ops->write_pcr(idx, PCR_N4_PICNPT);

	pcr_ops->write_pic(idx, val & 0xffffffff);

	pcr_ops->write_pcr(idx, pcr);
}

static const struct sparc_pmu niagara4_pmu = {
	.event_map	= niagara4_event_map,
	.cache_map	= &niagara4_cache_map,
	.max_events	= ARRAY_SIZE(niagara4_perfmon_event_map),
	.read_pmc	= sparc_vt_read_pmc,
	.write_pmc	= sparc_vt_write_pmc,
	.upper_shift	= 5,
	.lower_shift	= 5,
	.event_mask	= 0x7ff,
	.user_bit	= PCR_N4_UTRACE,
	.priv_bit	= PCR_N4_STRACE,

	/* We explicitly don't support hypervisor tracing.  The T4
	 * generates the overflow event for precise events via a trap
	 * which will not be generated (ie. it's completely lost) if
	 * we happen to be in the hypervisor when the event triggers.
	 * Essentially, the overflow event reporting is completely
	 * unusable when you have hypervisor mode tracing enabled.
	 */
	.hv_bit		= 0,

	.irq_bit	= PCR_N4_TOE,
	.upper_nop	= 0,
	.lower_nop	= 0,
	.flags		= 0,
	.max_hw_events	= 4,
	.num_pcrs	= 4,
	.num_pic_regs	= 4,
};

static const struct sparc_pmu *sparc_pmu __read_mostly;

static u64 event_encoding(u64 event_id, int idx)
{
	if (idx == PIC_UPPER_INDEX)
		event_id <<= sparc_pmu->upper_shift;
	else
		event_id <<= sparc_pmu->lower_shift;
	return event_id;
}

static u64 mask_for_index(int idx)
{
	return event_encoding(sparc_pmu->event_mask, idx);
}

static u64 nop_for_index(int idx)
{
	return event_encoding(idx == PIC_UPPER_INDEX ?
			      sparc_pmu->upper_nop :
			      sparc_pmu->lower_nop, idx);
}

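/* Merge a single event's %pcr field into the cached copy and push it
 * out to the hardware.  On chips with more than one PCR each counter
 * index selects its own control register.
 */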
static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
	u64 val, mask = mask_for_index(idx);
	int pcr_index = 0;

	if (sparc_pmu->num_pcrs > 1)
		pcr_index = idx;

	val = cpuc->pcr[pcr_index];
	val &= ~mask;
	val |= hwc->config;
	cpuc->pcr[pcr_index] = val;

	pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]);
}

static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
	u64 mask = mask_for_index(idx);
	u64 nop = nop_for_index(idx);
	int pcr_index = 0;
	u64 val;

	if (sparc_pmu->num_pcrs > 1)
		pcr_index = idx;

	val = cpuc->pcr[pcr_index];
	val &= ~mask;
	val |= nop;
	cpuc->pcr[pcr_index] = val;

	pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]);
}

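/* Read the hardware counter and fold the delta since the last read
 * into event->count.  The cmpxchg loop closes the race against an
 * overflow interrupt updating prev_count underneath us, and the shift
 * pair confines the arithmetic to the 32-bit width of the PIC fields.
 */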
static u64 sparc_perf_event_update(struct perf_event *event,
				   struct hw_perf_event *hwc, int idx)
{
	int shift = 64 - 32;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = sparc_pmu->read_pmc(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

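/* Program the counter so that it overflows after the portion of the
 * sampling period that is still outstanding, by writing the two's
 * complement of that remaining count into the PIC.
 */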
static int sparc_perf_event_set_period(struct perf_event *event,
				       struct hw_perf_event *hwc, int idx)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	if (left > MAX_PERIOD)
		left = MAX_PERIOD;

	local64_set(&hwc->prev_count, (u64)-left);

	sparc_pmu->write_pmc(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

static void read_in_all_counters(struct cpu_hw_events *cpuc)
{
	int i;

	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];

		if (cpuc->current_idx[i] != PIC_NO_INDEX &&
		    cpuc->current_idx[i] != cp->hw.idx) {
			sparc_perf_event_update(cp, &cp->hw,
						cpuc->current_idx[i]);
			cpuc->current_idx[i] = PIC_NO_INDEX;
		}
	}
}

/* On this PMU all PICs are programmed using a single PCR.  Calculate
 * the combined control register value.
 *
 * For such chips we require that all of the events have the same
 * configuration, so just fetch the settings from the first entry.
 */
static void calculate_single_pcr(struct cpu_hw_events *cpuc)
{
	int i;

	if (!cpuc->n_added)
		goto out;

	/* Assign to counters all unassigned events. */
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];
		struct hw_perf_event *hwc = &cp->hw;
		int idx = hwc->idx;
		u64 enc;

		if (cpuc->current_idx[i] != PIC_NO_INDEX)
			continue;

		sparc_perf_event_set_period(cp, hwc, idx);
		cpuc->current_idx[i] = idx;

		enc = perf_event_get_enc(cpuc->events[i]);
		cpuc->pcr[0] &= ~mask_for_index(idx);
		if (hwc->state & PERF_HES_STOPPED)
			cpuc->pcr[0] |= nop_for_index(idx);
		else
			cpuc->pcr[0] |= event_encoding(enc, idx);
	}
out:
	cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
}

/* On this PMU each PIC has its own PCR control register. */
static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
{
	int i;

	if (!cpuc->n_added)
		goto out;

	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];
		struct hw_perf_event *hwc = &cp->hw;
		int idx = hwc->idx;
		u64 enc;

		if (cpuc->current_idx[i] != PIC_NO_INDEX)
			continue;

		sparc_perf_event_set_period(cp, hwc, idx);
		cpuc->current_idx[i] = idx;

		enc = perf_event_get_enc(cpuc->events[i]);
		cpuc->pcr[idx] &= ~mask_for_index(idx);
		if (hwc->state & PERF_HES_STOPPED)
			cpuc->pcr[idx] |= nop_for_index(idx);
		else
			cpuc->pcr[idx] |= event_encoding(enc, idx);
	}
out:
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];
		int idx = cp->hw.idx;

		cpuc->pcr[idx] |= cp->hw.config_base;
	}
}

/* If performance event entries have been added, move existing events
 * around (if necessary) and then assign new entries to counters.
 */
static void update_pcrs_for_enable(struct cpu_hw_events *cpuc)
{
	if (cpuc->n_added)
		read_in_all_counters(cpuc);

	if (sparc_pmu->num_pcrs == 1) {
		calculate_single_pcr(cpuc);
	} else {
		calculate_multiple_pcrs(cpuc);
	}
}

static void sparc_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int i;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	if (cpuc->n_events)
		update_pcrs_for_enable(cpuc);

	for (i = 0; i < sparc_pmu->num_pcrs; i++)
		pcr_ops->write_pcr(i, cpuc->pcr[i]);
}

static void sparc_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int i;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	cpuc->n_added = 0;

	for (i = 0; i < sparc_pmu->num_pcrs; i++) {
		u64 val = cpuc->pcr[i];

		val &= ~(sparc_pmu->user_bit | sparc_pmu->priv_bit |
			 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
		cpuc->pcr[i] = val;
		pcr_ops->write_pcr(i, cpuc->pcr[i]);
	}
}

static int active_event_index(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	int i;

	for (i = 0; i < cpuc->n_events; i++) {
		if (cpuc->event[i] == event)
			break;
	}
	BUG_ON(i == cpuc->n_events);
	return cpuc->current_idx[i];
}

static void sparc_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
		sparc_perf_event_set_period(event, &event->hw, idx);
	}

	event->hw.state = 0;

	sparc_pmu_enable_event(cpuc, &event->hw, idx);
}

static void sparc_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);

	if (!(event->hw.state & PERF_HES_STOPPED)) {
		sparc_pmu_disable_event(cpuc, &event->hw, idx);
		event->hw.state |= PERF_HES_STOPPED;
	}

	if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
		sparc_perf_event_update(event, &event->hw, idx);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

static void sparc_pmu_del(struct perf_event *event, int _flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;
	int i;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event[i]) {
			/* Absorb the final count and turn off the
			 * event.
			 */
			sparc_pmu_stop(event, PERF_EF_UPDATE);

			/* Shift remaining entries down into
			 * the existing slot.
			 */
			while (++i < cpuc->n_events) {
				cpuc->event[i - 1] = cpuc->event[i];
				cpuc->events[i - 1] = cpuc->events[i];
				cpuc->current_idx[i - 1] =
					cpuc->current_idx[i];
			}

			perf_event_update_userpage(event);

			cpuc->n_events--;
			break;
		}
	}

	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}

static void sparc_pmu_read(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);
	struct hw_perf_event *hwc = &event->hw;

	sparc_perf_event_update(event, hwc, idx);
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);

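/* The NMI watchdog drives the same performance counter hardware, so it
 * has to be stopped (and the current PCR values captured) before perf
 * events can take ownership of the counters.
 */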
static void perf_stop_nmi_watchdog(void *unused)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int i;

	stop_nmi_watchdog(NULL);
	for (i = 0; i < sparc_pmu->num_pcrs; i++)
		cpuc->pcr[i] = pcr_ops->read_pcr(i);
}

void perf_event_grab_pmc(void)
{
	if (atomic_inc_not_zero(&active_events))
		return;

	mutex_lock(&pmc_grab_mutex);
	if (atomic_read(&active_events) == 0) {
		if (atomic_read(&nmi_active) > 0) {
			on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
			BUG_ON(atomic_read(&nmi_active) != 0);
		}
		atomic_inc(&active_events);
	}
	mutex_unlock(&pmc_grab_mutex);
}

void perf_event_release_pmc(void)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
		if (atomic_read(&nmi_active) == 0)
			on_each_cpu(start_nmi_watchdog, NULL, 1);
		mutex_unlock(&pmc_grab_mutex);
	}
}

static const struct perf_event_map *sparc_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct perf_event_map *pmap;

	if (!sparc_pmu->cache_map)
		return ERR_PTR(-ENOENT);

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);

	if (pmap->encoding == CACHE_OP_UNSUPPORTED)
		return ERR_PTR(-ENOENT);

	if (pmap->encoding == CACHE_OP_NONSENSE)
		return ERR_PTR(-EINVAL);

	return pmap;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	perf_event_release_pmc();
}

/* Make sure all events can be scheduled into the hardware at
 * the same time.  This is simplified by the fact that we only
 * need to support 2 simultaneous HW events.
 *
 * As a side effect, the evts[]->hw.idx values will be assigned
 * on success.  These are pending indexes.  When the events are
 * actually programmed into the chip, these values will propagate
 * to the per-cpu cpuc->current_idx[] slots, see the code in
 * calculate_single_pcr()/calculate_multiple_pcrs() for details.
 */
static int sparc_check_constraints(struct perf_event **evts,
				   unsigned long *events, int n_ev)
{
	u8 msk0 = 0, msk1 = 0;
	int idx0 = 0;

	/* This case is possible when we are invoked from
	 * hw_perf_group_sched_in().
	 */
	if (!n_ev)
		return 0;

	if (n_ev > sparc_pmu->max_hw_events)
		return -1;

	if (!(sparc_pmu->flags & SPARC_PMU_HAS_CONFLICTS)) {
		int i;

		for (i = 0; i < n_ev; i++)
			evts[i]->hw.idx = i;
		return 0;
	}

	msk0 = perf_event_get_msk(events[0]);
	if (n_ev == 1) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}
	BUG_ON(n_ev != 2);
	msk1 = perf_event_get_msk(events[1]);

	/* If both events can go on any counter, OK. */
	if (msk0 == (PIC_UPPER | PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER))
		goto success;

	/* If one event is limited to a specific counter,
	 * and the other can go on both, OK.
	 */
	if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER)) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}

	if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
	    msk0 == (PIC_UPPER | PIC_LOWER)) {
		if (msk1 & PIC_UPPER)
			idx0 = 1;
		goto success;
	}

	/* If the events are fixed to different counters, OK. */
	if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
	    (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}

	/* Otherwise, there is a conflict. */
	return -1;

success:
	evts[0]->hw.idx = idx0;
	if (n_ev == 2)
		evts[1]->hw.idx = idx0 ^ 1;
	return 0;
}

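/* On chips where the user/supervisor/hypervisor trace enable bits are
 * shared by all counters, every event scheduled together must request
 * exactly the same exclusion settings.
 */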
static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	struct perf_event *event;
	int i, n, first;

	if (!(sparc_pmu->flags & SPARC_PMU_ALL_EXCLUDES_SAME))
		return 0;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; i++) {
		event = evts[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	return 0;
}

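/* Gather the group leader and all of its active hardware siblings into
 * the evts[]/events[] arrays so the whole group can be validated as a
 * unit.
 */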
static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *evts[], unsigned long *events,
			  int *current_idx)
{
	struct perf_event *event;
	int n = 0;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		evts[n] = group;
		events[n] = group->hw.event_base;
		current_idx[n++] = PIC_NO_INDEX;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			evts[n] = event;
			events[n] = event->hw.event_base;
			current_idx[n++] = PIC_NO_INDEX;
		}
	}
	return n;
}

static int sparc_pmu_add(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int n0, ret = -EAGAIN;
	unsigned long flags;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	n0 = cpuc->n_events;
	if (n0 >= sparc_pmu->max_hw_events)
		goto out;

	cpuc->event[n0] = event;
	cpuc->events[n0] = event->hw.event_base;
	cpuc->current_idx[n0] = PIC_NO_INDEX;

	event->hw.state = PERF_HES_UPTODATE;
	if (!(ef_flags & PERF_EF_START))
		event->hw.state |= PERF_HES_STOPPED;

	/*
	 * If a group event scheduling transaction was started,
	 * skip the schedulability test here; it will be performed
	 * at commit time (->commit_txn) as a whole.
	 */
	if (cpuc->group_flag & PERF_EVENT_TXN)
		goto nocheck;

	if (check_excludes(cpuc->event, n0, 1))
		goto out;
	if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
		goto out;

nocheck:
	cpuc->n_events++;
	cpuc->n_added++;

	ret = 0;
out:
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
	return ret;
}

Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001386static int sparc_pmu_event_init(struct perf_event *event)
David S. Miller59abbd12009-09-10 06:28:20 -07001387{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001388 struct perf_event_attr *attr = &event->attr;
David S. Miller01552f72009-09-27 20:43:07 -07001389 struct perf_event *evts[MAX_HWEVENTS];
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001390 struct hw_perf_event *hwc = &event->hw;
David S. Millera72a8a52009-09-28 17:35:20 -07001391 unsigned long events[MAX_HWEVENTS];
David S. Millere7bef6b2010-01-20 02:59:47 -08001392 int current_idx_dmy[MAX_HWEVENTS];
David S. Miller59abbd12009-09-10 06:28:20 -07001393 const struct perf_event_map *pmap;
David S. Miller01552f72009-09-27 20:43:07 -07001394 int n;
David S. Miller59abbd12009-09-10 06:28:20 -07001395
1396 if (atomic_read(&nmi_active) < 0)
1397 return -ENODEV;
1398
Stephane Eranian2481c5f2012-02-09 23:20:59 +01001399 /* does not support taken branch sampling */
1400 if (has_branch_stack(event))
1401 return -EOPNOTSUPP;
1402
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001403 switch (attr->type) {
1404 case PERF_TYPE_HARDWARE:
David S. Miller2ce4da22009-09-26 20:42:10 -07001405 if (attr->config >= sparc_pmu->max_events)
1406 return -EINVAL;
1407 pmap = sparc_pmu->event_map(attr->config);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001408 break;
1409
1410 case PERF_TYPE_HW_CACHE:
David S. Miller2ce4da22009-09-26 20:42:10 -07001411 pmap = sparc_map_cache_event(attr->config);
1412 if (IS_ERR(pmap))
1413 return PTR_ERR(pmap);
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001414 break;
1415
1416 case PERF_TYPE_RAW:
Ingo Molnard0303d72010-09-23 08:02:09 +02001417 pmap = NULL;
1418 break;
David S. Miller59abbd12009-09-10 06:28:20 -07001419
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001420 default:
1421 return -ENOENT;
1422
1423 }
1424
David S. Millerb343ae52010-09-12 17:20:24 -07001425 if (pmap) {
1426 hwc->event_base = perf_event_encode(pmap);
1427 } else {
Ingo Molnard0303d72010-09-23 08:02:09 +02001428 /*
1429 * User gives us "(encoding << 16) | pic_mask" for
David S. Millerb343ae52010-09-12 17:20:24 -07001430 * PERF_TYPE_RAW events.
1431 */
1432 hwc->event_base = attr->config;
1433 }
1434
David S. Millere7bef6b2010-01-20 02:59:47 -08001435 /* We save the enable bits in the config_base. */
David S. Miller496c07e2009-09-10 07:10:59 -07001436 hwc->config_base = sparc_pmu->irq_bit;
David S. Miller59abbd12009-09-10 06:28:20 -07001437 if (!attr->exclude_user)
David S. Miller7ac2ed22012-08-17 02:41:32 -07001438 hwc->config_base |= sparc_pmu->user_bit;
David S. Miller59abbd12009-09-10 06:28:20 -07001439 if (!attr->exclude_kernel)
David S. Miller7ac2ed22012-08-17 02:41:32 -07001440 hwc->config_base |= sparc_pmu->priv_bit;
David S. Miller91b92862009-09-10 07:09:06 -07001441 if (!attr->exclude_hv)
1442 hwc->config_base |= sparc_pmu->hv_bit;
David S. Miller59abbd12009-09-10 06:28:20 -07001443
David S. Miller01552f72009-09-27 20:43:07 -07001444 n = 0;
1445 if (event->group_leader != event) {
1446 n = collect_events(event->group_leader,
David S. Miller59660492012-08-17 02:33:44 -07001447 sparc_pmu->max_hw_events - 1,
David S. Millere7bef6b2010-01-20 02:59:47 -08001448 evts, events, current_idx_dmy);
David S. Miller01552f72009-09-27 20:43:07 -07001449 if (n < 0)
1450 return -EINVAL;
1451 }
David S. Millera72a8a52009-09-28 17:35:20 -07001452 events[n] = hwc->event_base;
David S. Miller01552f72009-09-27 20:43:07 -07001453 evts[n] = event;
1454
1455 if (check_excludes(evts, n, 1))
1456 return -EINVAL;
1457
David S. Millere7bef6b2010-01-20 02:59:47 -08001458 if (sparc_check_constraints(evts, events, n + 1))
David S. Millera72a8a52009-09-28 17:35:20 -07001459 return -EINVAL;
1460
David S. Millere7bef6b2010-01-20 02:59:47 -08001461 hwc->idx = PIC_NO_INDEX;
1462
David S. Miller01552f72009-09-27 20:43:07 -07001463 /* Try to do all error checking before this point, as unwinding
1464 * state after grabbing the PMC is difficult.
1465 */
1466 perf_event_grab_pmc();
1467 event->destroy = hw_perf_event_destroy;
1468
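	/* Pure counting events arrive with no sample_period; give them
	 * the maximum period so the 32-bit counter overflows (and thus
	 * interrupts) as rarely as possible.
	 */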
David S. Miller59abbd12009-09-10 06:28:20 -07001469 if (!hwc->sample_period) {
1470 hwc->sample_period = MAX_PERIOD;
1471 hwc->last_period = hwc->sample_period;
Peter Zijlstrae7850592010-05-21 14:43:08 +02001472 local64_set(&hwc->period_left, hwc->sample_period);
David S. Miller59abbd12009-09-10 06:28:20 -07001473 }
1474
David S. Miller59abbd12009-09-10 06:28:20 -07001475 return 0;
1476}
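/*
 * Editor's sketch of the user-space side (not part of this driver, and
 * the helper name below is hypothetical): a request that ends up in
 * sparc_pmu_event_init() above could be built like this.  There is no
 * libc wrapper for perf_event_open(), so it goes through syscall(2).
 *
 *	#include <linux/perf_event.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int open_cycle_counter(void)
 *	{
 *		struct perf_event_attr attr = { 0 };
 *
 *		attr.type       = PERF_TYPE_HARDWARE;
 *		attr.size       = sizeof(attr);
 *		attr.config     = PERF_COUNT_HW_CPU_CYCLES;
 *		attr.exclude_hv = 1;
 *
 *		return syscall(__NR_perf_event_open, &attr,
 *			       0, -1, -1, 0);
 *	}
 *
 * Leaving sample_period at zero makes this a pure counter, which is the
 * case handled by the MAX_PERIOD default above.
 */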
1477
Lin Minga13c3af2010-04-23 13:56:33 +08001478/*
1479 * Start group events scheduling transaction
1480 * Set the flag so that pmu::enable() does not perform the
1481 * schedulability test; it will be performed at commit time.
1482 */
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001483static void sparc_pmu_start_txn(struct pmu *pmu)
Lin Minga13c3af2010-04-23 13:56:33 +08001484{
1485 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
1486
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001487 perf_pmu_disable(pmu);
Peter Zijlstra8d2cacb2010-05-25 17:49:05 +02001488 cpuhw->group_flag |= PERF_EVENT_TXN;
Lin Minga13c3af2010-04-23 13:56:33 +08001489}
1490
1491/*
1492 * Stop group events scheduling transaction
1493 * Clear the flag so that pmu::enable() performs the
1494 * schedulability test again.
1495 */
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001496static void sparc_pmu_cancel_txn(struct pmu *pmu)
Lin Minga13c3af2010-04-23 13:56:33 +08001497{
1498 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
1499
Peter Zijlstra8d2cacb2010-05-25 17:49:05 +02001500 cpuhw->group_flag &= ~PERF_EVENT_TXN;
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001501 perf_pmu_enable(pmu);
Lin Minga13c3af2010-04-23 13:56:33 +08001502}
1503
1504/*
1505 * Commit group events scheduling transaction
1506 * Perform the group schedulability test as a whole
1507 * Return 0 if success
1508 */
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001509static int sparc_pmu_commit_txn(struct pmu *pmu)
Lin Minga13c3af2010-04-23 13:56:33 +08001510{
1511 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1512 int n;
1513
1514 if (!sparc_pmu)
1515 return -EINVAL;
1516
1518 n = cpuc->n_events;
1519 if (check_excludes(cpuc->event, 0, n))
1520 return -EINVAL;
1521 if (sparc_check_constraints(cpuc->event, cpuc->events, n))
1522 return -EAGAIN;
1523
Peter Zijlstra8d2cacb2010-05-25 17:49:05 +02001524 cpuc->group_flag &= ~PERF_EVENT_TXN;
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001525 perf_pmu_enable(pmu);
Lin Minga13c3af2010-04-23 13:56:33 +08001526 return 0;
1527}
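/*
 * Editor's note, a rough sketch only (simplified, not the literal core
 * code): the generic perf layer is expected to drive the three hooks
 * above roughly like this when it tries to schedule an event group:
 *
 *	pmu->start_txn(pmu);
 *	for each event in the group:
 *		if (pmu->add(event, PERF_EF_START) != 0)
 *			goto fail;
 *	if (pmu->commit_txn(pmu) == 0)
 *		return 0;
 * fail:
 *	remove the events added so far;
 *	pmu->cancel_txn(pmu);
 *	return -EAGAIN;
 *
 * This is what lets the ->add() path skip its per-event schedulability
 * check while PERF_EVENT_TXN is set (see the comment on
 * sparc_pmu_start_txn() above): the whole group is validated once in
 * sparc_pmu_commit_txn().
 */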
1528
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001529static struct pmu pmu = {
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001530 .pmu_enable = sparc_pmu_enable,
1531 .pmu_disable = sparc_pmu_disable,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001532 .event_init = sparc_pmu_event_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001533 .add = sparc_pmu_add,
1534 .del = sparc_pmu_del,
1535 .start = sparc_pmu_start,
1536 .stop = sparc_pmu_stop,
David S. Miller59abbd12009-09-10 06:28:20 -07001537 .read = sparc_pmu_read,
Lin Minga13c3af2010-04-23 13:56:33 +08001538 .start_txn = sparc_pmu_start_txn,
1539 .cancel_txn = sparc_pmu_cancel_txn,
1540 .commit_txn = sparc_pmu_commit_txn,
David S. Miller59abbd12009-09-10 06:28:20 -07001541};
1542
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001543void perf_event_print_debug(void)
David S. Miller59abbd12009-09-10 06:28:20 -07001544{
1545 unsigned long flags;
David S. Miller3f1a2092012-08-17 02:51:21 -07001546 int cpu, i;
David S. Miller59abbd12009-09-10 06:28:20 -07001547
1548 if (!sparc_pmu)
1549 return;
1550
1551 local_irq_save(flags);
1552
1553 cpu = smp_processor_id();
1554
David S. Miller59abbd12009-09-10 06:28:20 -07001555 pr_info("\n");
David S. Miller3f1a2092012-08-17 02:51:21 -07001556 for (i = 0; i < sparc_pmu->num_pcrs; i++)
1557 pr_info("CPU#%d: PCR%d[%016llx]\n",
1558 cpu, i, pcr_ops->read_pcr(i));
1559 for (i = 0; i < sparc_pmu->num_pic_regs; i++)
1560 pr_info("CPU#%d: PIC%d[%016llx]\n",
1561 cpu, i, pcr_ops->read_pic(i));
David S. Miller59abbd12009-09-10 06:28:20 -07001562
1563 local_irq_restore(flags);
1564}
1565
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001566static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
David S. Millerd29862f2009-09-28 17:37:12 -07001567 unsigned long cmd, void *__args)
David S. Miller59abbd12009-09-10 06:28:20 -07001568{
1569 struct die_args *args = __args;
1570 struct perf_sample_data data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001571 struct cpu_hw_events *cpuc;
David S. Miller59abbd12009-09-10 06:28:20 -07001572 struct pt_regs *regs;
David S. Millere7bef6b2010-01-20 02:59:47 -08001573 int i;
David S. Miller59abbd12009-09-10 06:28:20 -07001574
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001575 if (!atomic_read(&active_events))
David S. Miller59abbd12009-09-10 06:28:20 -07001576 return NOTIFY_DONE;
1577
1578 switch (cmd) {
1579 case DIE_NMI:
1580 break;
1581
1582 default:
1583 return NOTIFY_DONE;
1584 }
1585
1586 regs = args->regs;
1587
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001588 cpuc = &__get_cpu_var(cpu_hw_events);
David S. Millere04ed382010-01-04 23:16:03 -08001589
1590 /* If the PMU has the TOE IRQ enable bits, we need to do a
1591 * dummy write to the %pcr to clear the overflow bits and thus
1592 * the interrupt.
1593 *
1594 * Do this before we peek at the counters to determine
1595 * overflow so we don't lose any events.
1596 */
David S. Miller3f1a2092012-08-17 02:51:21 -07001597 if (sparc_pmu->irq_bit &&
1598 sparc_pmu->num_pcrs == 1)
1599 pcr_ops->write_pcr(0, cpuc->pcr[0]);
David S. Millere04ed382010-01-04 23:16:03 -08001600
David S. Millere7bef6b2010-01-20 02:59:47 -08001601 for (i = 0; i < cpuc->n_events; i++) {
1602 struct perf_event *event = cpuc->event[i];
1603 int idx = cpuc->current_idx[i];
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001604 struct hw_perf_event *hwc;
David S. Miller59abbd12009-09-10 06:28:20 -07001605 u64 val;
1606
David S. Miller3f1a2092012-08-17 02:51:21 -07001607 if (sparc_pmu->irq_bit &&
1608 sparc_pmu->num_pcrs > 1)
1609 pcr_ops->write_pcr(idx, cpuc->pcr[idx]);
1610
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001611 hwc = &event->hw;
1612 val = sparc_perf_event_update(event, hwc, idx);
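		/* The counters are 32 bits wide, so a counter that just
		 * wrapped reads back as a small value with bit 31 clear.
		 * If bit 31 is still set, this counter did not overflow
		 * and is not the source of this interrupt.
		 */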
David S. Miller59abbd12009-09-10 06:28:20 -07001613 if (val & (1ULL << 31))
1614 continue;
1615
Robert Richterfd0d0002012-04-02 20:19:08 +02001616 perf_sample_data_init(&data, 0, hwc->last_period);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001617 if (!sparc_perf_event_set_period(event, hwc, idx))
David S. Miller59abbd12009-09-10 06:28:20 -07001618 continue;
1619
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02001620 if (perf_event_overflow(event, &data, regs))
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001621 sparc_pmu_stop(event, 0);
David S. Miller59abbd12009-09-10 06:28:20 -07001622 }
1623
1624 return NOTIFY_STOP;
1625}
1626
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001627static __read_mostly struct notifier_block perf_event_nmi_notifier = {
1628 .notifier_call = perf_event_nmi_handler,
David S. Miller59abbd12009-09-10 06:28:20 -07001629};
1630
1631static bool __init supported_pmu(void)
1632{
David S. Miller28e8f9b2009-09-26 20:54:22 -07001633 if (!strcmp(sparc_pmu_type, "ultra3") ||
1634 !strcmp(sparc_pmu_type, "ultra3+") ||
1635 !strcmp(sparc_pmu_type, "ultra3i") ||
1636 !strcmp(sparc_pmu_type, "ultra4+")) {
1637 sparc_pmu = &ultra3_pmu;
David S. Miller59abbd12009-09-10 06:28:20 -07001638 return true;
1639 }
David S. Miller7eebda62009-09-26 21:23:41 -07001640 if (!strcmp(sparc_pmu_type, "niagara")) {
1641 sparc_pmu = &niagara1_pmu;
1642 return true;
1643 }
David S. Miller4ba991d2011-07-27 21:06:16 -07001644 if (!strcmp(sparc_pmu_type, "niagara2") ||
1645 !strcmp(sparc_pmu_type, "niagara3")) {
David S. Millerb73d8842009-09-10 07:22:18 -07001646 sparc_pmu = &niagara2_pmu;
1647 return true;
1648 }
David S. Miller035ea282012-08-17 23:06:09 -07001649 if (!strcmp(sparc_pmu_type, "niagara4")) {
1650 sparc_pmu = &niagara4_pmu;
1651 return true;
1652 }
David S. Miller59abbd12009-09-10 06:28:20 -07001653 return false;
1654}
1655
Peter Zijlstra004417a2010-11-25 18:38:29 +01001656int __init init_hw_perf_events(void)
David S. Miller59abbd12009-09-10 06:28:20 -07001657{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001658 pr_info("Performance events: ");
David S. Miller59abbd12009-09-10 06:28:20 -07001659
1660 if (!supported_pmu()) {
1661 pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
Peter Zijlstra004417a2010-11-25 18:38:29 +01001662 return 0;
David S. Miller59abbd12009-09-10 06:28:20 -07001663 }
1664
1665 pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);
1666
Peter Zijlstra2e80a822010-11-17 23:17:36 +01001667 perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001668 register_die_notifier(&perf_event_nmi_notifier);
Peter Zijlstra004417a2010-11-25 18:38:29 +01001669
1670 return 0;
David S. Miller59abbd12009-09-10 06:28:20 -07001671}
Ingo Molnarefc70d22010-12-10 00:27:23 +01001672early_initcall(init_hw_perf_events);
David S. Miller4f6dbe42010-01-19 00:26:13 -08001673
Frederic Weisbecker56962b42010-06-30 23:03:51 +02001674void perf_callchain_kernel(struct perf_callchain_entry *entry,
1675 struct pt_regs *regs)
David S. Miller4f6dbe42010-01-19 00:26:13 -08001676{
1677 unsigned long ksp, fp;
David S. Miller667f0ce2010-04-21 03:08:11 -07001678#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1679 int graph = 0;
1680#endif
David S. Miller4f6dbe42010-01-19 00:26:13 -08001681
Frederic Weisbecker56962b42010-06-30 23:03:51 +02001682 stack_trace_flush();
1683
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001684 perf_callchain_store(entry, regs->tpc);
David S. Miller4f6dbe42010-01-19 00:26:13 -08001685
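	/* The 64-bit ABI biases %sp/%fp by STACK_BIAS (2047), so add the
	 * bias back before dereferencing the saved frame pointer.
	 */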
1686 ksp = regs->u_regs[UREG_I6];
1687 fp = ksp + STACK_BIAS;
1688 do {
1689 struct sparc_stackf *sf;
1690 struct pt_regs *regs;
1691 unsigned long pc;
1692
1693 if (!kstack_valid(current_thread_info(), fp))
1694 break;
1695
1696 sf = (struct sparc_stackf *) fp;
1697 regs = (struct pt_regs *) (sf + 1);
1698
1699 if (kstack_is_trap_frame(current_thread_info(), regs)) {
1700 if (user_mode(regs))
1701 break;
1702 pc = regs->tpc;
1703 fp = regs->u_regs[UREG_I6] + STACK_BIAS;
1704 } else {
1705 pc = sf->callers_pc;
1706 fp = (unsigned long)sf->fp + STACK_BIAS;
1707 }
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001708 perf_callchain_store(entry, pc);
David S. Miller667f0ce2010-04-21 03:08:11 -07001709#ifdef CONFIG_FUNCTION_GRAPH_TRACER
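		/* The saved %i7 normally holds the call instruction's
		 * address; the return actually lands 8 bytes past it
		 * (call plus delay slot).  When the function graph tracer
		 * is active it redirects that slot to return_to_handler,
		 * so fetch the original return address from the task's
		 * ret_stack instead.
		 */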
1710 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
1711 int index = current->curr_ret_stack;
1712 if (current->ret_stack && index >= graph) {
1713 pc = current->ret_stack[index - graph].ret;
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001714 perf_callchain_store(entry, pc);
David S. Miller667f0ce2010-04-21 03:08:11 -07001715 graph++;
1716 }
1717 }
1718#endif
David S. Miller4f6dbe42010-01-19 00:26:13 -08001719 } while (entry->nr < PERF_MAX_STACK_DEPTH);
1720}
1721
Frederic Weisbecker56962b42010-06-30 23:03:51 +02001722static void perf_callchain_user_64(struct perf_callchain_entry *entry,
1723 struct pt_regs *regs)
David S. Miller4f6dbe42010-01-19 00:26:13 -08001724{
1725 unsigned long ufp;
1726
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001727 perf_callchain_store(entry, regs->tpc);
David S. Miller4f6dbe42010-01-19 00:26:13 -08001728
1729 ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
1730 do {
1731 struct sparc_stackf *usf, sf;
1732 unsigned long pc;
1733
1734 usf = (struct sparc_stackf *) ufp;
1735 if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
1736 break;
1737
1738 pc = sf.callers_pc;
1739 ufp = (unsigned long)sf.fp + STACK_BIAS;
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001740 perf_callchain_store(entry, pc);
David S. Miller4f6dbe42010-01-19 00:26:13 -08001741 } while (entry->nr < PERF_MAX_STACK_DEPTH);
1742}
1743
Frederic Weisbecker56962b42010-06-30 23:03:51 +02001744static void perf_callchain_user_32(struct perf_callchain_entry *entry,
1745 struct pt_regs *regs)
David S. Miller4f6dbe42010-01-19 00:26:13 -08001746{
1747 unsigned long ufp;
1748
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001749 perf_callchain_store(entry, regs->tpc);
David S. Miller4f6dbe42010-01-19 00:26:13 -08001750
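	/* Compat (32-bit) tasks use flat 32-bit frame pointers with no
	 * stack bias, and the upper half of the 64-bit register is not
	 * meaningful, so truncate it before walking the frames.
	 */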
David S. Miller9e8307e2010-03-29 13:08:52 -07001751 ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
David S. Miller4f6dbe42010-01-19 00:26:13 -08001752 do {
1753 struct sparc_stackf32 *usf, sf;
1754 unsigned long pc;
1755
1756 usf = (struct sparc_stackf32 *) ufp;
1757 if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
1758 break;
1759
1760 pc = sf.callers_pc;
1761 ufp = (unsigned long)sf.fp;
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001762 perf_callchain_store(entry, pc);
David S. Miller4f6dbe42010-01-19 00:26:13 -08001763 } while (entry->nr < PERF_MAX_STACK_DEPTH);
1764}
1765
Frederic Weisbecker56962b42010-06-30 23:03:51 +02001766void
1767perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
David S. Miller4f6dbe42010-01-19 00:26:13 -08001768{
Frederic Weisbecker56962b42010-06-30 23:03:51 +02001769 flushw_user();
1770 if (test_thread_flag(TIF_32BIT))
1771 perf_callchain_user_32(entry, regs);
1772 else
1773 perf_callchain_user_64(entry, regs);
David S. Miller4f6dbe42010-01-19 00:26:13 -08001774}