/* Performance event support for sparc64.
 *
 * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net>
 *
 * This code is based almost entirely upon the x86 perf event
 * code, which is:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>

#include <asm/stacktrace.h>
#include <asm/cpudata.h>
#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>
#include <asm/cacheflush.h>

#include "kernel.h"
#include "kstack.h"

/* Sparc64 chips have two performance counters, 32-bits each, with
 * overflow interrupts generated on transition from 0xffffffff to 0.
 * The counters are accessed in one go using a 64-bit register.
 *
 * Both counters are controlled using a single control register.  The
 * only way to stop all sampling is to clear all of the context (user,
 * supervisor, hypervisor) sampling enable bits.  But these bits apply
 * to both counters, thus the two counters can't be enabled/disabled
 * individually.
 *
 * The control register has two event fields, one for each of the two
 * counters.  It's thus nearly impossible to have one counter going
 * while keeping the other one stopped.  Therefore it is possible to
 * get overflow interrupts for counters not currently "in use" and
 * that condition must be checked in the overflow interrupt handler.
 *
 * So we use a hack, in that we program inactive counters with the
 * "sw_count0" and "sw_count1" events.  These count how many times
 * the instruction "sethi %hi(0xfc000), %g0" is executed.  It's an
 * unusual way to encode a NOP and therefore will not trigger in
 * normal code.
 */

#define MAX_HWEVENTS		2
#define MAX_PERIOD		((1UL << 32) - 1)

#define PIC_UPPER_INDEX		0
#define PIC_LOWER_INDEX		1
#define PIC_NO_INDEX		-1
struct cpu_hw_events {
	/* Number of events currently scheduled onto this cpu.
	 * This tells how many entries in the arrays below
	 * are valid.
	 */
	int			n_events;

	/* Number of new events added since the last hw_perf_disable().
	 * This works because the perf event layer always adds new
	 * events inside of a perf_{disable,enable}() sequence.
	 */
	int			n_added;

	/* Array of events currently scheduled on this cpu. */
	struct perf_event	*event[MAX_HWEVENTS];

	/* Array of encoded longs, specifying the %pcr register
	 * encoding and the mask of PIC counters this event can
	 * be scheduled on.  See perf_event_encode() et al.
	 */
	unsigned long		events[MAX_HWEVENTS];

	/* The current counter index assigned to an event.  When the
	 * event hasn't been programmed into the cpu yet, this will
	 * hold PIC_NO_INDEX.  The event->hw.idx value tells us where
	 * we ought to schedule the event.
	 */
	int			current_idx[MAX_HWEVENTS];

	/* Software copy of %pcr register on this cpu.  */
	u64			pcr;

	/* Enabled/disabled state.  */
	int			enabled;

	unsigned int		group_flag;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };

/* An event map describes the characteristics of a performance
 * counter event.  In particular it gives the encoding as well as
 * a mask telling which counters the event can be measured on.
 */
struct perf_event_map {
	u16 encoding;
	u8 pic_mask;
#define PIC_NONE	0x00
#define PIC_UPPER	0x01
#define PIC_LOWER	0x02
};

/* Encode a perf_event_map entry into a long.  */
static unsigned long perf_event_encode(const struct perf_event_map *pmap)
{
	return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
}

static u8 perf_event_get_msk(unsigned long val)
{
	return val & 0xff;
}

static u64 perf_event_get_enc(unsigned long val)
{
	return val >> 16;
}

#define C(x) PERF_COUNT_HW_CACHE_##x

#define CACHE_OP_UNSUPPORTED	0xfffe
#define CACHE_OP_NONSENSE	0xffff

typedef struct perf_event_map cache_map_t
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

struct sparc_pmu {
	const struct perf_event_map	*(*event_map)(int);
	const cache_map_t		*cache_map;
	int				max_events;
	u32				(*read_pmc)(int);
	void				(*write_pmc)(int, u64);
	int				upper_shift;
	int				lower_shift;
	int				event_mask;
	int				hv_bit;
	int				irq_bit;
	int				upper_nop;
	int				lower_nop;
	unsigned int			flags;
#define SPARC_PMU_ALL_EXCLUDES_SAME	0x00000001
#define SPARC_PMU_HAS_CONFLICTS		0x00000002
	int				max_hw_events;
};

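/* The default counter accessors operate on the shared 64-bit %pic
 * register: the upper counter occupies bits 63:32 and the lower
 * counter bits 31:0, so reads and writes of one counter must mask
 * and shift around the other.
 */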
static u32 sparc_default_read_pmc(int idx)
{
	u64 val;

	val = pcr_ops->read_pic(0);
	if (idx == PIC_UPPER_INDEX)
		val >>= 32;

	return val & 0xffffffff;
}

static void sparc_default_write_pmc(int idx, u64 val)
{
	u64 shift, mask, pic;

	shift = 0;
	if (idx == PIC_UPPER_INDEX)
		shift = 32;

	mask = ((u64) 0xffffffff) << shift;
	val <<= shift;

	pic = pcr_ops->read_pic(0);
	pic &= ~mask;
	pic |= val;
	pcr_ops->write_pic(0, pic);
}

static const struct perf_event_map ultra3_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};

static const struct perf_event_map *ultra3_event_map(int event_id)
{
	return &ultra3_perfmon_event_map[event_id];
}

static const cache_map_t ultra3_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
		[C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu ultra3_pmu = {
	.event_map	= ultra3_event_map,
	.cache_map	= &ultra3_cache_map,
	.max_events	= ARRAY_SIZE(ultra3_perfmon_event_map),
	.read_pmc	= sparc_default_read_pmc,
	.write_pmc	= sparc_default_write_pmc,
	.upper_shift	= 11,
	.lower_shift	= 4,
	.event_mask	= 0x3f,
	.upper_nop	= 0x1c,
	.lower_nop	= 0x14,
	.flags		= (SPARC_PMU_ALL_EXCLUDES_SAME |
			   SPARC_PMU_HAS_CONFLICTS),
	.max_hw_events	= 2,
};

/* Niagara1 is very limited.  The upper PIC is hard-locked to count
 * only instructions, so it is free running which creates all kinds of
 * problems.  Some hardware designs make one wonder if the creator
 * even looked at how this stuff gets used by software.
 */
static const struct perf_event_map niagara1_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
};

static const struct perf_event_map *niagara1_event_map(int event_id)
{
	return &niagara1_perfmon_event_map[event_id];
}

static const cache_map_t niagara1_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
		[C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu niagara1_pmu = {
	.event_map	= niagara1_event_map,
	.cache_map	= &niagara1_cache_map,
	.max_events	= ARRAY_SIZE(niagara1_perfmon_event_map),
	.read_pmc	= sparc_default_read_pmc,
	.write_pmc	= sparc_default_write_pmc,
	.upper_shift	= 0,
	.lower_shift	= 4,
	.event_mask	= 0x7,
	.upper_nop	= 0x0,
	.lower_nop	= 0x0,
	.flags		= (SPARC_PMU_ALL_EXCLUDES_SAME |
			   SPARC_PMU_HAS_CONFLICTS),
	.max_hw_events	= 2,
};

static const struct perf_event_map niagara2_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};

static const struct perf_event_map *niagara2_event_map(int event_id)
{
	return &niagara2_perfmon_event_map[event_id];
}

static const cache_map_t niagara2_cache_map = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
		[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
		[C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
		[C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
		[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
	},
},
};

static const struct sparc_pmu niagara2_pmu = {
	.event_map	= niagara2_event_map,
	.cache_map	= &niagara2_cache_map,
	.max_events	= ARRAY_SIZE(niagara2_perfmon_event_map),
	.read_pmc	= sparc_default_read_pmc,
	.write_pmc	= sparc_default_write_pmc,
	.upper_shift	= 19,
	.lower_shift	= 6,
	.event_mask	= 0xfff,
	.hv_bit		= 0x8,
	.irq_bit	= 0x30,
	.upper_nop	= 0x220,
	.lower_nop	= 0x220,
	.flags		= (SPARC_PMU_ALL_EXCLUDES_SAME |
			   SPARC_PMU_HAS_CONFLICTS),
	.max_hw_events	= 2,
};

static const struct sparc_pmu *sparc_pmu __read_mostly;

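/* Shift a raw event encoding into position for the counter selected by
 * idx: the upper counter's event field sits at upper_shift within %pcr,
 * the lower counter's at lower_shift.  mask_for_index() yields the event
 * field mask for that slot, and nop_for_index() the per-chip "NOP" event
 * used to park an idle counter.
 */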
static u64 event_encoding(u64 event_id, int idx)
{
	if (idx == PIC_UPPER_INDEX)
		event_id <<= sparc_pmu->upper_shift;
	else
		event_id <<= sparc_pmu->lower_shift;
	return event_id;
}

static u64 mask_for_index(int idx)
{
	return event_encoding(sparc_pmu->event_mask, idx);
}

static u64 nop_for_index(int idx)
{
	return event_encoding(idx == PIC_UPPER_INDEX ?
			      sparc_pmu->upper_nop :
			      sparc_pmu->lower_nop, idx);
}

static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
	u64 val, mask = mask_for_index(idx);

	val = cpuc->pcr;
	val &= ~mask;
	val |= hwc->config;
	cpuc->pcr = val;

	pcr_ops->write_pcr(0, cpuc->pcr);
}

static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx)
{
	u64 mask = mask_for_index(idx);
	u64 nop = nop_for_index(idx);
	u64 val;

	val = cpuc->pcr;
	val &= ~mask;
	val |= nop;
	cpuc->pcr = val;

	pcr_ops->write_pcr(0, cpuc->pcr);
}

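/* Read the current hardware count for an event and fold the delta since
 * the last read into event->count and hwc->period_left.  The cmpxchg
 * loop protects against a racing update (e.g. from the overflow NMI),
 * and the 64 - 32 shift keeps the delta arithmetic correct for the
 * 32-bit wide counters.
 */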
static u64 sparc_perf_event_update(struct perf_event *event,
				   struct hw_perf_event *hwc, int idx)
{
	int shift = 64 - 32;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = sparc_pmu->read_pmc(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static int sparc_perf_event_set_period(struct perf_event *event,
				       struct hw_perf_event *hwc, int idx)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	if (left > MAX_PERIOD)
		left = MAX_PERIOD;

	local64_set(&hwc->prev_count, (u64)-left);

	sparc_pmu->write_pmc(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);

	return ret;
}

/* If performance event entries have been added, move existing
 * events around (if necessary) and then assign new entries to
 * counters.
 */
static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr)
{
	int i;

	if (!cpuc->n_added)
		goto out;

	/* Read in the counters which are moving.  */
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];

		if (cpuc->current_idx[i] != PIC_NO_INDEX &&
		    cpuc->current_idx[i] != cp->hw.idx) {
			sparc_perf_event_update(cp, &cp->hw,
						cpuc->current_idx[i]);
			cpuc->current_idx[i] = PIC_NO_INDEX;
		}
	}

	/* Assign to counters all unassigned events.  */
	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];
		struct hw_perf_event *hwc = &cp->hw;
		int idx = hwc->idx;
		u64 enc;

		if (cpuc->current_idx[i] != PIC_NO_INDEX)
			continue;

		sparc_perf_event_set_period(cp, hwc, idx);
		cpuc->current_idx[i] = idx;

		enc = perf_event_get_enc(cpuc->events[i]);
		pcr &= ~mask_for_index(idx);
		if (hwc->state & PERF_HES_STOPPED)
			pcr |= nop_for_index(idx);
		else
			pcr |= event_encoding(enc, idx);
	}
out:
	return pcr;
}

static void sparc_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 pcr;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	pcr = cpuc->pcr;
	if (!cpuc->n_events) {
		pcr = 0;
	} else {
		pcr = maybe_change_configuration(cpuc, pcr);

		/* We require that all of the events have the same
		 * configuration, so just fetch the settings from the
		 * first entry.
		 */
		cpuc->pcr = pcr | cpuc->event[0]->hw.config_base;
	}

	pcr_ops->write_pcr(0, cpuc->pcr);
}

static void sparc_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	u64 val;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	cpuc->n_added = 0;

	val = cpuc->pcr;
	val &= ~(PCR_UTRACE | PCR_STRACE |
		 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
	cpuc->pcr = val;

	pcr_ops->write_pcr(0, cpuc->pcr);
}

static int active_event_index(struct cpu_hw_events *cpuc,
			      struct perf_event *event)
{
	int i;

	for (i = 0; i < cpuc->n_events; i++) {
		if (cpuc->event[i] == event)
			break;
	}
	BUG_ON(i == cpuc->n_events);
	return cpuc->current_idx[i];
}

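/* perf core callbacks: start/stop reprogram a single already-added
 * event, del drops it from the per-cpu list after folding in its final
 * count, and read refreshes the count on demand.
 */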
static void sparc_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
		sparc_perf_event_set_period(event, &event->hw, idx);
	}

	event->hw.state = 0;

	sparc_pmu_enable_event(cpuc, &event->hw, idx);
}

static void sparc_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);

	if (!(event->hw.state & PERF_HES_STOPPED)) {
		sparc_pmu_disable_event(cpuc, &event->hw, idx);
		event->hw.state |= PERF_HES_STOPPED;
	}

	if (!(event->hw.state & PERF_HES_UPTODATE) && (flags & PERF_EF_UPDATE)) {
		sparc_perf_event_update(event, &event->hw, idx);
		event->hw.state |= PERF_HES_UPTODATE;
	}
}

static void sparc_pmu_del(struct perf_event *event, int _flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;
	int i;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event[i]) {
			/* Absorb the final count and turn off the
			 * event.
			 */
			sparc_pmu_stop(event, PERF_EF_UPDATE);

			/* Shift remaining entries down into
			 * the existing slot.
			 */
			while (++i < cpuc->n_events) {
				cpuc->event[i - 1] = cpuc->event[i];
				cpuc->events[i - 1] = cpuc->events[i];
				cpuc->current_idx[i - 1] =
					cpuc->current_idx[i];
			}

			perf_event_update_userpage(event);

			cpuc->n_events--;
			break;
		}
	}

	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}

static void sparc_pmu_read(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = active_event_index(cpuc, event);
	struct hw_perf_event *hwc = &event->hw;

	sparc_perf_event_update(event, hwc, idx);
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);

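/* The performance counters are shared with the NMI watchdog: the first
 * perf event to be created shuts the watchdog down on every cpu (saving
 * each cpu's %pcr image), and the last event to go away turns the
 * watchdog back on.
 */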
static void perf_stop_nmi_watchdog(void *unused)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	stop_nmi_watchdog(NULL);
	cpuc->pcr = pcr_ops->read_pcr(0);
}

void perf_event_grab_pmc(void)
{
	if (atomic_inc_not_zero(&active_events))
		return;

	mutex_lock(&pmc_grab_mutex);
	if (atomic_read(&active_events) == 0) {
		if (atomic_read(&nmi_active) > 0) {
			on_each_cpu(perf_stop_nmi_watchdog, NULL, 1);
			BUG_ON(atomic_read(&nmi_active) != 0);
		}
		atomic_inc(&active_events);
	}
	mutex_unlock(&pmc_grab_mutex);
}

void perf_event_release_pmc(void)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
		if (atomic_read(&nmi_active) == 0)
			on_each_cpu(start_nmi_watchdog, NULL, 1);
		mutex_unlock(&pmc_grab_mutex);
	}
}

static const struct perf_event_map *sparc_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct perf_event_map *pmap;

	if (!sparc_pmu->cache_map)
		return ERR_PTR(-ENOENT);

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);

	if (pmap->encoding == CACHE_OP_UNSUPPORTED)
		return ERR_PTR(-ENOENT);

	if (pmap->encoding == CACHE_OP_NONSENSE)
		return ERR_PTR(-EINVAL);

	return pmap;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	perf_event_release_pmc();
}

/* Make sure all events can be scheduled into the hardware at
 * the same time.  This is simplified by the fact that we only
 * need to support 2 simultaneous HW events.
 *
 * As a side effect, the evts[]->hw.idx values will be assigned
 * on success.  These are pending indexes.  When the events are
 * actually programmed into the chip, these values will propagate
 * to the per-cpu cpuc->current_idx[] slots, see the code in
 * maybe_change_configuration() for details.
 */
static int sparc_check_constraints(struct perf_event **evts,
				   unsigned long *events, int n_ev)
{
	u8 msk0 = 0, msk1 = 0;
	int idx0 = 0;

	/* This case is possible when we are invoked from
	 * hw_perf_group_sched_in().
	 */
	if (!n_ev)
		return 0;

	if (n_ev > sparc_pmu->max_hw_events)
		return -1;

	if (!(sparc_pmu->flags & SPARC_PMU_HAS_CONFLICTS)) {
		int i;

		for (i = 0; i < n_ev; i++)
			evts[i]->hw.idx = i;
		return 0;
	}

	msk0 = perf_event_get_msk(events[0]);
	if (n_ev == 1) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}
	BUG_ON(n_ev != 2);
	msk1 = perf_event_get_msk(events[1]);

	/* If both events can go on any counter, OK.  */
	if (msk0 == (PIC_UPPER | PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER))
		goto success;

	/* If one event is limited to a specific counter,
	 * and the other can go on both, OK.
	 */
	if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) &&
	    msk1 == (PIC_UPPER | PIC_LOWER)) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}

	if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
	    msk0 == (PIC_UPPER | PIC_LOWER)) {
		if (msk1 & PIC_UPPER)
			idx0 = 1;
		goto success;
	}

	/* If the events are fixed to different counters, OK.  */
	if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) ||
	    (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) {
		if (msk0 & PIC_LOWER)
			idx0 = 1;
		goto success;
	}

	/* Otherwise, there is a conflict.  */
	return -1;

success:
	evts[0]->hw.idx = idx0;
	if (n_ev == 2)
		evts[1]->hw.idx = idx0 ^ 1;
	return 0;
}

static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	struct perf_event *event;
	int i, n, first;

	if (!(sparc_pmu->flags & SPARC_PMU_ALL_EXCLUDES_SAME))
		return 0;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; i++) {
		event = evts[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	return 0;
}

static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *evts[], unsigned long *events,
			  int *current_idx)
{
	struct perf_event *event;
	int n = 0;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		evts[n] = group;
		events[n] = group->hw.event_base;
		current_idx[n++] = PIC_NO_INDEX;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			evts[n] = event;
			events[n] = event->hw.event_base;
			current_idx[n++] = PIC_NO_INDEX;
		}
	}
	return n;
}

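/* Queue an event on the per-cpu list.  The actual counter programming
 * is deferred to sparc_pmu_enable(); within a group transaction the
 * schedulability checks are likewise postponed until commit_txn.
 */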
static int sparc_pmu_add(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int n0, ret = -EAGAIN;
	unsigned long flags;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	n0 = cpuc->n_events;
	if (n0 >= sparc_pmu->max_hw_events)
		goto out;

	cpuc->event[n0] = event;
	cpuc->events[n0] = event->hw.event_base;
	cpuc->current_idx[n0] = PIC_NO_INDEX;

	event->hw.state = PERF_HES_UPTODATE;
	if (!(ef_flags & PERF_EF_START))
		event->hw.state |= PERF_HES_STOPPED;

	/*
	 * If group events scheduling transaction was started,
	 * skip the schedulability test here, it will be performed
	 * at commit time(->commit_txn) as a whole
	 */
	if (cpuc->group_flag & PERF_EVENT_TXN)
		goto nocheck;

	if (check_excludes(cpuc->event, n0, 1))
		goto out;
	if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1))
		goto out;

nocheck:
	cpuc->n_events++;
	cpuc->n_added++;

	ret = 0;
out:
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
	return ret;
}

static int sparc_pmu_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct perf_event *evts[MAX_HWEVENTS];
	struct hw_perf_event *hwc = &event->hw;
	unsigned long events[MAX_HWEVENTS];
	int current_idx_dmy[MAX_HWEVENTS];
	const struct perf_event_map *pmap;
	int n;

	if (atomic_read(&nmi_active) < 0)
		return -ENODEV;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (attr->type) {
	case PERF_TYPE_HARDWARE:
		if (attr->config >= sparc_pmu->max_events)
			return -EINVAL;
		pmap = sparc_pmu->event_map(attr->config);
		break;

	case PERF_TYPE_HW_CACHE:
		pmap = sparc_map_cache_event(attr->config);
		if (IS_ERR(pmap))
			return PTR_ERR(pmap);
		break;

	case PERF_TYPE_RAW:
		pmap = NULL;
		break;

	default:
		return -ENOENT;

	}

	if (pmap) {
		hwc->event_base = perf_event_encode(pmap);
	} else {
		/*
		 * User gives us "(encoding << 16) | pic_mask" for
		 * PERF_TYPE_RAW events.
		 */
		hwc->event_base = attr->config;
	}

	/* We save the enable bits in the config_base.  */
	hwc->config_base = sparc_pmu->irq_bit;
	if (!attr->exclude_user)
		hwc->config_base |= PCR_UTRACE;
	if (!attr->exclude_kernel)
		hwc->config_base |= PCR_STRACE;
	if (!attr->exclude_hv)
		hwc->config_base |= sparc_pmu->hv_bit;

	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
				   sparc_pmu->max_hw_events - 1,
				   evts, events, current_idx_dmy);
		if (n < 0)
			return -EINVAL;
	}
	events[n] = hwc->event_base;
	evts[n] = event;

	if (check_excludes(evts, n, 1))
		return -EINVAL;

	if (sparc_check_constraints(evts, events, n + 1))
		return -EINVAL;

	hwc->idx = PIC_NO_INDEX;

	/* Try to do all error checking before this point, as unwinding
	 * state after grabbing the PMC is difficult.
	 */
	perf_event_grab_pmc();
	event->destroy = hw_perf_event_destroy;

	if (!hwc->sample_period) {
		hwc->sample_period = MAX_PERIOD;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	return 0;
}

/*
 * Start group events scheduling transaction
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time
 */
static void sparc_pmu_start_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	perf_pmu_disable(pmu);
	cpuhw->group_flag |= PERF_EVENT_TXN;
}

/*
 * Stop group events scheduling transaction
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
static void sparc_pmu_cancel_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
}

/*
 * Commit group events scheduling transaction
 * Perform the group schedulability test as a whole
 * Return 0 if success
 */
static int sparc_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int n;

	if (!sparc_pmu)
		return -EINVAL;

	cpuc = &__get_cpu_var(cpu_hw_events);
	n = cpuc->n_events;
	if (check_excludes(cpuc->event, 0, n))
		return -EINVAL;
	if (sparc_check_constraints(cpuc->event, cpuc->events, n))
		return -EAGAIN;

	cpuc->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
	return 0;
}

static struct pmu pmu = {
	.pmu_enable	= sparc_pmu_enable,
	.pmu_disable	= sparc_pmu_disable,
	.event_init	= sparc_pmu_event_init,
	.add		= sparc_pmu_add,
	.del		= sparc_pmu_del,
	.start		= sparc_pmu_start,
	.stop		= sparc_pmu_stop,
	.read		= sparc_pmu_read,
	.start_txn	= sparc_pmu_start_txn,
	.cancel_txn	= sparc_pmu_cancel_txn,
	.commit_txn	= sparc_pmu_commit_txn,
};

void perf_event_print_debug(void)
{
	unsigned long flags;
	u64 pcr, pic;
	int cpu;

	if (!sparc_pmu)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();

	pcr = pcr_ops->read_pcr(0);
	pic = pcr_ops->read_pic(0);

	pr_info("\n");
	pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
		cpu, pcr, pic);

	local_irq_restore(flags);
}

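/* Performance counter overflow NMI.  A counter whose value still has
 * bit 31 set has not wrapped and is skipped; the others are folded into
 * their events, re-armed with a fresh period, and run through
 * perf_event_overflow() for sampling.
 */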
static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
					    unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int i;

	if (!atomic_read(&active_events))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	cpuc = &__get_cpu_var(cpu_hw_events);

	/* If the PMU has the TOE IRQ enable bits, we need to do a
	 * dummy write to the %pcr to clear the overflow bits and thus
	 * the interrupt.
	 *
	 * Do this before we peek at the counters to determine
	 * overflow so we don't lose any events.
	 */
	if (sparc_pmu->irq_bit)
		pcr_ops->write_pcr(0, cpuc->pcr);

	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *event = cpuc->event[i];
		int idx = cpuc->current_idx[i];
		struct hw_perf_event *hwc;
		u64 val;

		hwc = &event->hw;
		val = sparc_perf_event_update(event, hwc, idx);
		if (val & (1ULL << 31))
			continue;

		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!sparc_perf_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, &data, regs))
			sparc_pmu_stop(event, 0);
	}

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call		= perf_event_nmi_handler,
};

static bool __init supported_pmu(void)
{
	if (!strcmp(sparc_pmu_type, "ultra3") ||
	    !strcmp(sparc_pmu_type, "ultra3+") ||
	    !strcmp(sparc_pmu_type, "ultra3i") ||
	    !strcmp(sparc_pmu_type, "ultra4+")) {
		sparc_pmu = &ultra3_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara")) {
		sparc_pmu = &niagara1_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara2") ||
	    !strcmp(sparc_pmu_type, "niagara3")) {
		sparc_pmu = &niagara2_pmu;
		return true;
	}
	return false;
}

int __init init_hw_perf_events(void)
{
	pr_info("Performance events: ");

	if (!supported_pmu()) {
		pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
		return 0;
	}

	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
	register_die_notifier(&perf_event_nmi_notifier);

	return 0;
}
early_initcall(init_hw_perf_events);

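/* Callchain support.  The kernel walker follows saved frame pointers up
 * the kernel stack, crossing trap frames and (when the function graph
 * tracer is active) undoing return_to_handler substitutions.  The user
 * walkers copy frames in from the 64-bit or compat 32-bit user stack
 * layout respectively.
 */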
void perf_callchain_kernel(struct perf_callchain_entry *entry,
			   struct pt_regs *regs)
{
	unsigned long ksp, fp;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int graph = 0;
#endif

	stack_trace_flush();

	perf_callchain_store(entry, regs->tpc);

	ksp = regs->u_regs[UREG_I6];
	fp = ksp + STACK_BIAS;
	do {
		struct sparc_stackf *sf;
		struct pt_regs *regs;
		unsigned long pc;

		if (!kstack_valid(current_thread_info(), fp))
			break;

		sf = (struct sparc_stackf *) fp;
		regs = (struct pt_regs *) (sf + 1);

		if (kstack_is_trap_frame(current_thread_info(), regs)) {
			if (user_mode(regs))
				break;
			pc = regs->tpc;
			fp = regs->u_regs[UREG_I6] + STACK_BIAS;
		} else {
			pc = sf->callers_pc;
			fp = (unsigned long)sf->fp + STACK_BIAS;
		}
		perf_callchain_store(entry, pc);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
		if ((pc + 8UL) == (unsigned long) &return_to_handler) {
			int index = current->curr_ret_stack;
			if (current->ret_stack && index >= graph) {
				pc = current->ret_stack[index - graph].ret;
				perf_callchain_store(entry, pc);
				graph++;
			}
		}
#endif
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}

static void perf_callchain_user_64(struct perf_callchain_entry *entry,
				   struct pt_regs *regs)
{
	unsigned long ufp;

	perf_callchain_store(entry, regs->tpc);

	ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
	do {
		struct sparc_stackf *usf, sf;
		unsigned long pc;

		usf = (struct sparc_stackf *) ufp;
		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
			break;

		pc = sf.callers_pc;
		ufp = (unsigned long)sf.fp + STACK_BIAS;
		perf_callchain_store(entry, pc);
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}

static void perf_callchain_user_32(struct perf_callchain_entry *entry,
				   struct pt_regs *regs)
{
	unsigned long ufp;

	perf_callchain_store(entry, regs->tpc);

	ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
	do {
		struct sparc_stackf32 *usf, sf;
		unsigned long pc;

		usf = (struct sparc_stackf32 *) ufp;
		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
			break;

		pc = sf.callers_pc;
		ufp = (unsigned long)sf.fp;
		perf_callchain_store(entry, pc);
	} while (entry->nr < PERF_MAX_STACK_DEPTH);
}

void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	flushw_user();
	if (test_thread_flag(TIF_32BIT))
		perf_callchain_user_32(entry, regs);
	else
		perf_callchain_user_64(entry, regs);
}